Instructions

In [2]:
from matSHEEP import enc_vec, enc_mat, enc_tensor3
from matSHEEP import functions
from matSHEEP import utils
from matSHEEP.circuit import circuit
import numpy as np

Reduce Addition Check

In [6]:
import math
nb = 2
depth = int(math.floor(math.log(nb, 2)))
samples=2
inp = enc_vec(name='in', nb=nb)
out = enc_vec(name='out', nb=depth+1)
ra = functions.reduce_add('adder', inp, out)
ra_obj = circuit('reduce_add', ra)
ra_obj.write_file(filename='./test.sheep')
processing_time = np.zeros(samples)
for idx in range(0, samples):
    inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb)> 0.5))
    inp_file = inp.get_input_dict(inp_arr_val, write_file=True)
    results = ra_obj.run_circuit(inp_file)
    processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
    out_vars = out.get_variables()
    err_str = str(''.join(results['Outputs'][var] for var in out_vars)[::-1]) +" -- "+ str(np.asarray(inp_arr_val).sum())
    assert int(''.join(results['Outputs'][var] for var in out_vars)[::-1], 2) == np.asarray(inp_arr_val).sum(), err_str
print " ".join(["Time taken is" , str(processing_time.mean()),'sec'])
Time taken is 0.0017950000000000002 sec
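
The output width used above is not arbitrary: the sum of nb single bits is at most nb, and floor(log2(nb)) + 1 bits can hold any value up to 2**(floor(log2(nb)) + 1) - 1 >= nb. A quick plaintext check of this counting argument (plain Python only, no matSHEEP or encryption involved; the range of sizes is just illustrative):

In [ ]:
import math
# Plaintext sanity check: floor(log2(n)) + 1 output bits are always enough for a sum of n bits.
for n in range(1, 33):
    out_bits = int(math.floor(math.log(n, 2))) + 1
    assert n <= 2 ** out_bits - 1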

Binary Vector Dot Product + Sign Fn

In [4]:
from matSHEEP import nn_layer as nn
from tqdm import tqdm
nb = 10
samples = 2
inputs = enc_vec(name='nn_input', nb=nb)
depth = int(math.ceil(math.log(nb*1.0, 2)))
outputs = enc_vec(name='nn_outputs', nb=1)
outputs_sum = enc_vec(name='sum_outputs', nb=depth+1)
weight = (np.random.uniform(size=nb) > 0.5).astype(int)
layer = nn.linear_layer_1d(name='linear', weight=weight,
                    inputs=inputs, outputs=outputs)
layer_obj = circuit('linear_layer', layer, const_inputs=[])
layer_obj.write_file(filename='./test_linear.sheep')
processing_time = np.zeros(samples)
for idx in tqdm(range(0, samples)):
    inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb)> 0.5))
    inp_file = inputs.get_input_dict(inp_arr_val, write_file=True)
    results = layer_obj.run_circuit(inp_file)
    processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
    out_vars = outputs.get_variables()
    xor_val = (weight == np.asarray([x for x in inp_arr_val]))
    err_str = str(''.join(results['Outputs'][var] for var in out_vars)[::-1]) +" -- "+ str(np.asarray(xor_val).sum())
    assert int(''.join(results['Outputs'][var] for var in out_vars)[::-1], 2) == int(np.asarray(xor_val).sum()>nb/2), err_str
print " ".join(["Time taken is" , str(processing_time.mean()),'sec'])
100%|██████████| 2/2 [00:02<00:00,  1.11s/it]
Time taken is 0.5766150000000001 sec
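
For reference, the clear-text computation that the assert above encodes: bit-wise equality (XNOR) of the weight and input vectors, a popcount, and a threshold at nb/2 playing the role of the sign function. A minimal plaintext sketch (the helper name is mine, not part of matSHEEP):

In [ ]:
def binary_dot_sign(weight_bits, input_bits):
    # XNOR of the two bit vectors: 1 where the bits agree, 0 where they differ.
    xnor = (np.asarray(weight_bits) == np.asarray(input_bits)).astype(int)
    # Sign of the binarised dot product: 1 iff more than half of the bits agree.
    return int(xnor.sum() > len(weight_bits) // 2)

# This should match the circuit output checked above, e.g. binary_dot_sign(weight, inp_arr_val).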

Compare Encrypted Number (binary) with clear text

In [ ]:
from matSHEEP.reusable_modules import compare_cp
cp_inp_val = [1, 0, 1, 1, 1, 1, 1]
tgt = 126
comp_inp = enc_vec(name='cp_inp',nb = len(cp_inp_val))
out_vec = enc_vec(name='cp_out', nb=1)
cp_circ = compare_cp(name='cp', inputs=(comp_inp, tgt), outputs=out_vec)
cp_obj = circuit('cp', cp_circ)
cp_obj.write_file(filename='./test_cp.sheep')
inputs_file = comp_inp.get_input_dict(cp_inp_val, write_file=True)
results = cp_obj.run_circuit(inputs_file)
#assert (int(results['Outputs'][out_vec.get_variables()[0]]) == int(int(''.join(map(lambda x : str(x), cp_inp_val)),2) >= tgt))
if int(results['Outputs'][out_vec.get_variables()[0]]) == 1:
    print(str(int(''.join(map(str, cp_inp_val[::-1])), 2)) + " (CP) is greater than or equal to " + str(tgt) + " (PT)")
else:
    print(str(int(''.join(map(str, cp_inp_val[::-1])), 2)) + " (CP) is smaller than " + str(tgt) + " (PT)")
print(" ".join(["Time taken is", str(results['Processing times (s)']['circuit_evaluation']), "sec"]))

Fully Connected Layer

In [ ]:
nb = 10
samples = 1
num_out= 3
depth = int(math.ceil(math.log(nb*1.0, 2)))
inputs = enc_mat(name='nn_input', size=(1, nb))
outputs = enc_mat(name='nn_outputs', size=(num_out, 1))
weight = (np.random.uniform(size=(num_out, nb))> 0.5).astype(int)
layer = nn.linear_layer(name='linear', weight=weight,
                    inputs=inputs, outputs=outputs)
ll_obj = circuit('linear_layer', layer, const_inputs=[])
ll_obj.write_file(filename='./test_linear.sheep')
processing_time = np.zeros(samples)
for idx in tqdm(range(0, samples)):
    inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb*1)> 0.5))
    inp_file = inputs.get_input_dict(inp_arr_val, write_file=True)
    results = ll_obj.run_circuit(inp_file)
    processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
    out_vars = outputs.get_variables()
    xor_val = (weight == np.asarray([x for x in inp_arr_val]))
    #print(xor_val)
    out_bit_vec = np.asarray([int(results['Outputs'][var]) for var in out_vars])
    true_bit_vec = (np.sum(np.asarray(xor_val), axis=1)>nb/2).astype(int)
    assert np.all(out_bit_vec == true_bit_vec)
print(processing_time.mean())

Convolutional Layer

In [ ]:
import time
start = time.time()
inp_image = enc_tensor3(name='conv_in', size=(10, 7, 7))
out_image = enc_tensor3(name='conv_out', size=(10, 3, 3))
weight = (np.random.uniform(size=(10, 10,5,5))> 0.5).astype(int)
conv_lyr = nn.conv_layer(name='conv_nn', inputs=inp_image, outputs=out_image, weight=weight)
nb = int(np.prod(inp_image.size))
layer_obj = circuit('linear_layer', conv_lyr, const_inputs=[])
layer_obj.write_file(filename='./test_cnn.sheep')

end = time.time()
print(end - start)
samples = 1
processing_time = np.zeros(samples)
for idx in tqdm(range(0, samples)):
    inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb*1)> 0.5))
    inp_dict = inp_image.get_input_dict(inp_arr_val, write_file=True)
    results = layer_obj.run_circuit(inp_dict)
    processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
print(processing_time.mean())

Linear Layer Time (inp_dim, out_dim)

In [ ]:
def get_time(nb, out, samples=1):
    # Define input matrix, output matrix and weight matrix
    inputs = enc_mat(name='nn_input', size= (1, nb))
    outputs = enc_mat(name='nn_outputs', size=(out, 1))
    weight = (np.random.uniform(size=(out, nb))> 0.5).astype(int)

    #Initialize Layer and Circuit
    layer = nn.linear_layer(name='linear', weight=weight,
                        inputs=inputs, outputs=outputs)
    ll_obj = circuit('linear_layer', layer, const_inputs=[])
    ll_obj.write_file(filename='./test_linear.sheep')


    processing_time = np.zeros(samples)
    for idx in tqdm(range(0, samples)):
    
        # Random Input Value
        inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb*1)> 0.5))
        inp_file = inputs.get_input_dict(inp_arr_val, write_file=True)
    
        #Get Results
        results = ll_obj.run_circuit(inp_file)
        processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
        out_vars = outputs.get_variables()
    
        # Check that the circuit output matches the clear-text XNOR + threshold computation
        xor_val = (weight == np.asarray(inp_arr_val))
        out_bit_vec = np.asarray([int(results['Outputs'][var]) for var in out_vars])
        true_bit_vec = (np.sum(np.asarray(xor_val), axis=1)>nb/2).astype(int)
        assert np.all(out_bit_vec == true_bit_vec)
    return processing_time.mean()

Diabetes

Structure

$1704\rightarrow 10 \rightarrow 1 $

In [ ]:
# Input 
nb = 1704 # Input bits
samples = 1 #Averaging over
num_out= 10 # Number of Outputs

#t_1 = get_time(nb, num_out)
#t_2 = get_time(nb, 1)
#t_3 = get_time(10, 1)

Time

  • Out Seq $\rightarrow t_1 + t_3 \rightarrow$ 288 sec
  • All Parallel $\rightarrow t_2 + t_3 \rightarrow $ 29 sec
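
Written out explicitly, the two estimates are just sums of the per-layer times returned by the commented get_time calls above (assuming those calls were uncommented and run to obtain t_1, t_2 and t_3):

In [ ]:
# t_1 = get_time(1704, 10), t_2 = get_time(1704, 1), t_3 = get_time(10, 1)
out_seq_time = t_1 + t_3        # output-sequential estimate, ~288 sec
all_parallel_time = t_2 + t_3   # fully parallel estimate, ~29 sec
print(out_seq_time, all_parallel_time)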

MNIST

Structure

$784 \rightarrow 2048 \rightarrow 2048 \rightarrow 2048 \rightarrow 10$

In [ ]:
# t_1 = get_time(784, 256) 
# t_2 = get_time(2048, 2)
# t_3 = get_time(2048, 10)

Timing

  • OutSeq

\begin{align} \mathbf{TIME} &= 8 * \mathbf{TIME}~(784 \times 256) + 2 * (1024 * \mathbf{TIME}~(2048 \times 2) ) + \mathbf{TIME}~(2048\times 10)\\ &= 8 * t_1 + 2 * 1024 * t_2 + t_3\\ &= 8 * 3277.0 + 2 * 1024 * 70 + 348\\ &= 169,924\mathrm{sec} = 47.2\mathrm{hr} \end{align}

  • All Parallel

\begin{align} \mathbf{TIME} &= \dfrac{\mathbf{TIME}~(784 \times 256)}{256} + 2 * \dfrac{1}{2} (\mathbf{TIME}~(2048 \times 2) ) + \dfrac{1}{10}\mathbf{TIME}~(2048\times 10)\\ &= \frac{t_1}{256} + t_2 + \frac{t_3}{10}\\ &= 12.8 + 70 + 35\\ &= 117.8\mathrm{sec} \end{align}

Cancer

Structure

$90\rightarrow 1$

In [ ]:
t = get_time(90,1, samples=2)
print(t)

Timings

OutSeq = All Parallel = $t = 4\mathrm{sec}$

In [ ]:
def get_conv_time(inp_image, filter_size):
    out_size = (filter_size[0], 
                inp_image[1] - filter_size[2] + 1, 
                inp_image[2] - filter_size[3] + 1)
    inp_image = enc_tensor3(name='conv_in', size=inp_image)
    out_image = enc_tensor3(name='conv_out', size=out_size)
    weight = (np.random.uniform(size=filter_size)> 0.5).astype(int)
    conv_lyr = nn.conv_layer(name='conv_nn', inputs=inp_image, outputs=out_image, weight=weight)
    nb = int(np.prod(inp_image.size))
    layer_obj = circuit('linear_layer', conv_lyr, const_inputs=[])
    layer_obj.write_file(filename='./test_cnn.sheep')
    samples = 1
    processing_time = np.zeros(samples)
    for idx in tqdm(range(0, samples)):
        inp_arr_val = list(map(lambda x : int(x), np.random.uniform(size=nb*1)> 0.5))
        inp_dict = inp_image.get_input_dict(inp_arr_val, write_file=True)
        results = layer_obj.run_circuit(inp_dict)
        processing_time[idx] = results['Processing times (s)']['circuit_evaluation']
    return processing_time.mean()

Faces

Structure

\begin{align} (1, 50, 50)&\rightarrow \mathbf{FILTER}~(32, 1, 10, 10)= \mathbf{ACTIVATIONS}~(32, 41, 41)\\ &\rightarrow \mathbf{FILTER}~(32, 32, 10, 10)= \mathbf{ACTIVATIONS}~(32, 32, 32)\\ &\rightarrow \mathbf{FILTER}~(32, 32, 10, 10)= \mathbf{ACTIVATIONS}~(32, 23, 23)\\ &\rightarrow \mathbf{FILTER}~(32, 32, 10, 10)= \mathbf{ACTIVATIONS}~(32, 14, 14)\\ &\rightarrow \mathbf{FILTER}~(1, 32, 10, 10)= \mathbf{ACTIVATIONS}~(1, 5, 5)\rightarrow 1 \end{align}
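
The activation sizes in the chain above follow from the valid-convolution rule used in get_conv_time above (output side = input side - kernel side + 1), so each 10 x 10 filter shrinks the spatial side by 9. A one-line plaintext check:

In [ ]:
# Spatial side after each of the five valid 10x10 convolutions: 41, 32, 23, 14, 5.
side = 50
for _ in range(5):
    side = side - 10 + 1
    print(side)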

In [1]:
# t_1 = get_conv_time(inp_image=(1, 15, 15), filter_size=(32, 1, 10, 10))   # = 1887
# t_2 = get_conv_time(inp_image=(32, 10, 10), filter_size=(32, 32, 10, 10)) # = 1737
# t_3 = get_conv_time(inp_image=(32, 12, 12), filter_size=(32, 32, 10, 10)) # = 16020
# t_4 = get_conv_time(inp_image=(32, 14, 14), filter_size=(1, 32, 10, 10)) # = 1347
# t_5 = get_time(25,1, samples=2)
# tt = 0.1 * t_2 + 0.9 * t_3 / (3 * 3) = 1775

Timings

  • Out Seq

\begin{align*} \mathbf{TIME} &= tt_1 + tt_2 + tt_3 + tt_4 + tt_5 + tt_6 \\ tt_1 &= t_1 \times \frac{41 \times 41}{36} = 88112~\mathrm{sec}\\ tt_2 &= tt \times 32 \times 32 = 1,817,600~\mathrm{sec}\\ tt_3 &= tt \times 23 \times 23 = 938,975~\mathrm{sec} \\ tt_4 &= tt \times 14 \times 14 = 347,900~\mathrm{sec} \\ tt_5 &= t_4 = 1347~\mathrm{sec}\\ tt_6 &= t_5 \sim 1~\mathrm{sec}\\ \mathbf{TIME} &\sim 88112 + 1817600 + 938975 + 347900 + 1347\\ &= 3,193,934~\mathrm{sec} \sim 886.83~\mathrm{hours} = 36.95~\mathrm{days} \end{align*}

  • All Parallel

\begin{align} \mathbf{TIME} &= \frac{t_1}{36} + tt \times 3 + t_4 + t_5\\ &= 52.41 + 5325 + 1347 + 1 = 6725.41~\mathrm{sec} \sim 1.87~\mathrm{hour} \end{align}
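
Plugging the measured values from the commented cell into the two formulas gives a plain arithmetic check (nothing encrypted here; t_5 is taken as roughly 1 sec, as noted above):

In [ ]:
t_1, t_2, t_3, t_4, t_5 = 1887.0, 1737.0, 16020.0, 1347.0, 1.0
tt = 0.1 * t_2 + 0.9 * t_3 / (3 * 3)              # the tt estimate from the cell above, ~1775 sec

out_seq = t_1 * 41 * 41 / 36 + tt * (32 * 32 + 23 * 23 + 14 * 14) + t_4 + t_5
all_parallel = t_1 / 36 + tt * 3 + t_4 + t_5
print(out_seq / (3600.0 * 24))                    # ~37 days
print(all_parallel / 3600.0)                      # ~1.87 hr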

The timings for Faces are somewhat worse than expected: the binary dot product over a 3200-bit vector appears to slow down from roughly 47 sec with the previous code to about 55 sec with SHEEP, whereas for a 1024-bit vector the timing is identical for both codes. I suspect this has something to do with TBB vs OpenMP, but I am not sure.