From e24e23917ae538f8b4723e6a51eced1e0eaa65af Mon Sep 17 00:00:00 2001
From: Hanjunhao <304716955@qq.com>
Date: Tue, 20 Feb 2024 16:36:51 +0800
Subject: [PATCH] subject2_AdaptiveBatchHE_first
---
subject2-AdaptiveBatchHE/CITATION.bib | 8 +
subject2-AdaptiveBatchHE/README.md | 110 ++++
.../batch encryption/binary_float_decimal.py | 137 +++++
.../batch encryption/encryption.py | 209 ++++++++
.../federated_experiment_main.py | 474 ++++++++++++++++++
.../batch encryption/models.py | 251 ++++++++++
.../cnn sparisty/federated_main.py | 201 ++++++++
.../cnn sparisty/mobilenetv2.py | 151 ++++++
.../cnn sparisty/models.py | 217 ++++++++
.../cnn sparisty/options.py | 64 +++
.../cnn sparisty/sampling.py | 198 ++++++++
.../cnn sparisty/sparisty_similarity.py | 138 +++++
.../sparsity_extract_mobilenet.py | 148 ++++++
.../cnn sparisty/sparsity_extract_vgg.py | 137 +++++
.../cnn sparisty/update.py | 272 ++++++++++
.../cnn sparisty/utils.py | 122 +++++
subject2-AdaptiveBatchHE/environment.yaml | 88 ++++
.../fig/batchencry_server_client.jpg | Bin 0 -> 472910 bytes
.../fig/batchencryption_epochs_accuracy.jpg | Bin 0 -> 315241 bytes
subject2-AdaptiveBatchHE/fig/framework.jpg | Bin 0 -> 265253 bytes
.../fig/fuzzyworkflow.jpg | Bin 0 -> 311289 bytes
.../fig/non_iid_communication_cost.jpg | Bin 0 -> 189450 bytes
.../fig/non_iid_epochs_accuracy.jpg | Bin 0 -> 312190 bytes
.../fig/non_iid_three_trainingtime.jpg | Bin 0 -> 180363 bytes
subject2-AdaptiveBatchHE/fig/sparsity.jpg | Bin 0 -> 58037 bytes
.../fuzzy logic/fuzzy_logic_main.py | 267 ++++++++++
26 files changed, 3192 insertions(+)
create mode 100644 subject2-AdaptiveBatchHE/CITATION.bib
create mode 100644 subject2-AdaptiveBatchHE/README.md
create mode 100644 subject2-AdaptiveBatchHE/batch encryption/binary_float_decimal.py
create mode 100644 subject2-AdaptiveBatchHE/batch encryption/encryption.py
create mode 100644 subject2-AdaptiveBatchHE/batch encryption/federated_experiment_main.py
create mode 100644 subject2-AdaptiveBatchHE/batch encryption/models.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/federated_main.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/mobilenetv2.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/models.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/options.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/sampling.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/sparisty_similarity.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_mobilenet.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_vgg.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/update.py
create mode 100644 subject2-AdaptiveBatchHE/cnn sparisty/utils.py
create mode 100644 subject2-AdaptiveBatchHE/environment.yaml
create mode 100644 subject2-AdaptiveBatchHE/fig/batchencry_server_client.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/batchencryption_epochs_accuracy.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/framework.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/fuzzyworkflow.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/non_iid_communication_cost.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/non_iid_epochs_accuracy.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/non_iid_three_trainingtime.jpg
create mode 100644 subject2-AdaptiveBatchHE/fig/sparsity.jpg
create mode 100644 subject2-AdaptiveBatchHE/fuzzy logic/fuzzy_logic_main.py
diff --git a/subject2-AdaptiveBatchHE/CITATION.bib b/subject2-AdaptiveBatchHE/CITATION.bib
new file mode 100644
index 0000000..4177b10
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/CITATION.bib
@@ -0,0 +1,8 @@
+@article{han2023adaptiveBatchHE,
+ title={Adaptive Batch Homomorphic Encryption for Joint Federated Learning in Cross-Device Scenarios},
+ author={Han, Junhao and Yan, Li},
+ journal={IEEE Internet of Things Journal},
+ volume={Early Access},
+ year={2023},
+ publisher={IEEE}
+}
\ No newline at end of file
diff --git a/subject2-AdaptiveBatchHE/README.md b/subject2-AdaptiveBatchHE/README.md
new file mode 100644
index 0000000..365ab83
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/README.md
@@ -0,0 +1,110 @@
+# AdaptiveBatchHE
+
+
+
+This repository provides the implementation of the paper ["Adaptive Batch Homomorphic Encryption for Joint Federated Learning in Cross-Device Scenarios"](https://ieeexplore.ieee.org/document/10275042), published in the IEEE Internet of Things Journal. In this paper, we propose an adaptive batch HE framework for cross-device FL that determines cost-efficient and sufficiently secure encryption strategies for clients with heterogeneous data and system capabilities. Our framework achieves accuracy comparable to plain HE (i.e., encryption applied per gradient) while reducing training time by 3×-31× and communication cost by 45×-66×.
+
+
+
+*Result figures (see `fig/`): training time over 100 epochs; testing accuracy over epochs; communication cost in one epoch; cost efficiency under various HE key sizes.*
+
+
+
+
+Our framework consists of the following three key components:
+
+![Framework overview](fig/framework.jpg)
+
+## 1. Clustering of Clients based on Sparsity of CNNs
+
+![Clustering clients by CNN sparsity](fig/sparsity.jpg)
+
+The code in the folder [CNN Sparisty](https://github.com/liyan2015/AdaptiveBatchHE/tree/main/CNN%20Sparisty) is for determining the sparsity vectors of clients.
+
+`federated_main.py` is the main function.
+
+The input is the path of the dataset.
+
+
+
+## 2. Selection of HE Key Size for Each Client based on Fuzzy Logic
+
+![Fuzzy logic workflow](fig/fuzzyworkflow.jpg)
+
+The code in the folder [fuzzy logic](https://github.com/liyan2015/AdaptiveBatchHE/tree/main/fuzzy%20logic) is for determining the HE key size of clients.
+
+`fuzzy_logic_main.py` is the main function.
+
+There are three inputs: `input_NS`, `input_TR`, and `input_CC`.
+
+Their values are between 0 and 1.
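+
+To give a feel for how such a controller can be wired up with `skfuzzy`, here is a minimal, illustrative sketch. The membership functions, rule base, and key-size universe below are placeholders, not the actual rules implemented in `fuzzy_logic_main.py`:
+
+```python
+import numpy as np
+import skfuzzy as fuzz
+from skfuzzy import control as ctrl
+
+# Three normalized client metrics, each in [0, 1].
+inputs = {}
+for name in ('NS', 'TR', 'CC'):
+    inputs[name] = ctrl.Antecedent(np.arange(0, 1.01, 0.01), name)
+    inputs[name].automf(3)  # generates the terms 'poor', 'average', 'good'
+
+key = ctrl.Consequent(np.arange(1024, 4097, 1), 'key_size')
+key['small'] = fuzz.trimf(key.universe, [1024, 1024, 2048])
+key['medium'] = fuzz.trimf(key.universe, [1024, 2048, 4096])
+key['large'] = fuzz.trimf(key.universe, [2048, 4096, 4096])
+
+ns, tr, cc = inputs['NS'], inputs['TR'], inputs['CC']
+rules = [
+    ctrl.Rule(ns['good'] & tr['good'] & cc['good'], key['large']),
+    ctrl.Rule(ns['average'] | tr['average'] | cc['average'], key['medium']),
+    ctrl.Rule(ns['poor'] | tr['poor'] | cc['poor'], key['small']),
+]
+
+sim = ctrl.ControlSystemSimulation(ctrl.ControlSystem(rules))
+sim.input['NS'], sim.input['TR'], sim.input['CC'] = 0.8, 0.6, 0.7
+sim.compute()
+print(round(sim.output['key_size']))  # a suggested key size in bits
+```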
+
+
+
+## 3. Accuracy-lossless Batch Encryption and Aggregation
+
+![Batch encryption between server and clients](fig/batchencry_server_client.jpg)
+
+The code in the folder [batch encryption](https://github.com/liyan2015/AdaptiveBatchHE/tree/main/batch%20encryption) is for accuracy-lossless batch encryption and aggregation of model parameters for FL training.
+
+`federated_experiment_main.py` is the main function.
+
+The code needs a properly chosen hyperparameter K to run correctly; the reason is explained in detail in the paper. The default value of K is 4. For specific settings, please refer to the comments in the code.
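+
+As a quick sanity check of the batching primitives, the round trip below uses the functions from `encryption.py` (run it inside the `batch encryption` folder; the toy tensor and the small `M`/`K`/`N` values are illustrative only):
+
+```python
+import torch
+# importing encryption generates a fresh 2048-bit Paillier key pair
+from encryption import (public_key, private_key,
+                        batch_encrypt_per_layer, batch_decrypt_per_layer)
+
+# One toy "layer" of non-negative parameters (as after threshold shifting).
+party = {'weight': torch.tensor([1.2, 2.5, 3.9, 0.4])}
+M, K, N = {'weight': 3}, 1, {'weight': 5}  # per-layer bit budgets, decimal accuracy
+
+enc, shapes = batch_encrypt_per_layer(public_key, party, batch_size=2, M=M, K=K, N=N)
+dec = batch_decrypt_per_layer(private_key, enc, shapes, batch_size=2, M=M, K=K, N=N)
+print(dec['weight'])  # ~ [1.2, 2.5, 3.9, 0.4]
+```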
+
+
+
+## Prerequisites
+
+To run the code, the following libraries are required:
+
+- Python >= 3.8
+- PyTorch >= 1.10
+- torchvision >= 0.11
+- phe >= 1.5
+- skfuzzy >= 0.4
+
+Our full environment is listed in `environment.yaml`.
+
+## Citing
+
+
+
+If you use this repository, please cite:
+```bibtex
+@article{han2023adaptiveBatchHE,
+ title={Adaptive Batch Homomorphic Encryption for Joint Federated Learning in Cross-Device Scenarios},
+ author={Han, Junhao and Yan, Li},
+ journal={IEEE Internet of Things Journal},
+ volume={Early Access},
+ year={2023},
+ publisher={IEEE}
+}
+```
+
+
+
diff --git a/subject2-AdaptiveBatchHE/batch encryption/binary_float_decimal.py b/subject2-AdaptiveBatchHE/batch encryption/binary_float_decimal.py
new file mode 100644
index 0000000..04b3a6c
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/batch encryption/binary_float_decimal.py
@@ -0,0 +1,137 @@
+from functools import lru_cache
+index_acc = 0
+a = 0
+index_i = 0
+@lru_cache
+def f2b(num, M=8, K=6, N=30):
+ '''
+ floating number to binary
+ '''
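+    # M: bits for the integer part; K: decimal digits of fractional accuracy;
+    # N: bits for the fractional part, stored as a K-digit integer (e.g., with
+    # the defaults, f2b(2.5) keeps 2 in 8 bits and 500000 in 30 bits).
+    # Returns a fixed-width string of M + N binary digits.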
+ global index_acc
+ global a
+ str_num = str(float(num))
+ accuracy = K
+ if '.' in str_num:
+ if num == 0:
+ return '0' * (M + N)
+ else:
+ string_integer, string_decimal = str_num.split('.')
+ integer = int(string_integer)
+ integer2b = '{:b}'.format(integer).zfill(M)
+ lst_accuracy = []
+ if len(string_decimal) >= accuracy:
+ for i in range(accuracy):
+ lst_accuracy.append(string_decimal[i])
+ else:
+ for i in string_decimal:
+ lst_accuracy.append(i)
+ a = accuracy - len(string_decimal)
+ for i in range(a):
+ lst_accuracy.append('0')
+ for i in lst_accuracy:
+ if i != '0':
+ index_acc = lst_accuracy.index(i)
+ break
+ str1 = ''.join(lst_accuracy)
+ str_float = str1[index_acc::]
+ num_float = int(str_float)
+ num_float2b = '{:b}'.format(num_float)
+ if len(str(num_float2b)) <= N:
+ a = N - len(str(num_float2b))
+            else:
+                raise ValueError('N error: the fractional part does not fit in N bits')
+ str_all = integer2b + '0' * a + str(num_float2b)
+ return str_all
+ else:
+ integer2b = '{:b}'.format(num).zfill(M)
+ str_all = integer2b + '0' * N
+ return str_all
+
+
+@lru_cache
+def b2f(num, M=8, K=6, N=30):
+ '''
+ binary to floating number
+ '''
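+    # Inverse of f2b: the first M bits hold the integer part, the last N bits
+    # hold the K-digit fractional value (its leading zeros are restored from K).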
+ accuracy = K
+ bit = M + N
+ if len(str(num)) == bit:
+ str_front = str(num)[0:M]
+ str_back = str(num)[M:]
+ bin2int = int(str_front, 2)
+ for i in str_back:
+ if i == '1':
+ index_float = str_back.index(i)
+ break
+ else:
+ index_float = 0
+ str_back_ex = str_back[index_float::]
+ a = int(str_back_ex, 2)
+ if accuracy >= len(str(a)):
+ b = accuracy - len(str(a))
+ str_all = str(bin2int) + '.' + '0' * b + str(a)
+ return str_all
+ else:
+ if len(str(a)) == K + 1:
+ a_real = str(a)[1:]
+ str_all = str(bin2int + int(str(a)[0])) + '.' + str(a_real)
+ return str_all
+ else:
+ print('b2f error!')
+ else:
+ print('num input error')
+
+def dsb(c: str):
+ '''
+ Convert decrypted plaintext into binary
+ '''
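+    # After digit-wise decimal addition of several binary strings, a "digit" can
+    # exceed 1; propagate carries (base 2) so the result is a valid binary string.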
+ string = list(c[::-1])
+ for i in range(len(string) - 1):
+ if int(string[i]) % 2 == 0 and int(string[i]) != 0:
+ carry = int(string[i]) // 2
+ string[i] = str(0)
+ string[i + 1] = str(carry + int(string[i + 1]))
+ elif int(string[i]) % 2 == 1 and int(string[i]) != 1:
+ carry = int(string[i]) // 2
+ string[i] = str(1)
+ string[i + 1] = str(carry + int(string[i + 1]))
+ else:
+ continue
+ last = int(string[-1])
+ last_b = '{:b}'.format(last)[::-1]
+ string[-1] = last_b[0]
+ for i in range(1, len(last_b)):
+ string.append(last_b[i])
+ res = ''.join(string[::-1])
+ return res
+
+def integer_floating_dsb(string: str, M, N):
+ M_string = string[0:M]
+ N_string = string[M:]
+ M_dsb = dsb(M_string)
+ N_dsb = dsb(N_string)
+ return M_dsb + N_dsb
+
+
+def total_bits(num: int) -> int:
+ string = '{:b}'.format(num)
+ return len(string)
+
+
+def decode_fillzero(num:int, batch_length) -> str:
+ decoded_num = '{:b}'.format(num).zfill(batch_length)
+ return decoded_num
+
+if __name__ == '__main__':
+ a = f2b(0.)
+ print(a)
+ print(type(a))
+ a_back = b2f(a)
+ # b = f2b(2.49)
+ # b_back = b2f(b)
+ print(a_back)
+ # print(type(a_back))
+ # print(b_back)
+ # # print('-----------------------------')
+ # print(bin(15))
+ # print(total_bits(15))
\ No newline at end of file
diff --git a/subject2-AdaptiveBatchHE/batch encryption/encryption.py b/subject2-AdaptiveBatchHE/batch encryption/encryption.py
new file mode 100644
index 0000000..b1479cc
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/batch encryption/encryption.py
@@ -0,0 +1,209 @@
+from phe.paillier import PaillierPrivateKey, PaillierPublicKey, generate_paillier_keypair
+import numpy as np
+import warnings
+import multiprocessing
+from joblib import Parallel, delayed
+from numba import njit, prange
+import binary_float_decimal
+import torch
+import copy
+
+N_JOBS = multiprocessing.cpu_count()
+public_key, private_key = generate_paillier_keypair(n_length=2048)
+def encrypt(public_key: PaillierPublicKey, x):
+ return public_key.encrypt(x)
+
+def encrypt_array(public_key: PaillierPublicKey, A):
+ encrypt_A = Parallel(n_jobs=N_JOBS)(delayed(public_key.encrypt)(num) for num in A)
+ return np.array(encrypt_A)
+
+def encrypt_matrix(public_key: PaillierPublicKey, A: np.array):
+ og_shape = A.shape
+ if len(A.shape) == 1:
+ A = np.expand_dims(A, axis=0)
+ A = np.reshape(A, (1, -1))
+ A = np.squeeze(A)
+ encrypt_A = Parallel(n_jobs=N_JOBS)(delayed(public_key.encrypt)(num) for num in A)
+ encrypt_A = np.expand_dims(encrypt_A, axis=0)
+ encrypt_A = np.reshape(encrypt_A, og_shape)
+ return np.array(encrypt_A)
+
+# @njit(parallel=True)
+def add_threshold(input, threshold_dict: dict): # input is clients_weight_after_train : dict
+ for client_idx in range(len(input)):
+ for k in input[client_idx].keys():
+ input[client_idx][k] += threshold_dict[k]
+ return input
+
+def de_threshold(input, threshold: int, num_clients: int): # input :ndarray
+ return input - threshold * num_clients
+
+# @njit(parallel=True)
+def f2b_matrix(input, M=8, K=6, N=30):
+ result = Parallel(n_jobs=N_JOBS)(delayed(binary_float_decimal.f2b)(i, M, K, N) for i in input)
+ return np.array(result)
+
+
+def splicing(B: np.array) -> str:
+ return ''.join(B)
+
+
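+# Pack batch_size fixed-width (M+N)-bit encodings into one big integer, so a
+# single Paillier encryption covers batch_size model parameters at once.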
+def encrypt_matrix_batch(public_key: PaillierPublicKey, A, batch_size=4, M=8, K=6, N=30):
+ og_shape = A.shape
+ if len(A.shape) == 1:
+ A = np.expand_dims(A, axis=0)
+ A = np.reshape(A, (1, -1))
+ A = np.squeeze(A)
+ A_len = len(A)
+    # pad the array at the end so that its length is a multiple of batch_size
+ A = A if (A_len % batch_size) == 0 \
+ else np.pad(A, (0, batch_size - (A_len % batch_size)), 'constant', constant_values=(0, 0))
+ A = f2b_matrix(A, M, K, N)
+ idx_range = int(len(A) / batch_size)
+ batched_nums = []
+
+ new_arr = np.array_split(A, idx_range)
+ for i in range(idx_range):
+ batched_one = splicing(new_arr[i])
+ batched_nums.append(batched_one)
+ batched_nums = np.array(batched_nums)
+ encoded_A = Parallel(n_jobs=N_JOBS)(delayed(int)(num, 2) for num in batched_nums)
+ encrypted_A = Parallel(n_jobs=N_JOBS)(delayed(public_key.encrypt)(num) for num in encoded_A)
+ return encrypted_A, og_shape
+
+
+def decrypt(private_key: PaillierPrivateKey, x):
+ return private_key.decrypt(x)
+
+def restore_shape(decrypt_A, shape, batch_size, M=8, K=6, N=30):
+ batch_bits = (M + N) * batch_size
+ decoded_A = Parallel(n_jobs=N_JOBS)(delayed(binary_float_decimal.decode_fillzero)(num, batch_bits) for num in decrypt_A)
+ num_ele = np.prod(shape)
+ num_ele_w_pad = batch_size * len(decoded_A)
+ un_batched_nums = []
+ for t in range(len(decoded_A)):
+ move = 0
+ for j in range(batch_size):
+ tail = move + M + N
+ un_batched_nums.append(decoded_A[t][move:tail])
+ move = tail
+ un_batched_nums = np.array(un_batched_nums)
+ un_batched_nums_2_str = Parallel(n_jobs=N_JOBS)(
+ delayed(binary_float_decimal.b2f)(i, M, K, N) for i in un_batched_nums)
+ un_batched_nums_2_str = np.array(un_batched_nums_2_str).astype(np.float64)
+ res = np.reshape(un_batched_nums_2_str[0:num_ele], shape)
+ return res
+
+
+def decrypt_matrix_batch(private_key: PaillierPrivateKey, A, og_shape, batch_size=4, M=8, K=6, N=30):
+ decrypt_A = Parallel(n_jobs=N_JOBS)(delayed(private_key.decrypt)(num) for num in A)
+ decrypt_A = np.array(decrypt_A)
+ result = restore_shape(decrypt_A, og_shape, batch_size, M, K, N)
+ return result
+
+
+def batch_encrypt_per_layer(publickey: PaillierPrivateKey, party: dict, batch_size: int, M: dict, K: int, N: dict):
+ result: dict = {}
+ og_shapes: dict = {}
+ for k in party.keys():
+ enc, shape_ = encrypt_matrix_batch(publickey, party[k].cpu().numpy(), batch_size=batch_size, M=M[k],
+ K=K, N=N[k])
+ result[k] = enc
+ og_shapes[k] = shape_
+ return result, og_shapes
+
+
+def batch_decrypt_per_layer(privatekey: PaillierPrivateKey, party: dict, og_shap: dict, batch_size: int, M: dict,
+ K: int, N: dict):
+ result = {}
+ for k in party.keys():
+ result[k] = decrypt_matrix_batch(private_key=privatekey, A=party[k], og_shape=og_shap[k],
+ batch_size=batch_size, M=M[k], K=K, N=N[k])
+ return result
+
+###########################################onlyConvert############################################################
+def convert_matrix_batch(A, batch_size=4, M=8, K=6, N=30):
+ A = A.cpu().numpy().astype(np.float64)
+ og_shape = A.shape
+ if len(A.shape) == 1:
+ A = np.expand_dims(A, axis=0)
+ A = np.reshape(A, (1, -1))
+ A = np.squeeze(A)
+ A_len = len(A)
+    # pad the array at the end so that its length is a multiple of batch_size
+    A = A if (A_len % batch_size) == 0 \
+        else np.pad(A, (0, batch_size - (A_len % batch_size)), 'constant', constant_values=(0, 0))
+    # encode each float as a fixed-width binary string (mirrors encrypt_matrix_batch);
+    # without this step, splicing() below would try to join raw floats and fail
+    A = f2b_matrix(A, M, K, N)
+    idx_range = int(len(A) / batch_size)
+ batched_nums = []
+
+ new_arr = np.array_split(A, idx_range)
+ for i in range(idx_range):
+ batched_one = splicing(new_arr[i])
+ batched_nums.append(batched_one)
+ batched_nums = np.array(batched_nums)
+ return batched_nums, og_shape
+
+
+def batch_convert_per_layer(party: dict, batch_size: int, M: dict, K: int, N: dict):
+ result = {}
+ og_shapes = {}
+ for k in party.keys():
+        enc, shape_ = convert_matrix_batch(party[k], batch_size=batch_size, M=M[k],
+ K=K, N=N[k])
+ result[k] = enc
+ og_shapes[k] = shape_
+ return result, og_shapes
+
+
+def restore_shape_convert(A, shape, batch_size, M, K, N):
+ batch_bits = (M + N) * batch_size
+ for i in range(len(A)):
+ A[i] = binary_float_decimal.dsb((str(A[i])))
+ for i in range(len(A)):
+ if len(str(A[i])) < batch_bits:
+ A[i] = '0' * (batch_bits - len(str(A[i]))) + str(A[i])
+ elif len(str(A[i])) == batch_bits:
+ A[i] = str(A[i])
+ else:
+ print("overflow:", type(A), A[i])
+ warnings.warn('Overflow detected, consider using longer M,N')
+ num_ele = np.prod(shape)
+ num_ele_w_pad = batch_size * len(A)
+ un_batched_nums = []
+ for t in range(len(A)):
+ move = 0
+ for j in range(batch_size):
+ tail = move + M + N
+ un_batched_nums.append(A[t][move:tail])
+ move = tail
+ un_batched_nums = np.array(un_batched_nums)
+ un_batched_nums_2_str = Parallel(n_jobs=N_JOBS)(
+ delayed(binary_float_decimal.b2f)(i, M, K, N) for i in un_batched_nums)
+ un_batched_nums_2_str = np.array(un_batched_nums_2_str).astype(np.float64)
+ un_batched_nums_2_str = Parallel(n_jobs=N_JOBS)(
+ delayed(float)(i) for i in un_batched_nums_2_str)
+ res = np.reshape(un_batched_nums_2_str[0:num_ele], shape)
+ return res
+
+
+def de_convert_matrix_batch(A, og_shape, batch_size=4, M=8, K=6, N=30):
+ A = np.array(A)
+ result = restore_shape_convert(A, og_shape, batch_size, M, K, N)
+ return result
+
+
+def batch_de_convert_per_layer(party: dict, og_shape: dict, batch_size: int, M: dict, K: int, N: dict):
+ result = {}
+ for k in party.keys():
+ result[k] = de_convert_matrix_batch(party[k], og_shape[k], batch_size=batch_size, M=M[k], K=K, N=N[k])
+ return result
+
+
+if __name__ == '__main__':
+    # quick self-test: batch-encrypt and decrypt a toy layer dict; the API expects
+    # torch tensors (it calls .cpu().numpy()) and per-layer dicts for M and N
+    array_A = {'weight': torch.tensor([1.2, 2.5, 3.9]), 'bias': torch.tensor([4.5, 5.6, 6.9])}
+    B = {'weight': torch.tensor([1.3, 2.1, 3.0]), 'bias': torch.tensor([4.1, 5.1, 6.0])}
+    M_test = {'weight': 3, 'bias': 3}
+    N_test = {'weight': 5, 'bias': 5}
+    encry_A, ogshape = batch_encrypt_per_layer(public_key, array_A, batch_size=2, M=M_test, K=1, N=N_test)
+    print(encry_A)
+    decry_A = batch_decrypt_per_layer(private_key, encry_A, ogshape, batch_size=2, M=M_test, K=1, N=N_test)
+    print(decry_A)
\ No newline at end of file
diff --git a/subject2-AdaptiveBatchHE/batch encryption/federated_experiment_main.py b/subject2-AdaptiveBatchHE/batch encryption/federated_experiment_main.py
new file mode 100644
index 0000000..25b18c7
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/batch encryption/federated_experiment_main.py
@@ -0,0 +1,474 @@
+import copy
+import time
+import torch
+import torchvision.datasets as datasets
+from torchvision import transforms
+import numpy as np
+from phe import paillier
+import multiprocessing
+from joblib import Parallel, delayed
+from models import Net
+from tqdm import tqdm
+import argparse
+from torch.utils.data import DataLoader, Dataset
+import torch.nn.functional as F
+from torch import nn
+import encryption
+import binary_float_decimal
+N_JOBS = multiprocessing.cpu_count()
+criterion = torch.nn.CrossEntropyLoss()
+publickey, privatekey = paillier.generate_paillier_keypair(n_length=4096)
+class DatasetSplit(Dataset):
+ def __init__(self, dataset, idxs):
+ self.dataset = dataset
+ self.idxs = list(idxs)
+
+ def __len__(self):
+ return len(self.idxs)
+
+ def __getitem__(self, item):
+ image, label = self.dataset[self.idxs[item]]
+ return image, label
+
+class Client():
+ def __init__(self, args, dataset=None, idxs=None, w=None):
+ self.args = args
+ self.criterion = nn.CrossEntropyLoss()
+ self.ldr_train = DataLoader(DatasetSplit(dataset, idxs), batch_size=self.args.local_bs, shuffle=True)
+ self.model = Net().to(self.args.device)
+ self.model.load_state_dict(w)
+ # Paillier initialization
+ if self.args.experiment == 'paillier' or self.args.experiment == 'batch':
+ self.pub_key = publickey
+ self.priv_key = privatekey
+
+ def train(self):
+ weight_old = copy.deepcopy(self.model.state_dict())
+ net = copy.deepcopy(self.model)
+ # train and update
+ net.train()
+ local_epoch_loss = []
+ optimizer = torch.optim.SGD(net.parameters(), lr=self.args.lr, momentum=0.5)
+ for iter in range(self.args.local_ep):
+ batch_loss = []
+ for batch_idx, (images, labels) in enumerate(self.ldr_train):
+ images, labels = images.to(self.args.device), labels.to(self.args.device)
+ net.zero_grad()
+ log_prob = net(images)
+ loss = self.criterion(log_prob, labels)
+ loss.backward()
+ optimizer.step()
+ batch_loss.append(loss.item())
+ local_epoch_loss.append(sum(batch_loss) / len(batch_loss))
+ weight_new = net.state_dict()
+ update_w = {}
+ if self.args.experiment == 'plain' or self.args.experiment == 'batch' or self.args.experiment == 'onlyConvert':
+ for k in weight_new.keys():
+ update_w[k] = weight_new[k] - weight_old[k]
+ elif self.args.experiment == 'paillier':
+ print('encrypting...')
+ enc_start = time.time()
+ for k in weight_new.keys():
+ update_w[k] = weight_new[k] - weight_old[k]
+ # flatten weight
+ list_w = update_w[k].view(-1).cpu().tolist()
+ encry_list_w = Parallel(n_jobs=N_JOBS)(delayed(self.pub_key.encrypt)(num) for num in list_w)
+ update_w[k] = encry_list_w
+ enc_end = time.time()
+ print('Encryption time:', enc_end - enc_start)
+ else:
+ raise NotImplementedError
+ return update_w, sum(local_epoch_loss) / len(local_epoch_loss)
+
+ def update(self, weight_glob):
+ if self.args.experiment == 'plain':
+ self.model.load_state_dict(weight_glob)
+ elif self.args.experiment == 'paillier':
+ # for paillier, w_glob is update_w_avg here
+ update_w_avg = copy.deepcopy(weight_glob)
+ print('decrypting...')
+ dec_start = time.time()
+ for k in update_w_avg.keys():
+ # decryption
+ update_w_avg[k] = Parallel(n_jobs=N_JOBS)(delayed(self.priv_key.decrypt)(num) for num in update_w_avg[k])
+ # reshape to original and update
+ origin_shape = list(self.model.state_dict()[k].size())
+ update_w_avg[k] = torch.FloatTensor(update_w_avg[k]).to(self.args.device).view(*origin_shape)
+ self.model.state_dict()[k] += update_w_avg[k]
+ dec_end = time.time()
+ print('Decryption time:', dec_end - dec_start)
+ elif self.args.experiment == 'batch':
+ update_w_avg = copy.deepcopy(self.model.state_dict())
+ for k in update_w_avg.keys():
+ update_w_avg[k] = torch.FloatTensor(weight_glob[k]).to(self.args.device)
+ self.model.state_dict()[k] += update_w_avg[k]
+ elif self.args.experiment == 'onlyConvert':
+ update_w_avg = copy.deepcopy(self.model.state_dict())
+ for k in update_w_avg.keys():
+ update_w_avg[k] = torch.FloatTensor(weight_glob[k]).to(self.args.device)
+ self.model.state_dict()[k] += update_w_avg[k]
+ else:
+ raise NotImplementedError
+
+
+class Server():
+ def __init__(self, args, w):
+ self.args = args
+ self.clients_update_w = []
+ self.clients_loss = []
+ self.model = Net().to(self.args.device)
+ self.model.load_state_dict(w)
+
+ def FedAvg(self):
+ if self.args.experiment == 'plain':
+ update_w_avg = copy.deepcopy(self.clients_update_w[0])
+ for k in update_w_avg.keys():
+ for i in range(1, len(self.clients_update_w)):
+ update_w_avg[k] += self.clients_update_w[i][k] # update server's weight
+ update_w_avg[k] = torch.div(update_w_avg[k], len(self.clients_update_w))
+ self.model.state_dict()[k] += update_w_avg[k]
+ return copy.deepcopy(self.model.state_dict()), sum(self.clients_loss) / len(self.clients_loss)
+ elif self.args.experiment == 'paillier':
+ update_w_avg = copy.deepcopy(self.clients_update_w[0])
+ for k in update_w_avg.keys():
+ client_num = len(self.clients_update_w)
+ for i in range(1, client_num): # client-wise sum
+ for j in range(len(update_w_avg[k])): # element-wise sum
+ update_w_avg[k][j] += self.clients_update_w[i][k][j]
+ for j in range(len(update_w_avg[k])): # element-wise avg
+ update_w_avg[k][j] /= client_num
+ return update_w_avg, sum(self.clients_loss) / len(self.clients_loss)
+ elif self.args.experiment == 'batch':
+ update_w_avg: dict = copy.deepcopy(self.clients_update_w[0])
+ for k in update_w_avg.keys():
+ client_num = len(self.clients_update_w)
+ for i in range(1, client_num):
+ for j in range(len(update_w_avg[k])):
+ update_w_avg[k][j] += self.clients_update_w[i][k][j]
+ return update_w_avg, sum(self.clients_loss) / len(self.clients_loss)
+ elif self.args.experiment == 'onlyConvert':
+ update_w_avg: dict = copy.deepcopy(self.clients_update_w[0])
+ for k in update_w_avg.keys():
+ client_num = len(self.clients_update_w)
+ for i in range(1, client_num):
+ for j in range(len(update_w_avg[k])):
+ update_w_avg[k][j] = str(int(update_w_avg[k][j]) + int(self.clients_update_w[i][k][j]))
+ return update_w_avg, sum(self.clients_loss) / len(self.clients_loss)
+ else:
+ raise NotImplementedError
+
+ def test(self, datatest):
+ self.model.eval()
+ # testing
+ test_loss = 0
+ correct = 0
+ data_loader = DataLoader(datatest, batch_size=self.args.bs)
+ for idx, (data, target) in enumerate(data_loader):
+ if self.args.gpu != -1:
+ data, target = data.cuda(), target.cuda()
+ log_probs = self.model(data)
+
+ # sum up batch loss
+ test_loss += F.cross_entropy(log_probs, target, reduction='sum').item()
+
+ # get the index of the max log-probability
+ y_pred = log_probs.data.max(1, keepdim=True)[1]
+ correct += y_pred.eq(target.data.view_as(y_pred)).long().cpu().sum()
+
+ test_loss /= len(data_loader.dataset)
+ accuracy = 100.00 * correct / len(data_loader.dataset)
+ return accuracy, test_loss
+
+
+def load_dataset():
+ data_dir = '/home/hjh/hepaper/data/cifar' # data_dir
+ apply_transform = transforms.Compose(
+ [transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
+
+ dataset_train = datasets.CIFAR10(data_dir, train=True, download=True,
+ transform=apply_transform)
+
+ dataset_test = datasets.CIFAR10(data_dir, train=False, download=True,
+ transform=apply_transform)
+ return dataset_train, dataset_test
+
+
+def create_client_server():
+ num_items = int(len(dataset_train) / args.num_clients)
+ clients, all_idxs = [], [i for i in range(len(dataset_train))]
+ net_glob = Net().to(args.device)
+
+ # divide training data, i.i.d.
+ # init models with same parameters
+ for i in range(args.num_clients):
+ new_idxs = set(np.random.choice(all_idxs, num_items, replace=False))
+ all_idxs = list(set(all_idxs) - new_idxs)
+ new_client = Client(args=args, dataset=dataset_train, idxs=new_idxs, w=copy.deepcopy(net_glob.state_dict()))
+ clients.append(new_client)
+
+ server = Server(args=args, w=copy.deepcopy(net_glob.state_dict()))
+
+ return clients, server
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ # global setting
+ parser.add_argument('--experiment', type=str, default='paillier',
+ choices=['plain', 'paillier', 'batch', 'onlyConvert']) # choose experiment mode
+ parser.add_argument('--num_clients', type=int, default=10) # choose the number of clients
+ parser.add_argument('--num_epochs', type=int, default=1) # choose the global epoch
+ parser.add_argument('--batch_size', type=int, default=10) # choose the encryption batch size
+ parser.add_argument('--dataset', type=str, default='cifar10', help="name of dataset") # choose the dataset
+ parser.add_argument('--gpu', type=int, default=0, help="GPU ID, -1 for CPU") # choose gpu
+ # local setting
+ parser.add_argument('--lr', type=float, default=0.015, help='learning rate')
+ parser.add_argument('--local_ep', type=int, default=10, help="the number of local epochs: E")
+ parser.add_argument('--local_bs', type=int, default=64, help="local batch size: B")
+ parser.add_argument('--bs', type=int, default=64, help="test batch size")
+ args = parser.parse_args()
+ args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
+ print(args)
+
+ print('----------------------------initialization-------------------------------')
+ print('load dataset for {} client'.format(args.num_clients))
+ dataset_train, dataset_test = load_dataset()
+ print("clients and server initialization...")
+ clients, server = create_client_server()
+ num_epochs = args.num_epochs
+ ## build clients -->num_clients
+ num_clients = args.num_clients
+ # statistics for plot
+ all_acc_train = []
+ all_acc_test = []
+ all_loss_glob = []
+ print('start training...')
+ print('Algorithm:', args.experiment)
+ if args.experiment == 'plain':
+ test_epochs = 2
+ for epoch in tqdm(range(num_epochs)): ##global epoch
+ # print(f'\n | Global Training Round : {epoch + 1} |\n')
+ epoch_start = time.time()
+ server.clients_update_w, server.clients_loss = [], []
+ for idx in range(num_clients):
+ update_w, loss = clients[idx].train()
+ server.clients_update_w.append(update_w)
+ server.clients_loss.append(loss)
+
+ w_glob, loss_glob = server.FedAvg()
+ # update local weights
+ for idx in range(args.num_clients):
+ clients[idx].update(w_glob)
+ epoch_end = time.time()
+ print('=====Global Epoch {:3d}====='.format(epoch + 1))
+ print('Training time:', epoch_end - epoch_start)
+ # testing
+ acc_train, loss_train = server.test(dataset_train)
+ acc_test, loss_test = server.test(dataset_test)
+ print("Training accuracy: {:.2f}".format(acc_train))
+ print("Testing accuracy: {:.2f}".format(acc_test))
+ print('Training average loss {:.3f}'.format(loss_glob))
+ all_acc_train.append(float(acc_train))
+ all_acc_test.append(float(acc_test))
+ all_loss_glob.append(float(loss_glob))
+ print('{}epochs training accuracy:{}'.format(num_epochs, all_acc_train))
+ print('{}epochs testing accuracy:{}'.format(num_epochs, all_acc_test))
+
+ elif args.experiment == 'paillier':
+ for epoch in tqdm(range(num_epochs)): ##global epoch
+ # print(f'\n | Global Training Round : {epoch + 1} |\n')
+ epoch_start = time.time()
+ server.clients_update_w, server.clients_loss = [], []
+ for idx in range(num_clients):
+ update_w, loss = clients[idx].train()
+ server.clients_update_w.append(update_w)
+ server.clients_loss.append(loss)
+ w_glob, loss_glob = server.FedAvg()
+ # update local weights
+ for idx in range(args.num_clients):
+ clients[idx].update(w_glob)
+ epoch_end = time.time()
+ print('=====Global Epoch {:3d}====='.format(epoch + 1))
+ print('Training time:', epoch_end - epoch_start)
+ # testing
+ server.model.load_state_dict(copy.deepcopy(clients[0].model.state_dict()))
+ acc_train, loss_train = server.test(dataset_train)
+ acc_test, loss_test = server.test(dataset_test)
+ print("Training accuracy: {:.2f}".format(acc_train))
+ print("Testing accuracy: {:.2f}".format(acc_test))
+ print('Training average loss {:.3f}'.format(loss_glob))
+ all_acc_train.append(float(acc_train))
+ all_acc_test.append(float(acc_test))
+ all_loss_glob.append(float(loss_glob))
+ print('{}epochs training accuracy:{}'.format(num_epochs, all_acc_train))
+ print('{}epochs testing accuracy:{}'.format(num_epochs, all_acc_test))
+ elif args.experiment == 'batch':
+ test_epochs = 10
+ for epoch in tqdm(range(num_epochs)): ##global epoch
+ epoch_start = time.time()
+ theta = 2.5
+ server.clients_update_w, server.clients_loss = [], []
+ clients_weight_after_train = []
+ for idx in range(num_clients):
+ update_w, loss = clients[idx].train()
+ clients_weight_after_train.append(update_w)
+ server.clients_loss.append(loss)
+ clients_layer_max = []
+ for client_idx in range(len(clients_weight_after_train)):
+ temp_max = {}
+ for k in clients_weight_after_train[client_idx].keys():
+ temp_max[k] = torch.max(clients_weight_after_train[client_idx][k])
+ clients_layer_max.append(temp_max)
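+            # Per-layer clipping threshold t = largest update value across all
+            # clients; updates are clamped to [-t, t] and then shifted by +t so
+            # every value is non-negative before fixed-point batch encoding.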
+ clipping_thresholds = {}
+ for k in clients_layer_max[0].keys():
+ temp1 = []
+ for idx_client in range(len(clients_layer_max)):
+ temp1.append(clients_layer_max[idx_client][k])
+ clipping_thresholds[k] = max(temp1)
+ print('clipping_thresholds:', clipping_thresholds)
+ # clipping with threshold
+ for client_idx in range(len(clients_weight_after_train)):
+ for k in clients_weight_after_train[client_idx].keys():
+ clients_weight_after_train[client_idx][k] = torch.clamp(clients_weight_after_train[client_idx][k],
+ -1 * clipping_thresholds[k],
+ clipping_thresholds[k])
+ # adding threshold
+ for client_idx in range(len(clients_weight_after_train)):
+ for k in clients_weight_after_train[client_idx].keys():
+ clients_weight_after_train[client_idx][k] += clipping_thresholds[k]
+ integerPart = {}
+ floatPart = {}
+ M_main = {}
+ K_main = 4
+ J = 9999
+ N_main = {}
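+            # Size the bit budgets so that homomorphically summing num_clients
+            # values cannot overflow: M covers the summed (shifted) integer parts
+            # and N the summed K-digit fractional parts (J = 10**K - 1 is the
+            # largest K-digit value).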
+ for k in clipping_thresholds.keys():
+ integerPart[k] = int(clipping_thresholds[k] * 2)
+ M_main[k] = binary_float_decimal.total_bits(integerPart[k] * num_clients)
+ N_main[k] = binary_float_decimal.total_bits(J * num_clients)
+ print('three parameters :', M_main, K_main, N_main)
+ enc_grads_batch_clients = []
+ og_shape_batch_clients = []
+ for item in clients_weight_after_train:
+ enc_grads_temp, og_shape_temp = encryption.batch_encrypt_per_layer(publickey=publickey, party=item,
+ batch_size=args.batch_size, M=M_main,
+ K=K_main, N=N_main)
+ enc_grads_batch_clients.append(enc_grads_temp)
+ og_shape_batch_clients.append(og_shape_temp)
+ server.clients_update_w = enc_grads_batch_clients
+ w_glob, loss_glob = server.FedAvg()
+ wg_de_batch: dict = encryption.batch_decrypt_per_layer(privatekey=privatekey, party=w_glob,
+ og_shap=og_shape_batch_clients[0],
+ batch_size=args.batch_size, M=M_main, K=K_main,
+ N=N_main)
+ for k in wg_de_batch.keys():
+ wg_de_batch[k] = 1 / num_clients * (wg_de_batch[k] - clipping_thresholds[k].cpu().numpy() * num_clients)
+ for idx in range(args.num_clients):
+ clients[idx].update(wg_de_batch)
+ epoch_end = time.time()
+ print('=====Global Epoch {:3d}====='.format(epoch + 1))
+ print('Training time:', epoch_end - epoch_start)
+ # testing
+ server.model.load_state_dict(copy.deepcopy(clients[0].model.state_dict()))
+ acc_train, loss_train = server.test(dataset_train)
+ acc_test, loss_test = server.test(dataset_test)
+ print("Training accuracy: {:.2f}".format(acc_train))
+ print("Testing accuracy: {:.2f}".format(acc_test))
+ print('Training average loss {:.3f}'.format(loss_glob))
+ all_acc_train.append(float(acc_train))
+ all_acc_test.append(float(acc_test))
+ all_loss_glob.append(float(loss_glob))
+ print('{}epochs training accuracy:{}'.format(num_epochs, all_acc_train))
+ print('{}epochs testing accuracy:{}'.format(num_epochs, all_acc_test))
+ elif args.experiment == 'onlyConvert':
+ test_epochs = 10
+ for epoch in tqdm(range(num_epochs)): ##global epoch
+ # print(f'\n | Global Training Round : {epoch + 1} |\n')
+ epoch_start = time.time()
+ theta = 2.5
+ server.clients_update_w, server.clients_loss = [], []
+ clients_weight_after_train = []
+ for idx in range(num_clients):
+ update_w, loss = clients[idx].train()
+ clients_weight_after_train.append(update_w)
+ server.clients_loss.append(loss)
+ clients_layer_max = []
+ for client_idx in range(len(clients_weight_after_train)):
+ temp_max = {}
+ for k in clients_weight_after_train[client_idx].keys():
+ temp_max[k] = torch.max(clients_weight_after_train[client_idx][k])
+ clients_layer_max.append(temp_max)
+ clipping_thresholds_max = {}
+ for k in clients_layer_max[0].keys():
+ temp1 = []
+ for idx_client in range(len(clients_layer_max)):
+ temp1.append(clients_layer_max[idx_client][k])
+ clipping_thresholds_max[k] = max(temp1)
+ # print('clipping_thresholds_max:', clipping_thresholds_max)
+ clients_layer_min = []
+ for client_idx in range(len(clients_weight_after_train)):
+ temp_min = {}
+ for k in clients_weight_after_train[client_idx].keys():
+ temp_min[k] = torch.min(clients_weight_after_train[client_idx][k])
+ clients_layer_min.append(temp_min)
+ clipping_thresholds_min = {}
+ for k in clients_layer_min[0].keys():
+ temp1 = []
+ for idx_client in range(len(clients_layer_min)):
+ temp1.append(clients_layer_min[idx_client][k])
+ clipping_thresholds_min[k] = min(temp1)
+ # print('clipping_thresholds_min:', clipping_thresholds_min)
+ clipping_thresholds = {}
+ for k in clipping_thresholds_max.keys():
+ clipping_thresholds[k] = max(clipping_thresholds_max[k],abs(clipping_thresholds_min[k]))
+ print('clipping_thresholds:', clipping_thresholds)
+ for client_idx in range(len(clients_weight_after_train)):
+ for k in clients_weight_after_train[client_idx].keys():
+ clients_weight_after_train[client_idx][k] += clipping_thresholds[k]
+ integerPart = {}
+ floatPart = {}
+ M_main = {}
+ K_main = 6
+ J = 999999
+ N_main = {}
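+            # Same overflow sizing as the 'batch' branch, with extra headroom
+            # (the factor of 2 and the +3 bits) for the digit-wise string sums
+            # that dsb() carries over later.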
+ for k in clipping_thresholds.keys():
+ integerPart[k] = int(clipping_thresholds[k])
+ floatPart[k] = clipping_thresholds[k] - integerPart[k]
+ M_main[k] = binary_float_decimal.total_bits(2 * integerPart[k] * num_clients) + 3
+ N_main[k] = binary_float_decimal.total_bits(2 * J * num_clients)
+ print('three parameters :', M_main, K_main, N_main)
+ # converting and batch_size
+ grads_batch_clients = []
+ shape_batch_clients = []
+ for item in clients_weight_after_train:
+ grad_temp, shape_temp = encryption.batch_convert_per_layer(party=item, batch_size=args.batch_size,
+ M=M_main, K=K_main, N=N_main)
+ grads_batch_clients.append(grad_temp)
+ shape_batch_clients.append(shape_temp)
+ server.clients_update_w = grads_batch_clients
+ w_glob, loss_glob = server.FedAvg()
+ wg_de_convert: dict = encryption.batch_de_convert_per_layer(party=w_glob, og_shape=shape_batch_clients[0],
+ batch_size=args.batch_size, M=M_main, K=K_main,
+ N=N_main)
+ for k in wg_de_convert.keys():
+ wg_de_convert[k] = 1 / num_clients * (
+ wg_de_convert[k] - clipping_thresholds[k].cpu().numpy() * num_clients)
+ for idx in range(num_clients):
+ clients[idx].update(wg_de_convert)
+ epoch_end = time.time()
+ print('=====Global Epoch {:3d}====='.format(epoch + 1))
+ print('Training time:', epoch_end - epoch_start)
+ # testing
+ server.model.load_state_dict(copy.deepcopy(clients[0].model.state_dict()))
+ acc_train, loss_train = server.test(dataset_train)
+ acc_test, loss_test = server.test(dataset_test)
+ print("Training accuracy: {:.2f}".format(acc_train))
+ print("Testing accuracy: {:.2f}".format(acc_test))
+ print('Training average loss {:.3f}'.format(loss_glob))
+ all_acc_train.append(float(acc_train))
+ all_acc_test.append(float(acc_test))
+ all_loss_glob.append(float(loss_glob))
+ print('{}epochs training accuracy:{}'.format(num_epochs, all_acc_train))
+        print('{}epochs testing accuracy:{}'.format(num_epochs, all_acc_test))
+ else:
+ raise NotImplementedError
\ No newline at end of file
diff --git a/subject2-AdaptiveBatchHE/batch encryption/models.py b/subject2-AdaptiveBatchHE/batch encryption/models.py
new file mode 100644
index 0000000..bc70142
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/batch encryption/models.py
@@ -0,0 +1,251 @@
+import torch
+from torch import nn
+import torch.nn.functional as F
+from torchvision import models, transforms
+
+criterion = torch.nn.CrossEntropyLoss()
+
+
+class MLP(nn.Module):
+ def __init__(self, dim_in=784, dim_hidden=64, dim_out=10):
+ super(MLP, self).__init__()
+ self.layer_input = nn.Linear(dim_in, dim_hidden)
+ self.relu = nn.ReLU()
+ self.dropout = nn.Dropout()
+ self.layer_hidden = nn.Linear(dim_hidden, dim_out)
+ self.softmax = nn.Softmax(dim=1)
+
+ def forward(self, x):
+ x = x.view(-1, x.shape[1] * x.shape[-2] * x.shape[-1])
+ x = self.layer_input(x)
+ x = self.dropout(x)
+ x = self.relu(x)
+ x = self.layer_hidden(x)
+ return self.softmax(x)
+
+
+class CNNCifar(nn.Module):
+ def __init__(self, num_classes=10):
+ super(CNNCifar, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0)
+ self.pool = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.fc1 = nn.Linear(16 * 53 * 53, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, num_classes)
+
+ def forward(self, x):
+ x = self.pool(F.relu(self.conv1(x)))
+ x = self.pool(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 53 * 53)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ x = self.fc3(x)
+ return F.log_softmax(x, dim=1)
+
+
+class CNNCifar_test(nn.Module):
+ def __init__(self, num_classes=10):
+ super(CNNCifar_test, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, 5)
+ self.pool = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.fc1 = nn.Linear(16 * 5 * 5, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, num_classes)
+
+ def forward(self, x):
+ x = self.pool(F.relu(self.conv1(x)))
+ x = self.pool(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 5 * 5)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ x = self.fc3(x)
+ return F.log_softmax(x, dim=1)
+
+
+class CNNCifar_all(nn.Module):
+ def __init__(self, num_classes=10):
+ super(CNNCifar_all, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, 5)
+ self.pool = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.fc1 = nn.Linear(16 * 5 * 5, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, num_classes)
+
+ def forward(self, x):
+ x = self.pool(F.relu(self.conv1(x)))
+ x = self.pool(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 5 * 5)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ x = self.fc3(x)
+ return F.log_softmax(x, dim=1)
+
+
+class CNNMnist(nn.Module):
+ def __init__(self):
+ super(CNNMnist, self).__init__()
+ self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
+ self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+ self.conv2_drop = nn.Dropout2d()
+ self.fc1 = nn.Linear(320, 50)
+ self.fc2 = nn.Linear(50, 10)
+
+ def forward(self, x):
+ x = F.relu(F.max_pool2d(self.conv1(x), 2))
+ x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
+ x = x.view(-1, x.shape[1] * x.shape[2] * x.shape[3])
+ x = F.relu(self.fc1(x))
+ x = F.dropout(x, training=self.training)
+ x = self.fc2(x)
+ return x
+
+
+class CNNFashion_Mnist(nn.Module):
+ def __init__(self):
+ super(CNNFashion_Mnist, self).__init__()
+ self.layer1 = nn.Sequential(
+ nn.Conv2d(1, 16, kernel_size=5, padding=2),
+ nn.BatchNorm2d(16),
+ nn.ReLU(),
+ nn.MaxPool2d(2))
+ self.layer2 = nn.Sequential(
+ nn.Conv2d(16, 32, kernel_size=5, padding=2),
+ nn.BatchNorm2d(32),
+ nn.ReLU(),
+ nn.MaxPool2d(2))
+ self.fc = nn.Linear(7 * 7 * 32, 10)
+
+ def forward(self, x):
+ out = self.layer1(x)
+ out = self.layer2(out)
+ out = out.view(out.size(0), -1)
+ out = self.fc(out)
+ return out
+
+class CNNnet(nn.Module):
+ def __init__(self):
+ super(CNNnet, self).__init__()
+ self.cnn1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=5, stride=1, padding=2)
+ self.relu1 = nn.ReLU()
+ self.norm1 = nn.BatchNorm2d(32)
+        nn.init.xavier_uniform_(self.cnn1.weight)
+ self.maxpool1 = nn.MaxPool2d(kernel_size=2)
+ self.cnn2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=2)
+ self.relu2 = nn.ReLU()
+ self.norm2 = nn.BatchNorm2d(64)
+        nn.init.xavier_uniform_(self.cnn2.weight)
+ self.maxpool2 = nn.MaxPool2d(kernel_size=2)
+ self.fc1 = nn.Linear(4096, 4096)
+ self.fcrelu = nn.ReLU()
+ self.fc2 = nn.Linear(4096, 10)
+
+ def forward(self, x):
+ out = self.cnn1(x)
+ out = self.relu1(out)
+ out = self.norm1(out)
+ out = self.maxpool1(out)
+ out = self.cnn2(out)
+ out = self.relu2(out)
+ out = self.norm2(out)
+ out = self.maxpool2(out)
+ out = out.view(out.size(0), -1)
+ out = self.fc1(out)
+ out = self.fcrelu(out)
+ out = self.fc2(out)
+ return out
+
+class Net(nn.Module):
+ def __init__(self):
+ super(Net, self).__init__()
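+        # Assumes 32x32 inputs (e.g., CIFAR-10): five conv+pool stages halve the
+        # spatial size 32 -> 16 -> 8 -> 4 -> 2 -> 1, leaving 128 channels for the
+        # dense head below.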
+ self.conv1 = nn.Sequential(
+ nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ self.conv2 = nn.Sequential(
+ nn.Conv2d(in_channels=6, out_channels=16, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ self.conv3 = nn.Sequential(
+ nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ self.conv4 = nn.Sequential(
+ nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ self.conv5 = nn.Sequential(
+ nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(),
+ nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
+ )
+ self.dense = nn.Sequential(
+ nn.Linear(128, 120),
+ nn.ReLU(),
+ nn.Linear(120, 84),
+ nn.ReLU(),
+ nn.Linear(84, 10)
+ )
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.conv2(x)
+ x = self.conv3(x)
+ x = self.conv4(x)
+ x = self.conv5(x)
+ x = x.view(-1, 128)
+ x = self.dense(x)
+ return x
+
+
+class AlexNet(nn.Module):
+ def __init__(self, num_classes=10, init_weights=False):
+ super(AlexNet, self).__init__()
+ self.features = nn.Sequential(
+ nn.Conv2d(3, 48, kernel_size=11, stride=4, padding=2),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ nn.Conv2d(48, 128, kernel_size=5, padding=2),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ nn.Conv2d(128, 192, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(192, 192, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(192, 128, kernel_size=3, padding=1),
+ nn.ReLU(inplace=True),
+ nn.MaxPool2d(kernel_size=3, stride=2),
+ )
+ self.classifier = nn.Sequential(
+ nn.Dropout(p=0.5),
+ nn.Linear(128 * 6 * 6, 2048),
+ nn.ReLU(inplace=True),
+ nn.Dropout(p=0.5),
+ nn.Linear(2048, 2048),
+ nn.ReLU(inplace=True),
+ nn.Linear(2048, num_classes),
+ )
+ if init_weights:
+ self._initialize_weights()
+
+ def forward(self, x):
+ x = self.features(x)
+ x = torch.flatten(x, start_dim=1)
+ x = self.classifier(x)
+ return x
+
+ def _initialize_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.Linear):
+ nn.init.normal_(m.weight, 0, 0.01)
+ nn.init.constant_(m.bias, 0)
\ No newline at end of file
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/federated_main.py b/subject2-AdaptiveBatchHE/cnn sparisty/federated_main.py
new file mode 100644
index 0000000..cc510af
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/federated_main.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+
+import copy
+import os
+import pickle
+import time
+from torchvision import models, transforms
+
+import numpy as np
+import torch
+from tensorboardX import SummaryWriter
+from tqdm import tqdm
+from utils import get_dataset, average_weights, exp_details
+from mobilenetv2 import mobilenetv2
+from update import LocalUpdate, test_inference
+from models import CNNMnist, CNNFashion_Mnist, CNNCifar, ModifiedVGG11Model,MobileNet,ModifiedAlexnetModel
+from options import args_parser
+
+if __name__ == '__main__':
+ start_time = time.time()
+
+ # define paths
+ path_project = os.path.abspath('..')
+ logger = SummaryWriter('../logs')
+
+ args = args_parser()
+ exp_details(args)
+
+ # if args.gpu:
+ # torch.cuda.set_device(args.gpu)
+ # device = 'cuda' if args.gpu else 'cpu'
+ device = torch.device('cuda:' + str(args.gpu)
+ if torch.cuda.is_available() else 'cpu')
+ # load dataset and user groups
+ test_dir, user_dir = get_dataset(args)
+
+ # BUILD MODEL
+ if args.model == 'cnn':
+        # Convolutional neural network
+ if args.dataset == 'mnist':
+ global_model = CNNMnist(args=args)
+ elif args.dataset == 'fmnist':
+ global_model = CNNFashion_Mnist(args=args)
+ elif args.dataset == 'cifar':
+ global_model = CNNCifar(args=args)
+ elif args.dataset == 'office-home':
+ #cifar 32*32
+ # global_model = ModifiedVGG11Model(args=args)
+
+ # global_model = mobilenetv2(width_mult=0.25)
+ # global_model.load_state_dict(torch.load('mobilenetv2_0.25-b61d2159.pth',map_location='cpu'))
+ # global_model.classifier = torch.nn.Linear(in_features=1280, out_features=args.num_classes)
+
+            # global_model = models.resnet18(pretrained=False)
+ # global_model.fc = torch.nn.Linear(in_features=512, out_features=args.num_classes)
+ global_model = torch.load('/home/liuby/privacy-model-adapt/adapt_fl/mobilenet/office_home/global_model/local5_ep50_0.25/localep50.pt',map_location='cpu')
+
+ # elif args.model == 'mlp':
+ # # Multi-layer preceptron
+ # img_size = train_dataset[0][0].shape
+ # len_in = 1
+ # for x in img_size:
+ # len_in *= x
+ # global_model = MLP(dim_in=len_in, dim_hidden=64,
+ # dim_out=args.num_classes)
+ else:
+ exit('Error: unrecognized model')
+
+ # Set the model to train and send it to device.
+ global_model.to(device)
+ global_model.train()
+ # print(global_model)
+
+ # copy weights
+ global_weights = global_model.state_dict()
+
+ # Training
+ train_loss, train_accuracy = [], []
+ val_acc_list, net_list = [], []
+ cv_loss, cv_acc = [], []
+ print_every = 1
+ val_loss_pre, counter = 0, 0
+ test_acc, test_loss = [], []
+ best_acc=0
+ best_epoch=0
+ for epoch in tqdm(range(args.epochs)):
+ local_weights, local_losses = [], []
+ print(f'\n | Global Training Round : {epoch+1} |\n')
+
+ global_model.train()
+ # m = max(int(args.frac * args.num_users), 1)
+ # idxs_users = np.random.choice(range(args.num_users), m, replace=False)
+
+ idxs_users=args.num_users
+
+ for idx in range(idxs_users):
+ local_model = LocalUpdate(args=args, test_dir=test_dir,
+ user_dir=user_dir[idx], logger=logger)
+ w, loss = local_model.update_weights(
+ model=copy.deepcopy(global_model), global_round=epoch)
+ # loss=loss.cpu()
+ local_weights.append(copy.deepcopy(w))
+ local_losses.append(copy.deepcopy(loss))
+
+ # update global weights
+ global_weights = average_weights(local_weights)
+ # global_weights=global_weights.cuda()
+ # update global weights
+ global_model.load_state_dict(global_weights)
+
+ loss_avg = sum(local_losses) / len(local_losses)
+ train_loss.append(loss_avg)
+
+ # Calculate avg training accuracy over all users at every epoch
+ list_acc, list_loss = [], []
+ global_model.eval()
+ for idx in range(args.num_users):
+ local_model = LocalUpdate(args=args, test_dir=test_dir,
+ user_dir=user_dir[idx], logger=logger)
+ acc, loss = local_model.inference(model=global_model)
+ # acc, loss =acc.cpu(),loss.cpu()
+ list_acc.append(acc)
+ # list_loss.append(loss)
+ train_accuracy.append(sum(list_acc)/len(list_acc))
+
+ # print global training loss after every 'i' rounds
+ if (epoch+1) % print_every == 0:
+ print(f' \nAvg Training Stats after {epoch+1} global rounds:')
+ # print(f'Training Loss : {np.mean(np.array(train_loss))}')
+ print(f'Training Loss : {loss_avg}')
+ print('Train Accuracy: {:.2f}% \n'.format(100*train_accuracy[-1]))
+
+
+ # Test inference
+ acc, loss = test_inference(args, global_model, test_dir)
+ # acc, loss = acc.cpu(), loss.cpu()
+ test_acc.append(acc)
+ test_loss.append(loss)
+ print('Test Accuracy: {:.2f}% \n'.format(100 * acc))
+ print('test loss is ',loss)
+ if acc>best_acc:
+ best_epoch=epoch
+ best_acc=acc
+ best_model = copy.deepcopy(global_model)
+
+
+ print('the best acc is',best_acc)
+ print('the best epoch is ',best_epoch)
+ # torch.save(best_model,'./mobilenet/office_home/random/real/localep5_ep50_50.pt')
+ # np.savetxt('./mobilenet/office_home/random/real/localep5_ep50_train_loss_50',train_loss)
+ # np.savetxt('./mobilenet/office_home/random/real/localep5_ep50_train_acc_50', train_accuracy)
+ # np.savetxt('./mobilenet/office_home/random/real/localep5_ep50_test_loss_50', test_loss)
+ # np.savetxt('./mobilenet/office_home/random/real/localep5_ep50_test_acc_50', test_acc)
+
+ # torch.save(best_model, './mobilenet/cifar10_32/global_model/local5_ep50/localep50_50.pt')
+ # np.savetxt('./mobilenet/cifar10_32/global_model/local5_ep50/localep50_train_loss_50', train_loss)
+ # np.savetxt('./mobilenet/cifar10_32/global_model/local5_ep50/localep50_train_acc_50', train_accuracy)
+ # np.savetxt('./mobilenet/cifar10_32/global_model/local5_ep50/localep50_test_loss_50', test_loss)
+ # np.savetxt('./mobilenet/cifar10_32/global_model/local5_ep50/mobilenet_cifar_test', test_acc)
+
+ # print(f' \n Results after {args.epochs} global rounds of training:')
+ # print("|---- Avg Train Accuracy: {:.2f}%".format(100*train_accuracy[-1]))
+ # print("|---- Test Accuracy: {:.2f}%".format(100*test_acc))
+
+ # Saving the objects train_loss and train_accuracy:
+ # file_name = '../save/objects/{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}].pkl'.\
+ # format(args.dataset, args.model, args.epochs, args.frac, args.iid,
+ # args.local_ep, args.local_bs)
+
+ # with open(file_name, 'wb') as f:
+ # pickle.dump([train_loss, train_accuracy], f)
+
+ print('\n Total Run Time: {0:0.4f}'.format(time.time()-start_time))
+
+ # PLOTTING (optional)
+ # import matplotlib
+ # import matplotlib.pyplot as plt
+ # matplotlib.use('Agg')
+
+ # Plot Loss curve
+ # plt.figure()
+ # plt.title('Training Loss vs Communication rounds')
+ # plt.plot(range(len(train_loss)), train_loss, color='r')
+ # plt.ylabel('Training loss')
+ # plt.xlabel('Communication Rounds')
+ # plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_loss.png'.
+ # format(args.dataset, args.model, args.epochs, args.frac,
+ # args.iid, args.local_ep, args.local_bs))
+ #
+ # # Plot Average Accuracy vs Communication rounds
+ # plt.figure()
+ # plt.title('Average Accuracy vs Communication rounds')
+ # plt.plot(range(len(train_accuracy)), train_accuracy, color='k')
+ # plt.ylabel('Average Accuracy')
+ # plt.xlabel('Communication Rounds')
+ # plt.savefig('../save/fed_{}_{}_{}_C[{}]_iid[{}]_E[{}]_B[{}]_acc.png'.
+ # format(args.dataset, args.model, args.epochs, args.frac,
+ # args.iid, args.local_ep, args.local_bs))
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/mobilenetv2.py b/subject2-AdaptiveBatchHE/cnn sparisty/mobilenetv2.py
new file mode 100644
index 0000000..fdd8354
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/mobilenetv2.py
@@ -0,0 +1,151 @@
+"""
+Creates a MobileNetV2 Model as defined in:
+Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. (2018).
+MobileNetV2: Inverted Residuals and Linear Bottlenecks
+arXiv preprint arXiv:1801.04381.
+import from https://github.com/tonylins/pytorch-mobilenet-v2
+"""
+
+import torch.nn as nn
+import math
+
+__all__ = ['mobilenetv2']
+
+
+def _make_divisible(v, divisor, min_value=None):
+ """
+ This function is taken from the original tf repo.
+ It ensures that all layers have a channel number that is divisible by 8
+ It can be seen here:
+ https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+ :param v:
+ :param divisor:
+ :param min_value:
+ :return:
+ """
+ if min_value is None:
+ min_value = divisor
+ new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+ # Make sure that round down does not go down by more than 10%.
+ if new_v < 0.9 * v:
+ new_v += divisor
+ return new_v
+
+
+def conv_3x3_bn(inp, oup, stride):
+ return nn.Sequential(
+ nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+ nn.BatchNorm2d(oup),
+ nn.ReLU6(inplace=True)
+ )
+
+
+def conv_1x1_bn(inp, oup):
+ return nn.Sequential(
+ nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ nn.ReLU6(inplace=True)
+ )
+
+
+class InvertedResidual(nn.Module):
+ def __init__(self, inp, oup, stride, expand_ratio):
+ super(InvertedResidual, self).__init__()
+ assert stride in [1, 2]
+
+ hidden_dim = round(inp * expand_ratio)
+ self.identity = stride == 1 and inp == oup
+
+ if expand_ratio == 1:
+ self.conv = nn.Sequential(
+ # dw
+ nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
+ nn.BatchNorm2d(hidden_dim),
+ nn.ReLU6(inplace=True),
+ # pw-linear
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ )
+ else:
+ self.conv = nn.Sequential(
+ # pw
+ nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(hidden_dim),
+ nn.ReLU6(inplace=True),
+ # dw
+ nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
+ nn.BatchNorm2d(hidden_dim),
+ nn.ReLU6(inplace=True),
+ # pw-linear
+ nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ )
+
+ def forward(self, x):
+ if self.identity:
+ return x + self.conv(x)
+ else:
+ return self.conv(x)
+
+
+class MobileNetV2(nn.Module):
+ def __init__(self, num_classes=1000, width_mult=1.):
+ super(MobileNetV2, self).__init__()
+ # setting of inverted residual blocks
+ self.cfgs = [
+ # t, c, n, s
+ [1, 16, 1, 1],
+ [6, 24, 2, 2],
+ [6, 32, 3, 2],
+ [6, 64, 4, 2],
+ [6, 96, 3, 1],
+ [6, 160, 3, 2],
+ [6, 320, 1, 1],
+ ]
+
+ # building first layer
+ input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
+ layers = [conv_3x3_bn(3, input_channel, 2)]
+ # building inverted residual blocks
+ block = InvertedResidual
+ for t, c, n, s in self.cfgs:
+ output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
+ for i in range(n):
+ layers.append(block(input_channel, output_channel, s if i == 0 else 1, t))
+ input_channel = output_channel
+ self.features = nn.Sequential(*layers)
+ # building last several layers
+ output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
+ self.conv = conv_1x1_bn(input_channel, output_channel)
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
+ self.classifier = nn.Linear(output_channel, num_classes)
+
+ self._initialize_weights()
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.conv(x)
+ x = self.avgpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.classifier(x)
+ return x
+
+ def _initialize_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+ m.weight.data.normal_(0, math.sqrt(2. / n))
+ if m.bias is not None:
+ m.bias.data.zero_()
+ elif isinstance(m, nn.BatchNorm2d):
+ m.weight.data.fill_(1)
+ m.bias.data.zero_()
+ elif isinstance(m, nn.Linear):
+ m.weight.data.normal_(0, 0.01)
+ m.bias.data.zero_()
+
+def mobilenetv2(**kwargs):
+ """
+ Constructs a MobileNet V2 model
+ """
+ return MobileNetV2(**kwargs)
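+
+# Usage sketch (illustrative; not invoked by the training scripts shown here):
+#   net = mobilenetv2(num_classes=10, width_mult=0.5)
+#   logits = net(torch.randn(1, 3, 224, 224))  # -> shape (1, 10)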
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/models.py b/subject2-AdaptiveBatchHE/cnn sparisty/models.py
new file mode 100644
index 0000000..be50a84
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/models.py
@@ -0,0 +1,217 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+# from torch import nn
+from torchvision import models, transforms
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class MLP(nn.Module):
+ def __init__(self, dim_in, dim_hidden, dim_out):
+ super(MLP, self).__init__()
+ self.layer_input = nn.Linear(dim_in, dim_hidden)
+ self.relu = nn.ReLU()
+ self.dropout = nn.Dropout()
+ self.layer_hidden = nn.Linear(dim_hidden, dim_out)
+ self.softmax = nn.Softmax(dim=1)
+
+ def forward(self, x):
+ x = x.view(-1, x.shape[1]*x.shape[-2]*x.shape[-1])
+ x = self.layer_input(x)
+ x = self.dropout(x)
+ x = self.relu(x)
+ x = self.layer_hidden(x)
+ return self.softmax(x)
+
+
+class MobileNet(nn.Module):
+ def __init__(self,args):
+ super(MobileNet, self).__init__()
+
+ def conv_bn(inp, oup, stride):
+ return nn.Sequential(
+ nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
+ nn.BatchNorm2d(oup),
+ nn.ReLU(inplace=True)
+ )
+
+ def conv_dw(inp, oup, stride):
+ return nn.Sequential(
+ nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),
+ nn.BatchNorm2d(inp),
+ nn.ReLU(inplace=True),
+
+ nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
+ nn.BatchNorm2d(oup),
+ nn.ReLU(inplace=True),
+ )
+
+ self.model = nn.Sequential(
+ conv_bn(3, 32, 2),
+ conv_dw(32, 64, 1),
+ conv_dw(64, 128, 2),
+ conv_dw(128, 128, 1),
+ conv_dw(128, 256, 2),
+ conv_dw(256, 256, 1),
+ conv_dw(256, 512, 2),
+ conv_dw(512, 512, 1),
+ conv_dw(512, 512, 1),
+ conv_dw(512, 512, 1),
+ conv_dw(512, 512, 1),
+ conv_dw(512, 512, 1),
+ conv_dw(512, 1024, 2),
+ conv_dw(1024, 1024, 1),
+ nn.AvgPool2d(7),
+ )
+ self.fc = nn.Linear(1024,args.num_classes)
+
+ def forward(self, x):
+ x = self.model(x)
+ x = x.view(-1, 1024)
+ x = self.fc(x)
+ return x
+
+class CNNMnist(nn.Module):
+ def __init__(self, args):
+ super(CNNMnist, self).__init__()
+ self.conv1 = nn.Conv2d(args.num_channels, 10, kernel_size=5)
+ self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
+ self.conv2_drop = nn.Dropout2d()
+ self.fc1 = nn.Linear(320, 50)
+ self.fc2 = nn.Linear(50, args.num_classes)
+
+ def forward(self, x):
+ x = F.relu(F.max_pool2d(self.conv1(x), 2))
+ x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
+ x = x.view(-1, x.shape[1]*x.shape[2]*x.shape[3])
+ x = F.relu(self.fc1(x))
+ x = F.dropout(x, training=self.training)
+ x = self.fc2(x)
+ return F.log_softmax(x, dim=1)
+
+
+class CNNFashion_Mnist(nn.Module):
+ def __init__(self, args):
+ super(CNNFashion_Mnist, self).__init__()
+ self.layer1 = nn.Sequential(
+ nn.Conv2d(1, 16, kernel_size=5, padding=2),
+ nn.BatchNorm2d(16),
+ nn.ReLU(),
+ nn.MaxPool2d(2))
+ self.layer2 = nn.Sequential(
+ nn.Conv2d(16, 32, kernel_size=5, padding=2),
+ nn.BatchNorm2d(32),
+ nn.ReLU(),
+ nn.MaxPool2d(2))
+ self.fc = nn.Linear(7*7*32, 10)
+
+ def forward(self, x):
+ out = self.layer1(x)
+ out = self.layer2(out)
+ out = out.view(out.size(0), -1)
+ out = self.fc(out)
+ return out
+
+
+class CNNCifar(nn.Module):
+ def __init__(self, args):
+ super(CNNCifar, self).__init__()
+ self.conv1 = nn.Conv2d(3, 6, 5)
+ self.pool = nn.MaxPool2d(2, 2)
+ self.conv2 = nn.Conv2d(6, 16, 5)
+ self.fc1 = nn.Linear(16 * 5 * 5, 120)
+ self.fc2 = nn.Linear(120, 84)
+ self.fc3 = nn.Linear(84, args.num_classes)
+
+ def forward(self, x):
+ x = self.pool(F.relu(self.conv1(x)))
+ x = self.pool(F.relu(self.conv2(x)))
+ x = x.view(-1, 16 * 5 * 5)
+ x = F.relu(self.fc1(x))
+ x = F.relu(self.fc2(x))
+ x = self.fc3(x)
+ return F.log_softmax(x, dim=1)
+
+class ModifiedVGG11Model(nn.Module):
+ def __init__(self,args):
+ super(ModifiedVGG11Model, self).__init__()
+
+ model = models.vgg11_bn(pretrained=True)
+ self.features = model.features
+
+ for param in self.features.parameters():
+ param.requires_grad = True
+
+ self.avpool = nn.AvgPool2d(1, stride=1)
+ classifier = nn.Sequential(
+ nn.Linear(512, args.num_classes),
+ )
+ self.fc = classifier
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.fc(x)
+ return x
+
+
+class ModifiedAlexnetModel(nn.Module):
+ def __init__(self,args):
+ super(ModifiedAlexnetModel, self).__init__()
+
+ model = models.alexnet(pretrained=True)
+ self.features = model.features
+
+ for param in self.features.parameters():
+ param.requires_grad = True
+
+ self.avpool = nn.AvgPool2d(6, stride=1)
+ classifier = nn.Sequential(
+ nn.Linear(256, args.num_classes),
+ )
+ self.fc = classifier
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.fc(x)
+ return x
+
+
+class modelC(nn.Module):
+ def __init__(self, input_size, n_classes=10, **kwargs):
+ super(modelC, self).__init__()
+ self.conv1 = nn.Conv2d(input_size, 96, 3, padding=1)
+ self.conv2 = nn.Conv2d(96, 96, 3, padding=1)
+ self.conv3 = nn.Conv2d(96, 96, 3, padding=1, stride=2)
+ self.conv4 = nn.Conv2d(96, 192, 3, padding=1)
+ self.conv5 = nn.Conv2d(192, 192, 3, padding=1)
+ self.conv6 = nn.Conv2d(192, 192, 3, padding=1, stride=2)
+ self.conv7 = nn.Conv2d(192, 192, 3, padding=1)
+ self.conv8 = nn.Conv2d(192, 192, 1)
+
+ self.class_conv = nn.Conv2d(192, n_classes, 1)
+
+
+ def forward(self, x):
+ x_drop = F.dropout(x, .2)
+ conv1_out = F.relu(self.conv1(x_drop))
+ conv2_out = F.relu(self.conv2(conv1_out))
+ conv3_out = F.relu(self.conv3(conv2_out))
+ conv3_out_drop = F.dropout(conv3_out, .5)
+ conv4_out = F.relu(self.conv4(conv3_out_drop))
+ conv5_out = F.relu(self.conv5(conv4_out))
+ conv6_out = F.relu(self.conv6(conv5_out))
+ conv6_out_drop = F.dropout(conv6_out, .5)
+ conv7_out = F.relu(self.conv7(conv6_out_drop))
+ conv8_out = F.relu(self.conv8(conv7_out))
+
+ class_out = F.relu(self.class_conv(conv8_out))
+ pool_out = F.adaptive_avg_pool2d(class_out, 1)
+ pool_out.squeeze_(-1)
+ pool_out.squeeze_(-1)
+ return pool_out
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/options.py b/subject2-AdaptiveBatchHE/cnn sparisty/options.py
new file mode 100644
index 0000000..869b898
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/options.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+import argparse
+
+
+def args_parser():
+ parser = argparse.ArgumentParser()
+
+ # federated arguments (Notation for the arguments followed from paper)
+ parser.add_argument('--epochs', type=int, default=50,
+ help="number of rounds of training")
+ parser.add_argument('--num_users', type=int, default=20,
+ help="number of users: K")
+ parser.add_argument('--frac', type=float, default=1,
+ help='the fraction of clients: C')
+ parser.add_argument('--local_ep', type=int, default=10,
+ help="the number of local epochs: E")
+ parser.add_argument('--local_bs', type=int, default=64,
+ help="local batch size: B")
+ parser.add_argument('--lr', type=float, default=0.01,
+ help='learning rate')
+ parser.add_argument('--momentum', type=float, default=0.5,
+ help='SGD momentum (default: 0.5)')
+
+ # model arguments
+ parser.add_argument('--model', type=str, default='mlp', help='model name')
+ parser.add_argument('--kernel_num', type=int, default=9,
+ help='number of each kind of kernel')
+ parser.add_argument('--kernel_sizes', type=str, default='3,4,5',
+ help='comma-separated kernel size to \
+ use for convolution')
+ parser.add_argument('--num_channels', type=int, default=1, help="number \
+ of channels of imgs")
+ parser.add_argument('--norm', type=str, default='batch_norm',
+ help="batch_norm, layer_norm, or None")
+ parser.add_argument('--num_filters', type=int, default=32,
+ help="number of filters for conv nets -- 32 for \
+ mini-imagenet, 64 for omniglot.")
+ parser.add_argument('--max_pool', type=str, default='True',
+ help="Whether use max pooling rather than \
+ strided convolutions")
+
+ # other arguments
+ parser.add_argument('--dataset', type=str, default='mnist', help="name \
+ of dataset")
+ parser.add_argument('--num_classes', type=int, default=10, help="number \
+ of classes")
+ parser.add_argument('--gpu', default=None, help="To use cuda, set \
+ to a specific GPU ID. Default set to use CPU.")
+ parser.add_argument('--optimizer', type=str, default='sgd', help="type \
+ of optimizer")
+ parser.add_argument('--iid', type=int, default=1,
+ help='Default set to IID. Set to 0 for non-IID.')
+ parser.add_argument('--unequal', type=int, default=0,
+ help='whether to use unequal data splits for \
+ non-i.i.d setting (use 0 for equal splits)')
+ parser.add_argument('--stopping_rounds', type=int, default=10,
+ help='rounds of early stopping')
+ parser.add_argument('--verbose', type=int, default=1, help='verbose')
+ parser.add_argument('--seed', type=int, default=1, help='random seed')
+ args = parser.parse_args()
+ return args
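+
+# Example invocation (illustrative; flag values are placeholders):
+#   python federated_main.py --dataset=cifar --model=cnn --epochs=50 --iid=0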
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/sampling.py b/subject2-AdaptiveBatchHE/cnn sparisty/sampling.py
new file mode 100644
index 0000000..ee0a5f2
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/sampling.py
@@ -0,0 +1,198 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+
+import numpy as np
+from torchvision import datasets, transforms
+
+
+def mnist_iid(dataset, num_users):
+ """
+ Sample I.I.D. client data from MNIST dataset
+ :param dataset:
+ :param num_users:
+ :return: dict of image index
+ """
+ num_items = int(len(dataset)/num_users)
+ dict_users, all_idxs = {}, [i for i in range(len(dataset))]
+ for i in range(num_users):
+ dict_users[i] = set(np.random.choice(all_idxs, num_items,
+ replace=False))
+ all_idxs = list(set(all_idxs) - dict_users[i])
+ return dict_users
+
+
+def mnist_noniid(dataset, num_users):
+ """
+ Sample non-I.I.D client data from MNIST dataset
+ :param dataset:
+ :param num_users:
+ :return:
+ """
+ # 60,000 training imgs --> 300 imgs/shard X 200 shards
+ num_shards, num_imgs = 200, 300
+ idx_shard = [i for i in range(num_shards)]
+ dict_users = {i: np.array([]) for i in range(num_users)}
+ idxs = np.arange(num_shards*num_imgs)
+ labels = dataset.targets.numpy()  # train_labels is deprecated in torchvision
+
+ # sort labels
+ idxs_labels = np.vstack((idxs, labels))
+ idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
+ idxs = idxs_labels[0, :]
+
+ # divide and assign 2 shards/client
+ for i in range(num_users):
+ rand_set = set(np.random.choice(idx_shard, 2, replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[i] = np.concatenate(
+ (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)
+ return dict_users
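+
+# Worked example (illustrative): with num_users=100, every client gets
+# 2 shards x 300 images = 600 images; because shards are cut from
+# label-sorted data, each client sees only a few digit classes (non-IID).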
+
+
+def mnist_noniid_unequal(dataset, num_users):
+ """
+ Sample non-I.I.D. client data from MNIST dataset s.t. clients
+ have unequal amounts of data
+ :param dataset:
+ :param num_users:
+ :returns: a dict of clients, with each client assigned a certain
+ number of training imgs
+ """
+ # 60,000 training imgs --> 50 imgs/shard X 1200 shards
+ num_shards, num_imgs = 1200, 50
+ idx_shard = [i for i in range(num_shards)]
+ dict_users = {i: np.array([]) for i in range(num_users)}
+ idxs = np.arange(num_shards*num_imgs)
+ labels = dataset.targets.numpy()
+
+ # sort labels
+ idxs_labels = np.vstack((idxs, labels))
+ idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
+ idxs = idxs_labels[0, :]
+
+ # Minimum and maximum shards assigned per client:
+ min_shard = 1
+ max_shard = 30
+
+ # Divide the shards into random chunks for every client
+ # s.t the sum of these chunks = num_shards
+ random_shard_size = np.random.randint(min_shard, max_shard+1,
+ size=num_users)
+ random_shard_size = np.around(random_shard_size /
+ sum(random_shard_size) * num_shards)
+ random_shard_size = random_shard_size.astype(int)
+
+ # Assign the shards randomly to each client
+ if sum(random_shard_size) > num_shards:
+
+ for i in range(num_users):
+ # First assign each client 1 shard to ensure every client has
+ # at least one shard of data
+ rand_set = set(np.random.choice(idx_shard, 1, replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[i] = np.concatenate(
+ (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),
+ axis=0)
+
+ random_shard_size = random_shard_size-1
+
+ # Next, randomly assign the remaining shards
+ for i in range(num_users):
+ if len(idx_shard) == 0:
+ continue
+ shard_size = random_shard_size[i]
+ if shard_size > len(idx_shard):
+ shard_size = len(idx_shard)
+ rand_set = set(np.random.choice(idx_shard, shard_size,
+ replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[i] = np.concatenate(
+ (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),
+ axis=0)
+ else:
+
+ for i in range(num_users):
+ shard_size = random_shard_size[i]
+ rand_set = set(np.random.choice(idx_shard, shard_size,
+ replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[i] = np.concatenate(
+ (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]),
+ axis=0)
+
+ if len(idx_shard) > 0:
+ # Add the leftover shards to the client with minimum images:
+ shard_size = len(idx_shard)
+ # Add the remaining shard to the client with lowest data
+ k = min(dict_users, key=lambda x: len(dict_users.get(x)))
+ rand_set = set(np.random.choice(idx_shard, shard_size,
+ replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[k] = np.concatenate(
+ (dict_users[k], idxs[rand*num_imgs:(rand+1)*num_imgs]),
+ axis=0)
+
+ return dict_users
+
+
+def cifar_iid(dataset, num_users):
+ """
+ Sample I.I.D. client data from CIFAR10 dataset
+ :param dataset:
+ :param num_users:
+ :return: dict of image index
+ """
+ num_items = int(len(dataset)/num_users)
+ dict_users, all_idxs = {}, [i for i in range(len(dataset))]
+ for i in range(num_users):
+ dict_users[i] = set(np.random.choice(all_idxs, num_items,
+ replace=False))
+ all_idxs = list(set(all_idxs) - dict_users[i])
+ return dict_users
+
+
+def cifar_noniid(dataset, num_users):
+ """
+ Sample non-I.I.D client data from CIFAR10 dataset
+ :param dataset:
+ :param num_users:
+ :return:
+ """
+ num_shards, num_imgs = 200, 250
+ idx_shard = [i for i in range(num_shards)]
+ dict_users = {i: np.array([]) for i in range(num_users)}
+ idxs = np.arange(num_shards*num_imgs)
+ labels = np.array(dataset.targets)
+
+ # sort labels
+ idxs_labels = np.vstack((idxs, labels))
+ idxs_labels = idxs_labels[:, idxs_labels[1, :].argsort()]
+ idxs = idxs_labels[0, :]
+
+ # divide and assign
+ for i in range(num_users):
+ rand_set = set(np.random.choice(idx_shard, 2, replace=False))
+ idx_shard = list(set(idx_shard) - rand_set)
+ for rand in rand_set:
+ dict_users[i] = np.concatenate(
+ (dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)
+ return dict_users
+
+
+if __name__ == '__main__':
+ dataset_train = datasets.MNIST('./data/mnist/', train=True, download=True,
+ transform=transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.1307,),
+ (0.3081,))
+ ]))
+ num = 100
+ d = mnist_noniid(dataset_train, num)
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/sparisty_similarity.py b/subject2-AdaptiveBatchHE/cnn sparisty/sparisty_similarity.py
new file mode 100644
index 0000000..0c15f27
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/sparisty_similarity.py
@@ -0,0 +1,138 @@
+from __future__ import print_function
+import torch
+import math
+import random
+# import seaborn as sns
+import heapq
+import numpy as np
+# from matplotlib import pyplot as plt
+import scipy.stats
+from scipy.stats import norm
+from torchvision import models
+import collections
+
+'''Main routine for anomaly detection'''
+
+
+def anomalyDetection_example():
+
+ '''Load the per-client sparsity data'''
+
+ sim_list=[]
+ whole_list=[]
+ # total_list=np.zeros((1,8))
+ path_dir = []
+ test_path = 'result_sparsity_8client/client'
+ # test_path = 'result_mobilenet/client'
+
+ for i in range(8):
+ tardir = test_path + str(i) + '_layer27_ratio_office'
+ path_dir.append(tardir)
+
+ # print(path_dir)
+ # exit()
+
+ for index in range(8):
+ target = np.loadtxt('result_sparsity_8client/client0_layer27_ratio_office', dtype=np.float32)
+ # target = np.loadtxt('result_mobilenet/client0_layer2_ratio_mobilenet', dtype=np.float32)
+ test = np.loadtxt(path_dir[index], dtype=np.float32)
+ channel_num = 20
+ # print(target.shape)
+ # print(test1.shape)
+ sample_mean = np.mean(target, axis=0)
+ sample_mean = sample_mean.tolist()
+ max_num_index_mean = list(map(sample_mean.index, heapq.nlargest(channel_num, sample_mean)))
+ # sample_var = np.var(target, axis=0)
+ # sample_var = sample_var.tolist()
+ # min_num_index_var = list(map(sample_var.index, heapq.nsmallest(channel_num, sample_var)))
+ random.seed(102)
+ index_list_bn = random.sample(range(0, 512), channel_num)
+ # index_list_bn = max_num_index_mean
+ target = target[:, index_list_bn]
+ test = test[:, index_list_bn]
+
+ target_avg=np.mean(target,axis=0)
+ test_avg = np.mean(test, axis=0)
+ # print(target_avg.shape)
+ # print(target_avg)
+ # print(test_avg)
+
+ #Euclidean Distance
+ dist = np.sqrt(np.sum(np.square(target_avg - test_avg)))
+ sim_list.append(dist)
+ # sim=cos_sim(target_avg,test_avg)
+ # print(dist,sim)
+ whole_list.append(sim_list)
+ temp = np.array(whole_list)
+ f=np.loadtxt('../plot/neuron_location/20neuron')
+ # f=f[np.newaxis,:]
+ print(temp.shape)
+ print(f.shape)
+ f=np.concatenate((f, temp), axis=0)
+ # f=np.delete(f, 0, 1)
+
+ np.savetxt('../plot/neuron_location/20neuron',f)
+
+
+
+
+
+def cos_sim(vector_a, vector_b):
+ """
+ 计算两个向量之间的余弦相似度
+ :param vector_a: 向量 a
+ :param vector_b: 向量 b
+ :return: sim
+ """
+ vector_a = np.mat(vector_a)
+ vector_b = np.mat(vector_b)
+ num = float(vector_a * vector_b.T)
+ denom = np.linalg.norm(vector_a) * np.linalg.norm(vector_b)
+ cos = num / denom
+ sim = 0.5 + 0.5 * cos
+ return sim
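+
+# Example (illustrative): identical vectors give sim = 1.0, orthogonal ones
+# give sim = 0.5, and opposite ones give sim = 0.0, because the raw cosine
+# in [-1, 1] is rescaled by 0.5 + 0.5 * cos.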
+
+def calculate_gaussian_kl_divergence(m1,m2,v1,v2):
+ ### m1, m2: means of the two Gaussians
+ ### v1, v2: standard deviations of the two Gaussians
+ ### (the closed form below treats v1 and v2 as standard deviations)
+ return np.log(v2 / v1) + (v1*v1+(m1-m2)*(m1-m2))/(2*v2*v2) - 0.5
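+
+# Sanity check (illustrative): the divergence of a Gaussian from itself is
+# zero, e.g. calculate_gaussian_kl_divergence(0., 0., 1., 1.) == 0.0.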
+
+
+def compute_cov(mu,X):
+ dim=X[0].size
+ # print(dim)
+ mu = np.mean(X, axis=0)
+ for i in range(X[:,0].size):
+ X[i]=X[i]-mu
+ sigma2=(1/dim)*np.dot(X.T,X)
+ return sigma2
+
+def estimateGaussian(X):
+ m, n = X.shape
+ mu = np.zeros((n, 1))
+ sigma2 = np.zeros((n, 1))
+ mu = np.mean(X, axis=0) # axis=0: mean of each column (feature)
+ var=np.var(X,axis=0)
+ # sigma2 = np.cov(X.T) # covariance across columns
+ return mu, var
+
+def sigmoid(x):
+ s = 1 / (1 + np.exp(-x))
+ return s
+
+def tanh(x):
+ s1 = np.exp(x) - np.exp(-x)
+ s2 = np.exp(x) + np.exp(-x)
+ s = s1 / s2
+ return s
+
+# Gaussian probability density function
+
+def norm_gaussian(dataset, mu, sigma):
+ # p = norm(mu, sigma)
+ # return p.pdf(dataset)
+ return norm(mu, sigma).pdf(dataset)
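+
+# Example (illustrative): norm_gaussian(0.0, 0.0, 1.0) is the standard normal
+# density at 0, i.e. 1 / sqrt(2 * pi) ≈ 0.3989.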
+
+
+if __name__ == '__main__':
+ anomalyDetection_example()
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_mobilenet.py b/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_mobilenet.py
new file mode 100644
index 0000000..be9c382
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_mobilenet.py
@@ -0,0 +1,148 @@
+from torchvision import transforms
+from torchvision import models
+import torch.nn as nn
+import torch
+import torchvision.datasets as datasets
+import numpy as np
+
+
+# myresnet=resnet50(pretrained=True)
+# print (myresnet)
+
+
+class ModifiedVGG16Model(torch.nn.Module):
+ def __init__(self):
+ super(ModifiedVGG16Model, self).__init__()
+
+ model = models.vgg16_bn(pretrained=True)
+ self.features = model.features
+
+ for param in self.features.parameters():
+ param.requires_grad = True
+
+ self.avpool = nn.AvgPool2d(7, stride=1)
+ classifier = nn.Sequential(
+ nn.Linear(512, 1000),
+ )
+ self.fc = classifier
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.fc(x)
+ return x
+
+
+mean_list = np.zeros((1, 96))
+
+
+class FeatureExtractor(nn.Module):
+ def __init__(self, submodule, extracted_layers):
+ super(FeatureExtractor, self).__init__()
+ self.submodule = submodule
+ self.extracted_layers = extracted_layers
+
+ def forward(self, x):
+
+ global mean_list
+ mean_temp = np.zeros((x.size(0), 1))
+ # var_temp = np.zeros((x.size(0), 1))
+ for name, module in self.submodule.features._modules.items():
+ # if name is "fc": x = x.view(x.size(0), -1)
+ if name in self.extracted_layers:
+
+ x = module.conv[0](x)
+ x = module.conv[1](x)
+ x = module.conv[2](x)
+
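+ # Per-channel activation sparsity: the fraction of zero entries in each
+ # H x W feature map (density counts non-zero entries per channel).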
+ temp = x.cpu().detach().numpy()
+ density = np.count_nonzero(temp, (2, 3))
+ sparsity = (np.size(temp, 2) * np.size(temp, 3) - density) / (np.size(temp, 2) * np.size(temp, 3))
+ mean_temp = np.concatenate((mean_temp, sparsity), axis=1)
+ x = module.conv[3](x)
+ x = module.conv[4](x)
+ x = module.conv[5](x)
+ temp = x.cpu().detach().numpy()
+ density = np.count_nonzero(temp, (2, 3))
+ sparsity = (np.size(temp, 2) * np.size(temp, 3) - density) / (np.size(temp, 2) * np.size(temp, 3))
+ mean_temp = np.concatenate((mean_temp, sparsity), axis=1)
+
+ x = module.conv[6](x)
+ x = module.conv[7](x)
+
+
+ # var_temp = np.concatenate((var_temp, var), axis=1)
+
+
+ else:
+ x = module(x) # last layer output put into current layer input
+ mean_temp = np.delete(mean_temp, 0, 1)
+ # var_temp = np.delete(var_temp, 0, 1)
+
+ mean_list = np.concatenate((mean_list, mean_temp), axis=0)
+ # var_list = np.concatenate((var_list, var_temp), axis=0)
+
+
+
+
+if __name__ == '__main__':
+
+ path_dir = []
+ # test_path = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/federated_learning/client'
+ test_path = '/mnt/data/liuby/aaai-privacy-model-adapt/cifar10png/client'
+
+ for i in [1,2,6,7,11,12,16,17,21,22]:
+ tardir = test_path + str(i) + '/'
+ path_dir.append(tardir)
+ for client in range(10):
+ mean_list = np.zeros((1, 96))
+ var_list = np.zeros((1, 96))
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+ test_loader = torch.utils.data.DataLoader(
+ datasets.ImageFolder(path_dir[client], transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ # transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ normalize,
+ ])),
+ batch_size=64, shuffle=False, drop_last=False,
+ num_workers=4)
+
+ layer_number = [3, 7, 10, 14, 17, 20, 24, 27, 30, 34, 37, 40]
+ # exact_list=['3','7','10']
+ # exact_list = ['14', '17', '20']
+ exact_list = ['2']
+ # exact_list = ['34', '37', '40']
+ # model=models.vgg16(pretrained=True).cuda()
+ model = torch.load("/home/liuby/privacy-model-adapt/adapt_fl/mobilenet/cifar10/global_model/local5_ep50/localep50.pt",
+ map_location='cpu').cuda()
+ model.eval()
+ myexactor = FeatureExtractor(model, exact_list)
+
+ output = []
+ i = 0
+ for data, target in test_loader:
+ # print(data.size(0))
+ data, target = data.cuda(), target.cuda()
+ if i % 10 == 0:
+ print(i)
+ # t0 = time.time()
+ myexactor(data)
+ i += 1
+ # break
+
+ print(mean_list.shape)
+
+ mean_list = np.delete(mean_list, 0, 0)
+
+ print(mean_list.shape)
+
+ # np.savetxt('./res18/office-home/feature_extract/result_relu/art_target_mean_layer3', mean_list)
+ # np.savetxt('./res18/office-home/feature_extract/result_relu/art_target_var_layer3', var_list)
+ file_name = './mobilenet/office_home/sparsity_extract/client' + str(client) + '_layer2_ratio_mobilenet'
+ np.savetxt(file_name, mean_list)
+
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_vgg.py b/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_vgg.py
new file mode 100644
index 0000000..3892e6a
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/sparsity_extract_vgg.py
@@ -0,0 +1,137 @@
+from torchvision import transforms
+from torchvision import models
+import torch.nn as nn
+import torch
+import torchvision.datasets as datasets
+import numpy as np
+
+
+# myresnet=resnet50(pretrained=True)
+# print (myresnet)
+
+
+class ModifiedVGG16Model(torch.nn.Module):
+ def __init__(self):
+ super(ModifiedVGG16Model, self).__init__()
+
+ model = models.vgg11_bn(pretrained=False)
+ self.features = model.features
+
+ for param in self.features.parameters():
+ param.requires_grad = True
+
+ self.avpool = nn.AvgPool2d(7, stride=1)
+ classifier = nn.Sequential(
+ nn.Linear(512, 1000),
+ )
+ self.fc = classifier
+
+ def forward(self, x):
+ x = self.features(x)
+ x = self.avpool(x)
+ x = x.view(x.size(0), -1)
+ x = self.fc(x)
+ return x
+
+
+mean_list = np.zeros((1, 512))
+
+
+class FeatureExtractor(nn.Module):
+ def __init__(self, submodule, extracted_layers):
+ super(FeatureExtractor, self).__init__()
+ self.submodule = submodule
+ self.extracted_layers = extracted_layers
+
+ def forward(self, x):
+
+ global mean_list
+ mean_temp = np.zeros((x.size(0), 1))
+ # var_temp = np.zeros((x.size(0), 1))
+ for name, module in self.submodule.features._modules.items():
+ # if name is "fc": x = x.view(x.size(0), -1)
+ if name in self.extracted_layers:
+
+ x = module(x)
+ temp = x.cpu().detach().numpy()
+ density=np.count_nonzero(temp,(2, 3))
+ sparsity=(np.size(temp, 2) * np.size(temp, 3)-density)/(np.size(temp, 2) * np.size(temp, 3))
+ # mean = np.mean(temp, (2, 3))
+ # var = np.var(temp, (2, 3))
+ mean_temp = np.concatenate((mean_temp, sparsity), axis=1)
+ # var_temp = np.concatenate((var_temp, var), axis=1)
+
+
+ else:
+ x = module(x) # last layer output put into current layer input
+ mean_temp = np.delete(mean_temp, 0, 1)
+ # var_temp = np.delete(var_temp, 0, 1)
+
+ mean_list = np.concatenate((mean_list, mean_temp), axis=0)
+ # var_list = np.concatenate((var_list, var_temp), axis=0)
+
+
+
+
+if __name__ == '__main__':
+
+ path_dir = []
+ test_path = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/federated_learning/client'
+
+ for i in [1,2,6,7,11,12,16,17]:
+ tardir = test_path + str(i) + '/'
+ path_dir.append(tardir)
+ for client in range(8):
+ mean_list = np.zeros((1, 512))
+ var_list = np.zeros((1, 512))
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+ test_loader = torch.utils.data.DataLoader(
+ datasets.ImageFolder(path_dir[client], transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ # transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ normalize,
+ ])),
+ batch_size=64, shuffle=False, drop_last=False,
+ num_workers=4)
+
+ layer_number = [2, 6, 10, 13, 17, 20, 24, 27]
+ channel_num=[64,128,256,256,512,512,512,512]
+ # exact_list=['3','7','10']
+ # exact_list = ['14', '17', '20']
+ exact_list = ['27']
+ # exact_list = ['34', '37', '40']
+ # model=models.vgg11(pretrained=False).cuda()
+ # model = ModifiedVGG16Model().cuda()
+
+ model = torch.load("/home/liuby/privacy-model-adapt/adapt_fl/vgg11/office_home/global_model/local5_ep50/localep50.pt",
+ map_location='cuda:0').cuda()
+ model.eval()
+ myexactor = FeatureExtractor(model, exact_list)
+
+ output = []
+ i = 0
+ for data, target in test_loader:
+ # print(data.size(0))
+ data, target = data.cuda(), target.cuda()
+ if i % 10 == 0:
+ print(i)
+ # t0 = time.time()
+ myexactor(data)
+ i += 1
+ # break
+
+ print(mean_list.shape)
+
+ mean_list = np.delete(mean_list, 0, 0)
+
+ print(mean_list.shape)
+
+ # np.savetxt('./res18/office-home/feature_extract/result_relu/art_target_mean_layer3', mean_list)
+ # np.savetxt('./res18/office-home/feature_extract/result_relu/art_target_var_layer3', var_list)
+ file_name = './vgg11/office_home/sparsity_extract/client' + str(client) + '_layer27_ratio_office'
+ np.savetxt(file_name, mean_list)
+
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/update.py b/subject2-AdaptiveBatchHE/cnn sparisty/update.py
new file mode 100644
index 0000000..088b75a
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/update.py
@@ -0,0 +1,272 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+import torch
+import torch.nn as nn
+import torchvision.datasets as datasets
+# from torch.utils.data import DataLoader, Dataset
+from torchvision import models, transforms
+from copy import deepcopy
+
+criterion = torch.nn.CrossEntropyLoss()
+
+
+
+
+class LocalUpdate(object):
+ def __init__(self, args, test_dir, user_dir, logger):
+ self.args = args
+ self.logger = logger
+ self.trainloader, self.testloader = self.train_val_test(test_dir,user_dir)
+
+ self.device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
+ # Default criterion set to cross-entropy loss
+ self.criterion = nn.CrossEntropyLoss()
+
+ def train_val_test(self,test_dir, user_dir):
+ """
+ Returns train, validation and test dataloaders for a given dataset
+ and user indexes.
+ """
+
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+
+ trainloader = torch.utils.data.DataLoader(
+ datasets.ImageFolder(user_dir, transforms.Compose([
+ transforms.Resize(256),
+ # transforms.RandomCrop(32, padding=4),
+ transforms.CenterCrop(224),
+ # transforms.RandomResizedCrop(224),
+ # transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ normalize,
+ ])),
+ batch_size=self.args.local_bs, shuffle=True,
+ num_workers=4, pin_memory=True)
+
+ testloader = torch.utils.data.DataLoader(
+ datasets.ImageFolder(test_dir, transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ # transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ normalize,
+ ])),
+ batch_size=self.args.local_bs, shuffle=True,
+ num_workers=4, pin_memory=True)
+
+ return trainloader, testloader
+
+ def update_weights(self, model, global_round):
+ # Set mode to train model
+ model.train()
+ epoch_loss = []
+
+ # Set optimizer for the local updates
+ if self.args.optimizer == 'sgd':
+ optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr,
+ momentum=0.5)
+ elif self.args.optimizer == 'adam':
+ optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr,
+ weight_decay=1e-4)
+
+ for iter in range(self.args.local_ep):
+ batch_loss = []
+ for batch_idx, (images, labels) in enumerate(self.trainloader):
+ images, labels = images.to(self.device), labels.to(self.device)
+
+ model.zero_grad()
+ log_probs = model(images)
+ loss = self.criterion(log_probs, labels)
+ loss.backward()
+ optimizer.step()
+
+ # if self.args.verbose and (batch_idx % 10 == 0):
+ # print('| Global Round : {} | Local Epoch : {} | [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ # global_round, iter, batch_idx * len(images),
+ # len(self.trainloader.dataset),
+ # 100. * batch_idx / len(self.trainloader), loss.item()))
+ # self.logger.add_scalar('loss', loss.item())
+ batch_loss.append(loss.item()/self.args.local_bs)
+ epoch_loss.append(sum(batch_loss)/len(batch_loss))
+
+ return model.state_dict(), sum(epoch_loss) / len(epoch_loss)
+
+ def criterion_kd(self,outputs, targets, teacher_outputs):
+ """
+ Compute the knowledge-distillation (KD) loss given outputs, labels.
+ "Hyperparameters": temperature and alpha
+ NOTE: the KL Divergence for PyTorch comparing the softmaxs of teacher
+ and student expects the input tensor to be log probabilities! See Issue #2
+ """
+ alpha = 0.95
+ T = 6
+ KD_loss = torch.nn.KLDivLoss()(torch.nn.functional.log_softmax(outputs / T, dim=1),
+ torch.nn.functional.softmax(teacher_outputs / T, dim=1)) * (alpha * T * T) + \
+ torch.nn.functional.cross_entropy(outputs, targets) * (1. - alpha)
+ return KD_loss
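+
+ # The KD loss above follows the standard formulation:
+ #   alpha * T^2 * KL(log_softmax(s/T), softmax(t/T)) + (1 - alpha) * CE(s, y)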
+
+ def update_weights_kd(self, model,teacher_model, global_round):
+ # Set mode to train model
+ model.train()
+ epoch_loss = []
+
+ # Set optimizer for the local updates
+ if self.args.optimizer == 'sgd':
+ optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr,
+ momentum=0.5)
+ elif self.args.optimizer == 'adam':
+ optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr,
+ weight_decay=1e-4)
+
+ for iter in range(self.args.local_ep):
+ batch_loss = []
+ for batch_idx, (images, labels) in enumerate(self.trainloader):
+ images, labels = images.to(self.device), labels.to(self.device)
+
+ model.zero_grad()
+ teacher_model=teacher_model.cuda()
+ log_probs = model(images)
+ teacher_outputs = teacher_model(images)
+ loss = self.criterion_kd(log_probs, labels,teacher_outputs)
+ loss.backward()
+ optimizer.step()
+
+ # if self.args.verbose and (batch_idx % 10 == 0):
+ # print('| Global Round : {} | Local Epoch : {} | [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ # global_round, iter, batch_idx * len(images),
+ # len(self.trainloader.dataset),
+ # 100. * batch_idx / len(self.trainloader), loss.item()))
+ # self.logger.add_scalar('loss', loss.item())
+ batch_loss.append(loss.item()/self.args.local_bs)
+ epoch_loss.append(sum(batch_loss)/len(batch_loss))
+
+ return model.state_dict(), sum(epoch_loss) / len(epoch_loss)
+
+ def criterion_ewc(self,global_model, model, fisher, output, targets, criterion, lamb=5000):
+ model_old = deepcopy(global_model).to(self.device)
+ model_old.eval()
+ # model.cpu()
+ for param in model_old.parameters(): # Freeze the weights
+ param.requires_grad = False
+ # Regularization for all previous tasks
+ loss_reg = 0
+ for (name, param), (_, param_old) in zip(model.named_parameters(), model_old.named_parameters()):
+ loss_reg += torch.sum(fisher[name].to(self.device) * (param_old - param).pow(2)) / 2
+ # model.cuda()
+ # loss_reg.cuda()
+ model_old.cpu()
+ # print(type(loss_reg))
+ return criterion(output, targets) + lamb * loss_reg
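+
+ # The EWC penalty above is lamb * sum_i F_i * (theta_old_i - theta_i)^2 / 2:
+ # a Fisher-weighted quadratic pull toward the previous global model.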
+
+
+ def update_weights_ewc(self, model,teacher_model,fisher, global_round):
+ # Set mode to train model
+ model.train()
+ epoch_loss = []
+
+ # Set optimizer for the local updates
+ if self.args.optimizer == 'sgd':
+ optimizer = torch.optim.SGD(model.parameters(), lr=self.args.lr,
+ momentum=0.5)
+ elif self.args.optimizer == 'adam':
+ optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr,
+ weight_decay=1e-4)
+
+ for iter in range(self.args.local_ep):
+ batch_loss = []
+ for batch_idx, (images, labels) in enumerate(self.trainloader):
+ images, labels = images.to(self.device), labels.to(self.device)
+
+ model.zero_grad()
+ log_probs = model(images)
+ # teacher_model = teacher_model.cuda()
+ loss = self.criterion_ewc(teacher_model, model, fisher, log_probs, labels, criterion)
+ # loss.to(self.device)
+ loss.backward()
+ optimizer.step()
+
+ # if self.args.verbose and (batch_idx % 10 == 0):
+ # print('| Global Round : {} | Local Epoch : {} | [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
+ # global_round, iter, batch_idx * len(images),
+ # len(self.trainloader.dataset),
+ # 100. * batch_idx / len(self.trainloader), loss.item()))
+ # self.logger.add_scalar('loss', loss.item())
+ batch_loss.append(loss.item()/self.args.local_bs)
+ epoch_loss.append(sum(batch_loss)/len(batch_loss))
+
+ return model.state_dict(), sum(epoch_loss) / len(epoch_loss)
+
+ def inference(self, model):
+ """ Returns the inference accuracy and loss.
+ """
+
+ model.eval()
+ loss, total, correct = 0.0, 0.0, 0.0
+
+ for batch_idx, (images, labels) in enumerate(self.trainloader):
+ images, labels = images.to(self.device), labels.to(self.device)
+
+ # Inference
+ outputs = model(images)
+ batch_loss = self.criterion(outputs, labels)
+ loss += batch_loss.item()
+
+ # Prediction
+ _, pred_labels = torch.max(outputs, 1)
+ pred_labels = pred_labels.view(-1)
+ correct += torch.sum(torch.eq(pred_labels, labels)).item()
+ total += len(labels)
+
+ accuracy = correct/total
+ loss=loss/len(self.trainloader.dataset)
+ return accuracy, loss
+
+
+
+
+
+
+def test_inference(args, model, test_dir):
+ """ Returns the test accuracy and loss.
+ """
+ normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225])
+ testloader = torch.utils.data.DataLoader(
+ datasets.ImageFolder(test_dir, transforms.Compose([
+ transforms.Resize(256),
+ transforms.CenterCrop(224),
+ # transforms.RandomHorizontalFlip(),
+ transforms.ToTensor(),
+ normalize,
+ ])),
+ batch_size=32, shuffle=True,
+ num_workers=4, pin_memory=True)
+
+ model.eval()
+ loss, total, correct = 0.0, 0.0, 0.0
+
+ device = torch.device('cuda:' + str(args.gpu) if torch.cuda.is_available() else 'cpu')
+ criterion = nn.CrossEntropyLoss(reduction='sum')  # size_average=False is deprecated
+ # testloader = DataLoader(test_dataset, batch_size=128,
+ # shuffle=False)
+
+ for batch_idx, (images, labels) in enumerate(testloader):
+ images, labels = images.to(device), labels.to(device)
+
+ # Inference
+ outputs = model(images)
+ batch_loss = criterion(outputs, labels)
+ loss += batch_loss.item()
+
+ # Prediction
+ _, pred_labels = torch.max(outputs, 1)
+ pred_labels = pred_labels.view(-1)
+ correct += torch.sum(torch.eq(pred_labels, labels)).item()
+ total += len(labels)
+
+ accuracy = correct/total
+ loss = loss / len(testloader.dataset)
+ return accuracy, loss
diff --git a/subject2-AdaptiveBatchHE/cnn sparisty/utils.py b/subject2-AdaptiveBatchHE/cnn sparisty/utils.py
new file mode 100644
index 0000000..1764742
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/cnn sparisty/utils.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Python version: 3.6
+
+import copy
+import torch
+from torchvision import datasets, transforms
+from sampling import mnist_iid, mnist_noniid, mnist_noniid_unequal
+from sampling import cifar_iid, cifar_noniid
+
+
+def get_dataset(args):
+ """ Returns train and test datasets and a user group which is a dict where
+ the keys are the user index and the values are the corresponding data for
+ each of those users.
+ """
+
+ if args.dataset=='office-home':
+ data_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/federated_learning/client'
+ # data_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/cifar10_50sample/client'
+
+ user_dir=[]
+ for i in [1,2,3,4,5,6,7,8,9,10,16,17,18,19,20]:
+ train_dir=data_dir+str(i)+'/'
+ user_dir.append(train_dir)
+
+ # test_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/domain_adaptation/Art_edge-test/'
+ test_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/federated_learning/test_client20/'
+ # test_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/cifar10png/federated_test/'
+ # test_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/cifar10png/test_type5/'
+ # test_dir = '/mnt/data/liuby/aaai-privacy-model-adapt/office-home/federated_learning/federated_test/'
+
+
+ return test_dir, user_dir
+
+
+
+ elif args.dataset == 'cifar':
+ data_dir = '../data/cifar/'
+ apply_transform = transforms.Compose(
+ [transforms.ToTensor(),
+ transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
+
+ train_dataset = datasets.CIFAR10(data_dir, train=True, download=True,
+ transform=apply_transform)
+
+ test_dataset = datasets.CIFAR10(data_dir, train=False, download=True,
+ transform=apply_transform)
+
+ # sample training data amongst users
+ if args.iid:
+ # Sample IID user data from Mnist
+ user_groups = cifar_iid(train_dataset, args.num_users)
+ else:
+ # Sample Non-IID user data from Mnist
+ if args.unequal:
+ # Choose unequal splits for every user
+ raise NotImplementedError()
+ else:
+ # Choose equal splits for every user
+ user_groups = cifar_noniid(train_dataset, args.num_users)
+
+ elif args.dataset == 'mnist' or args.dataset == 'fmnist':
+ if args.dataset == 'mnist':
+ data_dir = '../data/mnist/'
+ else:
+ data_dir = '../data/fmnist/'
+
+ apply_transform = transforms.Compose([
+ transforms.ToTensor(),
+ transforms.Normalize((0.1307,), (0.3081,))])
+
+ # Use FashionMNIST for 'fmnist'; the two datasets share the same API.
+ dataset_cls = datasets.MNIST if args.dataset == 'mnist' else datasets.FashionMNIST
+ train_dataset = dataset_cls(data_dir, train=True, download=True,
+ transform=apply_transform)
+
+ test_dataset = dataset_cls(data_dir, train=False, download=True,
+ transform=apply_transform)
+
+ # sample training data amongst users
+ if args.iid:
+ # Sample IID user data from Mnist
+ user_groups = mnist_iid(train_dataset, args.num_users)
+ else:
+ # Sample Non-IID user data from Mnist
+ if args.unequal:
+ # Choose unequal splits for every user
+ user_groups = mnist_noniid_unequal(train_dataset, args.num_users)
+ else:
+ # Choose equal splits for every user
+ user_groups = mnist_noniid(train_dataset, args.num_users)
+
+ return train_dataset, test_dataset, user_groups
+
+
+def average_weights(w):
+ """
+ Returns the average of the weights.
+ """
+ w_avg = copy.deepcopy(w[0])
+ for key in w_avg.keys():
+ for i in range(1, len(w)):
+ w_avg[key] += w[i][key]
+ w_avg[key] = torch.div(w_avg[key], len(w))
+ return w_avg
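+
+# average_weights is the plain FedAvg aggregation with equal client weights:
+# w_avg[k] = (1 / K) * sum_i w_i[k] for every parameter tensor k.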
+
+
+def exp_details(args):
+ print('\nExperimental details:')
+ print(f' Model : {args.model}')
+ print(f' Optimizer : {args.optimizer}')
+ print(f' Learning rate : {args.lr}')
+ print(f' Global Rounds : {args.epochs}\n')
+
+ print(' Federated parameters:')
+ if args.iid:
+ print(' IID')
+ else:
+ print(' Non-IID')
+ print(f' Fraction of users : {args.frac}')
+ print(f' Local Batch size : {args.local_bs}')
+ print(f' Local Epochs : {args.local_ep}\n')
+ return
diff --git a/subject2-AdaptiveBatchHE/environment.yaml b/subject2-AdaptiveBatchHE/environment.yaml
new file mode 100644
index 0000000..c3aea2f
--- /dev/null
+++ b/subject2-AdaptiveBatchHE/environment.yaml
@@ -0,0 +1,88 @@
+name: hepaper
+channels:
+ - defaults
+dependencies:
+ - _libgcc_mutex=0.1=main
+ - _openmp_mutex=4.5=1_gnu
+ - ca-certificates=2022.4.26=h06a4308_0
+ - certifi=2021.10.8=py38h06a4308_2
+ - ld_impl_linux-64=2.35.1=h7274673_9
+ - libffi=3.3=he6710b0_2
+ - libgcc-ng=9.3.0=h5101ec6_17
+ - libgomp=9.3.0=h5101ec6_17
+ - libstdcxx-ng=9.3.0=hd4cf53a_17
+ - ncurses=6.3=h7f8727e_2
+ - openssl=1.1.1n=h7f8727e_0
+ - pip=21.2.4=py38h06a4308_0
+ - python=3.8.13=h12debd9_0
+ - readline=8.1.2=h7f8727e_1
+ - setuptools=61.2.0=py38h06a4308_0
+ - sqlite=3.38.3=hc218d9a_0
+ - tk=8.6.11=h1ccaba5_0
+ - wheel=0.37.1=pyhd3eb1b0_0
+ - xz=5.2.5=h7f8727e_1
+ - zlib=1.2.12=h7f8727e_2
+ - pip:
+ - absl-py==1.0.0
+ - astunparse==1.6.3
+ - cachetools==5.0.0
+ - chainmap==1.0.3
+ - charset-normalizer==2.0.12
+ - combomethod==1.0.12
+ - cycler==0.11.0
+ - flatbuffers==2.0
+ - fonttools==4.33.3
+ - gast==0.5.3
+ - gmpy2==2.1.2
+ - google-auth==2.6.6
+ - google-auth-oauthlib==0.4.6
+ - google-pasta==0.2.0
+ - grpcio==1.46.1
+ - h5py==3.6.0
+ - idna==3.3
+ - importlib-metadata==4.11.3
+ - joblib==1.1.0
+ - keras==2.8.0
+ - keras-preprocessing==1.1.2
+ - kiwisolver==1.4.2
+ - libclang==14.0.1
+ - llvmlite==0.38.1
+ - markdown==3.3.7
+ - matplotlib==3.5.2
+ - nulltype==2.3.1
+ - numba==0.55.1
+ - numpy==1.21.6
+ - oauthlib==3.2.0
+ - opt-einsum==3.3.0
+ - packaging==21.3
+ - phe==1.5.0
+ - pillow==9.1.0
+ - protobuf==3.20.1
+ - pyasn1==0.4.8
+ - pyasn1-modules==0.2.8
+ - pyparsing==3.0.9
+ - pysnooper==1.1.1
+ - python-dateutil==2.8.2
+ - requests==2.27.1
+ - requests-oauthlib==1.3.1
+ - rsa==4.8
+ - scipy==1.8.1
+ - six==1.12.0
+ - tensorboard==2.8.0
+ - tensorboard-data-server==0.6.1
+ - tensorboard-plugin-wit==1.8.1
+ - tensorboardx==2.5
+ - tensorflow==2.8.0
+ - tensorflow-io-gcs-filesystem==0.25.0
+ - termcolor==1.1.0
+ - tf-estimator-nightly==2.8.0.dev2021122109
+ - torch==1.10.0+cu113
+ - torchaudio==0.10.0+cu113
+ - torchvision==0.11.1+cu113
+ - tqdm==4.64.0
+ - typing-extensions==4.2.0
+ - urllib3==1.26.9
+ - werkzeug==2.1.2
+ - wrapt==1.14.1
+ - zipp==3.8.0
+prefix: /home/han_j/anaconda3/envs/hepaper
diff --git a/subject2-AdaptiveBatchHE/fig/batchencry_server_client.jpg b/subject2-AdaptiveBatchHE/fig/batchencry_server_client.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5eefaee56ee1fad50769d7dbd0f73d40d7852705
GIT binary patch
literal 472910
zcmeFZXIPY5wk`Y?1r$gUL~@QwR6uf4B#Q(kgNS62C^=OLl5<87CAvpnbGrBLdwchH@Au=R;(?+bc&gT1YpyZJoO7&oHGVY@Tvw7)lmj3b5Fi2k
z0>2?r9SsZyzNBTQf#Kp2QE}a($Hlur!z3iYj!#0va9cox
zhmwJvOF%-LpM~enU12@}J_s%@J_R56z{MkA{Pm1aK=bQ~kK(sizy118e_XXeJ^%m&
z3-a3o`0E3L0ma0^hT-5|!@CY(K%h_zOehu>CMNiw-r(N=Okyk=G~iZ>Sr?m>fv?
z-b82MFh3}6zNztTkA?q*qc83?GI9z^Dpof3Temp`?g8;rQ(u
z8H>OIE9#d?zghIJ8RYwaWzoMI^zZguH37spSf6plpb&8M#1JSkt>z0?c)Nh9vI=mr8!La1l+)_6Ll2Uf{s>faCN)DO#rs`=H(D
z`qbyP%8rhe&GJ%aucry^-8&z+BZ-v%HnDfajhDiu9#-;gd~LJnuBcS4cNc^D2hl%Q
z{emP)JxZT+ZRan(?9nF>l{z&KdNa;@ArNEWhh{)tBZHU0!#{|mJT0rCT}sA;G-Na6
z93hEbqODLJ!-kTEN1rE*-mK;`tnfyAq?k(#I>W>ran}I4chXn@DWC@VpQry0Q{d%^
zf_Ic`RPMjf68v@}+X}UYV0y(5xdK$)-#BdAmZqE7>zogWH^xvbTCX^9-NEYnuyCKs
z-XW^^;j|g-N!VIOJK&m}v&
z?PFgk%PStIv$pwe%4RK9FXuxMQpi!|+q&oXIC0kYJ66Mb9VoV6W6NM2&*spLKvqTN
zx3%pY__$93NWPL8)3$FKEbBe=C{|^$usk7!L>FFuA8RnCqZ-VA3v5SN&$Q>LPj^!D
z@LwLN!BqARjzdGAzB5K)rxLv1@yg>&Qk-zOdr27Ot@kIIKu~okd#F#^#e}}IMY`fX%XdsD2(HTX1TdrbZ<_VP1ya05E4(?
zd|=;>bSCb33r;@rEuOZa-ZqG=$E?3^R5TU>oL&JfR{(~Wb2Jxd~_Vf*}bmGwkK;vm2Mu
z5$m>ZpVPYzV3ZNh{sw#M>fj&&*b|
z_^IJ}cE^zvNW!J2G?7q)a@~ybv%9PXYvnVwTSIL#_(?`}RZ>pQNc@3dqnj`Gwj4^W
z(rseo?#kP(zM7&@?zNkobBd=GJ3?Z637|g;1tn-n(w||f$G>4csJzJ5#*Bg`RPA?&
zrIkTaQ8$)4Qa(r}TD)||o3>2`#`PImBg+*1x@qQ#KGCSb-nE};yCH;j54j04cd2k8
z%##n+u7LYUB9lE>dp)PoljD1y)*q>N4%Z2l7%y+pI2)keV~D-!kY+?DqO>oGM}ixR
z8)jC?LhVvYyY~^h#ig~9Bal9QpJ6fr^MhxW;h4==KI{+4V?jB#7Zsa=HRNliVgblOPN0Bl<#a1~lA0ouZk^wyE=wVV0bU}4Br04&3(X$|
z_R^6J>)u_VoOsQ+R5UrzpN_r>cttzEob+DeuFKb>47=VtU7nAI9gsp!Bmh!Ki>h7ouEa3rt=*a4~ON+*d?ugE8`p
zSb%8nl28N}A$1%1as(aTcYKG-Z|@W@WC0KO8!4;U*BqUTXPlOJ#F(OSl-FYJE^Vvd
z6~_1Udoyhk1<5#*MaSrHGlk$@JN;@})p=f4S$56dr?3Zp4OnWxUcV#_H%n>vNI=8}
zka;+l9~MWl-IG?%;-g<;0UXAc>H!1voXPy@f}$7yXE*1fn{DK3y2_M72}97{R?5vA
zkguY2o**1eRJLh+sq(d=jItTozJ3MtGIbI}o<#Hj`|x1+{*Brzpm5*{$WVh-yYU{o
z>@8G;pDS8Bcsp(I@(3wxd=cm&93SOtw5MBMzI+Na(utGbSpQc4R`~3$PA09AjuJ+6
z?8PXKB{cmaMO@a;0W)&Uw&-QUroY|5ZE2dhZP+4F*-H%Jx!d^Gc)TCmJNq1t9{3_8
zBAcl6eu&<&TEw`2108azic&x&v_?J+0YvS%i|yr*}w2
z!-|Rci6e!>oOjy+Z_YgSR^{5)q&1bkA42V7q3r^|M1`;=jSv=5qr?yBY9p3)%(~uN
zUV@gWl2GeJnP`c;hS5oRwI7eAM1f<~N&O4WqO*PY4_s*)?dBqXGQA~*pIfmhI$gos
znXkM*FRJ;RMRXyZnkDPhVBgXx_Q--W1L5irlP`eNB<-m9(#R
znwI%~-sn0ag$(uzIQUvbGfU&gbgf|ssDB?n#z07lqKlU-%TrD~*|}dKtZF>3fB_w2
z_EqR*vVugWR)+LV)k;I=xM_8jR3mh4Co$`=$#MM70~r~__CJ$
zGEkb{S`F4l$9V-rg?CC>~<)#D}>
z33K*E1%NXhfI6o-%1yv?uJrD=aW$uRD2t{JAF?qz3idmZ*8?WLdr9ROF&dhvl+}iv
ze`Hg0Svz}scz(wcx|mMBu9e^#+nKH0fEm5Log&g^x#oKXY&ju`bkL_Zjy8uU)r9)J
zmbOV(06}Xz?rGy}^LZ{5Zy~W+OzOIRU99!+WLF6_d*wseGVv$OnATm>h}2wYQSC)S
zSrEDNNw`^M4
z$#Z4U{M5M%7x=|Bgkt}qu5b&X5~VCtcx^IpO5G%y
zl}uJZe;ga+t1u~;t@=W(_0BQjiL+OK76s1&xjpCs?q&XWf-i+hDC3>xkzMPDOAKM3LSr%o#~!
zbp-?{7lUwGcyM`MbOqd$rtCfwN1N$DP;66uFw4jj_XEg!W#48p@a^Jb<9EF)pppF&
z5eKn5wz&c#6~QR^VRD%(Wj^!aM$
z3J*F5H0Y`|-p~#@`nYK80;>|DnUpFan^gI%^=8zMW?W22SP$-!FB7%g+;f{!nT+xS|TlGoXs63;{lTneD5sP{-zl?nL0ENZ@K+K
zGyW-@n8Sm}132YhxB}jY%M)h28cVx5wh(Nl$eyO}j?oe_W9sq@_&Eq0bAq7Og1w-N
zio9!<;_QoHwXIj+32WI~xo`${yA!&Qrk4g_@WfSwZeiXZ-DDZr%3OXv=^MyVf&`)0+h~yo6KIW|W=3*jZ
z@}8E%dJQQo9Rw_Tf<}uAt&OL#?JgR+6Q9Vjhl~S9bX8|8RT&jG-_td@0(6@wXp{YB
zLO5v&mxigK)d}pQ+U7tikRJ@3gG%HK&m?eH!muFooe!1h_UBh|wXg;tRMu+s3
zs7B)6-)*hBSI_B!&*T8A*cHD{o)_Xl`y-7m1YI_tR>5JCgs~5m?x?YF7C~`!dP`X!GFRsGbjgQjr(lX^971CU$F95tva+tXk
zXqBQq=jWhx7&atwG5P_o0aVdK!soG771)E3BBprRzSU7SW2_;2nJpM~6KswRA#@3N
zNi>$lSe4JP9rw3{i?)a*O(+Kw>UzcNV>J#rR7Ueh>tMy>R?s~c7($;a$B<6HpcTCFsD6+6B6Dv-L0!9AbnOVSsnTTj)n;Ap)`?em`-
zMkip41?2?5TSkW(*TyuifClX=AQVaXq2rW?Snp;OC(&JK*ma^4e}H>?Lpp4j%m1l+
z#C6jsshV5Rge$Cs5zZu}Ln
z_yhQdkD!{Q4=uQd(ejnLj5FXhM}!PryCRa{Gq76D8|2}gAC^dVvbDKi%M?Z;zn5Tq
zo{N$5`!W4tHet%yFC7f!ifl_h3*9QgC-$56p(}$(w70b74`Fj4dluvrpVGIKLAt>z
z^zWw#dP!CRj#08E@nXO6+z?Q-Ly0|#MkpITBzgE!sR%35p5hAN05)$y`SnpZ>wN?2
zA{Lq@HG&Yr!k0`6mrfA>KQ1KE?-vD<^TBPX=OSdH)h!H`+#yTB89kUsuLsN^30<{N
z+y3wUc?x#lbx^(|eI7vHq8qr6ye9Q$vqwu7o8RhA;raBPT1ClBxMzg7V9hY7>TX_w$i<96oViH!F=gCPm)sDOb{=W(tl
z&eN|;mSuxF9o}isOi`9ZZV>p+5`_x>)#^wal6MwWUOVEE7W1ny%B{r-c$OA~;^w)CoEy
zRx7^4hUj!W*7->yl?uF0|Ghc?=&IySMfBs(6i6m#PYssyM8gO&(ILf`Pa)q1vFR$8
zIK{B>+zF0Ty{Jm>K&C6=ay|JgfIC$k507mcI{?_7tje2Ig^$nQmg3rL^@tnm1-
ztw-~7B}@jJv9U2~$K-tBl8Ogp|IEj}PZ{Lr7EsvdV|GzI_jfpdfM6PNzO`*)QKhR(
zzUq8G=Dt*m)Xzgm-=F%w-f#nx)fgYgeLJ7R&KLZ*nxwXEOu$S(8;Ni8y1gRV-Kavw
z>8#{Qk9x=4F4@0~Yt$gF4gDRi{X49U?(?bG+|)2;pRwTd^HpnNYvb@K80~}rD~*4GxLi8t3d6Q6PeW!ebb~?!%?z+j&UJp-
zn){b#Gc@&kou{z{qjH|-szZReECplWU$7|hpJS11*5;jAIlAv`Pg#(<*o4|Y{y%Td
zqW`Fyv*zOOH>aBD^K>IU;2V-qPBlbNFNUD-WAuJ{yhr4VD_{sg{YR&TfROZCz#HEA
zEzHn5X~2No=QxDO2x;SKH7pfv?SQX*C%5?-nm8%CPa`v!PX}Qle|(oe!mFzi>cW5d
z4R^tA{8y#Y+@?vXpgj5&XxE(r6Vq-j15w9W9bnZ-=0b7i%)aqK@m?j>50CjK)>b9Ncy
zLCMa2AyN8s
zdBx*lJ$H!C^mTqkr}bPYwE@Z|JH2u{YDRJ^Za>bU@~k}wi68m62s}q)3{k{-n)Ya8^#Spd6#;(PK?*ti-e0Bsd2EF5T~m%O4kH
zL}#wkK4dRHBwxIZ@Q$?9EGbZ#7>vRHs8I8Md<%XI`Pm1Ah~1wI40r32)vB~6UuzO!
z&P2+p4FT(pWWPn?4NEAy0ZO7UjdWJlaiQeX3Sh5I?-nBgwwTEOWNGzfP~K@s9o<$J
zf4LLB+-A&U4i&&g4R-I-`GetY)7uVqK40s^n8Iix7r1`WE7d<4Bw^yC?@YDf&t_Pk
zzKYky#+met@RqQ|a9`6IDGgwNa$-@ks===f!r1?8%2?=c9ClM+mXEw)k<0qLr*i@s
z*hdnUAo%Y!66=pMSaZ-H!?}v43*HL_HMzHSWm4a#
z{T^8W*bc(P^3eM&Jr5GqjQ9~0CAXUK#mIiqt3QP_Yqi;qzd#xZK26bJ`kFMkVTk!9G>Oq0j6oo}RquElEX^Fxn=mF;)Jne0O{!aG>BHTKOp&UjUy53xnNOK
z!d@2got{U0d>r#NbS^Qi8OKNh!y9cbV;d^(U2!1$<4Qnx=ik_XB*+Ht>T)}i+`n6V
ztu9{T&9AlM{WG^LoLn)}v~2Z%&MMuC$r-sw3L*RWsok1dqNS&+k^eJ(4DOZ`$Us-&
ze-a?Mt>%aYHb32DsBF}>i>HixP6Hn|CL|&D+D+%Lm=!5`W
z|3LHqkV#Aooy0ZnW`M`usfa*a;DF=J!r$n))i
z2fha3^(NRK{O5_2XejlMo&QskQ~3XDl0)!+o#b#pMdIZd(*B>5oakRkPRw7D9AA&f
z)=>m|Rh7>7Wa;*&lm@rDIsDLC2ob42aS&Vvbb7Wz6KmlIT&8=h+Pem?#7LuncsI57)c^p|kwpJ#}}-^8uVd{$m9)X71=nlOdk?Apl(@z@0+
zGNa#FLFNedZyPKq%uUsODd4!~^gW1Mf67Ch^d6TIY#DEBm_1O!ih91{^Y#i51*P
zNhX?3BP3u*B%#8&=1|~y&Bn8I=U-{+AJIAwrK-#xYN&jc`4`A3!bb2PrMlpe&biWT
zRTICs8(Uraufxr1$LX=&eUAb^HdFO5-cMQrkU=$U?9!!BYVs*ptW81j>5fba$*q4G
zZG=Fy;d#WObjKC=e`ZmE{|6Rj7G`?hIT(HZ0}6_|{`m^lessHR*_TcjJ?U9v)g5_m
zLwWj-NluLT_q(}G1}Ekq?f=tlDI}muqHpv`(7}KK)@ytV$`ZN207ea}I7pAdxymq;
zxh{7(NzbE44~ke-Uwr}dDe~XL>K_tTn6`~$$%iuImM=FLjD2r*qOJj<>LZ!u;b{wI
zrbh3+V=H2UQm)_RC;WdFJ**$2gL4eJK4I1WtfdX=%LEgM^72S9;wJyKc5~)4K`^}2
zt$JIY8eaD=0-L6Ft}O^`iXgCkhl(AXD8;TzQyoHj=e*>H3zkijr_?~!xT0yV3$LyI
zmE>Ome-^}PWy8;1iSt@YIKCWQ7ykP+hvT25IbbPdBWk9j$hy(Q6lvTw+2++c9tnbd
z0}Z^h7Ct6(1$>A>i2cVcWa1MpO?&=DYYRywY_vV7Qz$w8{9#d!*|(r_=l$`tm>PEY
ze;PS1qw`lh9uNN`=%A+>2H3*?O-Kx*pT7do=sHl7lYXY92W8JBMg&%u*XCfx(y)G1x1ix(vLaE1~
z>>%q3IO>D{xHAYd{!i1LrwRDbd8R@5SiH3J99s|A{PEYG95i)rC)nBUUaz6t=NPHf
zm-p%GRCe53a`Uo12Mg}MR%k#YKu^SN|He|qiy6<{!M>30us`Mtc=;-w_pkPYEI8@orP
z5}LNGvR%i7vVe+l{;G|hdAAD&%P(f?>tt4w7tOB~W56Ti7#zKSU{t(Y
zRN*+08lN0}=UIvEV9n7OLoa+Fcq4pV0Fv=fHaPYxpWEMNjX1bnl|1W`zNVEjMv2~L
zp|*b=r>3}w5Xk+zt~X;%edQbFXVcA?N<#hO`4}ykbuof>PY1Di&$-}Wq#s;aXgt3b
z`l}^Fz%ABBWi|^hakh~>psB8m*0;O@tc?XAe^eOq+txkFY-Wf!F&six3G3vqyn}X`
ziof!Y@OeEP;i(2Kmm*%*(uY?#rwVqu_zWC>(iFc)8c3`Q+v@5E3`0!q@%flDFcg-b
zL^Iu)&m`=-1^M5KuFcyQQ)yTBGsc{9YP!So$xQh;J8&7A2pthLsCK|g4U1M49B&j%
z=lkI%s}vXaYYt?5;+cQfx-^+h5ksns?zD{fBk?Ya;5WKaC*fncQhGp!`(i)YtaRef
z%V;~1j$XxVS(gJ*o8Tn+^h%mR?2o@3}tThm!XuZ
z$8~WS_>NgT)G*5cHQ0MECG><9@@B+r?{;OrI0}1IvOVxKx1>RKOTY0$Y8r#f!8d8U_z~<(r
z^)4j7yU#tjnxgS5;LCIc%KG>U5QTK(@Brn?e??~q)@X(e(7!!Xhc{A}
z4*>A8&T_y#Uy62{$5~;!jMLMz)?D)8B+{g>Crou>gz&*U{#x3!8Z7rWvSCk8;&XSx
z&5I*c#@%h{tM8y7rkY>)7?EG|bB8xyiIjkYzzd|8JJo5TB-R^lsZsBYf;DKb)ZxB9
z&BZO~@FlI6PB9AKW?8?!pvPo8;nC}Iqs;qk*!P(SiRUV$MKV#5I+s&t^y7y`C3a^*
z>E-AB=@v65=Y1BgXQYtLI7lD*#OQn3nOtnEhh=>YmImh&>V`vC;BteBsHfo&NW(x8
z7Qih_+)`BZsbdVyD~M~{t*GDAb7-jcH5@`#8|cv|ixSUHP4-R#@@G5yG;8DIdneZi
z?qID%2?PEjXE~rk=DXYQ(o+wKEtlPl`tB%}mSyG2p0kJfjT^`GwP9fY)F}8F$5>gf
z6a1RxX6|ECvSH~LuKU}Q>@27DQEtz%UxxtWdkF<_cPrB5nW*v_vjd*(2Yja_w&Ly@
zB=U6G2*JPYbRs@!~4|HYMdrxeQ;flXgFcR#R|HxQs9piM6~$~(m7+c=b`Cz@3Z`Au
zJGM%c_JIcv3f^O}qjNC^TH&PD$*HAMA!Yu3LqLa;b`d2bBKm
zUlwkDdSqn#J%f~Q9qLhUY1~^#*&%yA#}Wja
zJ=9>@{nY1{FGN?O?9D7#5m;HX!#8dYa
z>%_F<^Xo>-&2%MTWnB}dxWdS%@mpX)Nk--2r%z+)62Db
zG{>?|#wJJqt9*L)y)Vys#Orpn3AekR$xA?xgprE_ZQL^V%M}GeJtL?wqI7Dwuh*?6t7@J4Q^7q*aG~PBL+S`Zi1G$G@E@{9`D)1^p@x
zL4B5RSyhOY@q7?8Cij|WSEx5$n_7^twsaK1gj$O{*Uy=ir4JMrNUR9^#QlB3S8Lxa
z#sbs5_zv_1;0{*T4txG>MZxg&zm4_J^(=*OJ-G@^_TOO=1MagJ&U90jP@lgL79xC9
zdMB-ien&alGbgJgk6MIB$<}Iz>1_6a5b*GRH^a}pTTpU!@W4fTJt?lZiUm!=>9xyO
z4y1;iSuuL^;j_zXurfFDPM67*&;2&UZBpv*Om~KbU%tIAUyLN=To~xEQPpWk
zpq1-<6u_d$!<2A+Dm5dO)RwEwk>VIhr1!TO60cm;?qtkVV*KXxy|Ix&Jg9BEobO^#
z-JP3Z3un6@a6E1-vk2lc(?p&4g}+xhfJKN9$B17nhrDoo1+M0azIAT4thKI|!Z_}H
zyoWNWAH%0Bz>*3`0F6C8TTE={CD+-IqbIPYdul3|Fgp8G-svFs3P^~_IP3GQxP?Jk
zZWq3H+d9a?3ECG;#;MiwW>Po+Y({KfUOzWZF(jfZ!n`}%fz=$t=wDySPKoo{H$3bh
z!VQT_W)uXU9DjZVjEfBj`_WtM!|TJrA_1Jf1tG!r57cnchsEnj+1@!r9n6@yT+P#bjy2UQIO>d_;nwi4dvAz6(o2j2OY&=aZ8i_ps5d+`9}vrbS_|yj>j(N
zOe&1aq`bb{chXtogYMEs;vFHm`Li1xprmih>3`3;10`yXK`mzTIe+3Jae^vZ$M`(l
zZ%g+&(cJD8kmHbEyV(wS82)=q_{*Dkod~t7(riq|MY##Cj?M~LLR5n>4QdfJbD@9F=mu
zjy&sDFKZWDuf}s=<~8w#rZj!sgB%1*Tn%O>O{3Z#NwOhop3aiYIybR%GgBdZvY^~F
z4!W;^vrMl9EK1^gS&rNjc}9P8j^zZ?J5Q-Sjnr}mP@9Ug1tfVG!BYPB-6
z6SB9Od93B)wnP0wC@D8Df>rB>>MJ#v!@RfF#(u}FikzLGYq%|sSreUUVO?qnur_`T
znERVE{&Bnd?$5V{^;R-I`b^}aDdAHFSvA6yvriZyD}gRALaJPq-Q^`j%X01|Gv}rT^iyFHHb4=S&0tc4vYke4g(F
zJReZ>Z)3sd#%sG_s4Ut~s#Gi$jmP7-Ssgf#e)3!8Gd<`TWA$*^S+EnBPWB{KzJh;_
z(Z;K#KOAtegf>f(d$D{$F)8x~dENIhBQ%b|(e>(Ny+7x`@}?^4JyBxuDuUNK9qU{f
zX(-R$I&qJB$VC#xEGh#3+e0PIE&NRxag47>9H}uPUz(*>G0=5wc`c6w47)
zDmm|XNB){62Jgfb(5V$Gb8N(MIKvRDncxC7d0?*o)_dZs0h|y#K=Tnr4oW{&L^Waz
zp3N%d^i-WuVK79|g)^5#CXT0nMTipNoGT5Mm$N@;;MzPe&{MjzcUwY_e`=cK7L?Nf
z{XJuSEYBu)YU9BRlb{(aRUTsJFiJBkNQ*^!VhGSJRo3YDwpM&b)!fCqPIjMo1A7}J
zJQYrmbuB&r`X0`416gq{%t2Mku6XL2FG$=U&~8ayw~(Ff#V>nHs~e8=Pov7`_$)MgYYpZ|c67;c-D96YoSZ1YH*kXhl5OxRvcLZu%m&G3mX
zBJMeDKc3tXh*eGL_a8CT>-7~M&!U7`1MYN4Dxg%FjuSkYmD61jfn(a*GxxGa9aI5T
zj5iryPG^DWAT0H^($#~kka3i>#Y{cNpabr!AM`yI#r+072pbuJtj)5aPZqp+OlU_=
z5}(}O-%0oe#gjjbpNKQ_oR1MZkS30tgtqcJ;A_dK*I0!)Od7Q0l?c_uT9Q-0ziwjN
z$R_`45N5JSM7myMz^3AO#Jxbzw$z;<_k-#FNaYuheEAn&WO=a*M|~PuG=1V2r4&N2
z-yuSl<4)Q6qDKXAes&H0CTDZ7ELpbxP(3dNrW~*KC^y7MEo|C2&;wX4E1INlIeBK~
z@M$7T`S{&RwQO5s9l(W0UCjDx6WFa^0i*&VTMgg2`x0DqEi0!H6lJz@WCGkV%l6oc
zhe$##qRH3IJ7-2`&se|CmDkBw(q(bpxB?>7VA&Hd$xdz+8NVjo`*QF+jX#=0$k|w$
zhCb+0%1!@KQm2E&_Q+?iyrL2YrlI?j+UC!+8-z?URuMRtl5Q`RlQN2_!iW3Rxp@{f
z$ElX*ag~J`Iy}zbkwQk?(mN*QBIzzXPM<&Us=AJ$w<;|v1NMB62v-rp`gW79EhLhx
zhA*(b&Xp-&pUfP2>mm;BSF%ekulvI`WlJk%CwPvLRv)LQ=5s9~1ENC-aU|Y==T)07
z2O7l7gOev$#m1`MO>RsY`$l(SRkJ9ymv%k*rUF>6LtBjYqS8yHg26hlcy}d?*qzR9
zB5Ni9<|1f8VY(;sdr8jJ$<+MC2Yib=B+_DmXVPjgJo0VE
z1wlkNS3=!eO0T2psr->_H55)ITy)
zhE`r|4gSW$9O+3Lm&u*Mv^x?!<~Tox>qEHiajsF3=lw~#YjKvfk^{=g=HtCF^8@0;
zaL!$~m(ng3aFkLJ>KaS42H7fj-u_9aUA%ylG4^X6%Yx0y
z+o)5M30l=BLkQu`-AR*{>s?wx-+2&X1S7-*@91ZEdf==-)nKj*bL8uKWzWq0;IwT<
z8_ON{eeX|NPGJ%s{&bM0*$wp(bhDQ8PWJ!a-rlrot*nXvybBEO`R&pyE;H#b0oS*Z
z0&aGC-pOX)+k|N9TwIlIW^2-%$|}Mvw||{D9&`(B|EU#A{;tTV(~f>RQiRR@?hr*M
zOM1>ILKqJ|>DfYvj=q;fzH^bS5HKjAAP*K#A_ieOIvavx>UrxYcUAF-1aX%{NHQ+h
zNg-=LcejZb#Bd+k-Mg6jv|q?tCand7MVXo#Y#;Z7)Ge$|-uRPsLdlOYWpVt`Y!TRA
z$yYv>Cxf4mL=Vtg;&(5>JRaB6EeKwCMs#c$wsxVlTjt&dFAeitMTpVrcP48E8sk^m
zztotF5Bd7tM3c8d!BKpLD|4qvSi(aM`{o?D=)y0;$2V+;pm&PHN$Wqum@nAZnr6b@
z)V&9@k&QY=6uI0Oams@l7f%10ig^3u_Ji>C0V2OH7Bws*N-v#30WJdbRf~6lDL2Dw
zR{GpGnxX*q?*X1_FfoTSMYIZ|A~RLiS+K9N&NtVI8UQ}T>`EgneqBcmmSuHNwJ#GAYQIt?*e~3rvqe@FgC<+jEb#iq*tTO}D-4_12QKj52WdAq_%nI4-=m!K8fc4GM8n-}H|o_TsZr
z@RP%b3E5&MAUZS=VOvYAo=f&f!nM{*_Th2`Ekm+x3;zbSafXh~5v9NkJ>WD2to4Tp
z^fT?OsyJ?ThxS-TK@!rmsvmLP5{-deT7Vy$j(_gGSMljq#H{s{V$!M0CpQG%CnC#b
zIpc)w82+hbU{hRv@q$w^v~R2^W|r&JVVT4tDk7?*E=Il*+yFRmE3os$Z@e`6;aR}K
zBK1?cW#-sUFykC{D_svy$gIJ=i$Q6)CH@R+DidnbB8gf0%UAO#iow8@zf
z@CY$dI>F`^?apkz-ebzlpJ3eCCyHSS{lQCiE$=_#f6;bo-|dFrtd>}odu1_)fe>k&d~zvK(72fi|M3~9%shA9`xOHRd;siiV`bJE6rFyi+E;b(wuP6uzjbH7Cv|)?f`3z*wxJr8Co{Y&Tak&KRZ0?_
zg8a17!sFn_Rlp!D?SAS8)?1{k(qZ{250f*qUA(JboZ<|q;n{YcV<04W2GlC6!k_gl
ziWooVVMtXaXzJzS+3>W;+=e{C0Wo
z2UrF$A2u0biQBpR3`>>6EP`nwLTz~=&S4*fMd_ROloLZJsYrx41W>UP-*A_`Eiuvq
z?%m$8iQc6V2(*tzbZp1m`5_^V#J3BiNA1iVfs^_p3kfQC!NjO{i9Vn_g#(z~
z0-Lj`S()p`#L`NQ<_~&H$@hD&LCH({w*10b9Ooht(Z-zsY`nLT)|t#<*H1a@<kIQjw;4WtCGzP&h|9sTH#iHtQ|q+7#`H(&(F!FRhKxgmp(h_Id6o@|8#cvg!k?M~4