added past code #1

Open · wants to merge 2 commits into base: main
66 changes: 66 additions & 0 deletions dwc_client.py
@@ -0,0 +1,66 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import tensorflow as tf
import tensorflow_probability as tfp
import os
import numpy as np
import asyncio
from fastapi import FastAPI, WebSocket, File, UploadFile
from fastapi.responses import HTMLResponse, FileResponse
import websockets
import uuid
import glob

# aliases for the Keras / TFP layer namespaces used in sample_net
tfkl = tf.keras.layers
tfpl = tfp.layers

app = FastAPI()

client_id = 'A'
model = None
inputs = None
outputs = None

# set up a lightweight Bayesian neural network
def sample_net(input_shape,
               activation=tf.nn.relu,
               batch_size=None,
               filters=16):
    def one_layer(x, dilation_rate=(1, 1, 1)):
        # reparameterized 3D convolution; the nonlinearity is applied by the
        # explicit Activation layer below, so the conv itself stays linear
        x = tfpl.Convolution3DReparameterization(
            filters,
            kernel_size=3,
            padding="same",
            dilation_rate=dilation_rate,
            activation=None,
            name="layer/vwnconv3d",
        )(x)
        x = tfkl.Activation(activation, name="layer/activation")(x)
        return x

    inputs = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size, name="inputs")
    x = one_layer(inputs)

    return tf.keras.Model(inputs=inputs, outputs=x)
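For reference, a minimal smoke test of sample_net (a sketch, not part of the diff; the 4x4x4 single-channel shape and the default filter count are illustrative assumptions):

net = sample_net((4, 4, 4, 1))  # Input -> reparameterized conv3d -> activation
net.summary()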

def train(inputs, outputs):
    global model
    if model is None:
        # input shape: 4 x 1
        model = sample_net(np.shape(inputs))
    _op = 'adam'
    _loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    _metrics = ['accuracy']
    model.compile(optimizer=_op, loss=_loss, metrics=_metrics)
    # Keras fit takes x/y, not inputs/outputs
    model.fit(x=inputs, y=outputs, epochs=1, verbose=2)


def most_recent_consolidated():
    # newest consolidated checkpoint in the server directory
    list_of_files = glob.glob('/server/*')
    latest_file = max(list_of_files, key=os.path.getctime)
    return latest_file

# ping server side
@app.get('/')
async def send_weights():
    return FileResponse('model-' + client_id + '.h5')

@app.post("/")
async def load_consolidated():
    model.load_weights(most_recent_consolidated())
    train(inputs, outputs)
    # a Keras model is not JSON-serializable, so return a status instead
    return {'consolidated weights': 'loaded'}
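To try the client locally, the app can be served with uvicorn (a sketch, not part of the diff; uvicorn and the port are assumptions):

# from the directory containing dwc_client.py:
#   uvicorn dwc_client:app --port 8001
# GET /  -> returns this client's checkpoint (model-A.h5)
# POST / -> pulls the newest consolidated weights and retrains for one epoch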
88 changes: 88 additions & 0 deletions dwc_server.py
@@ -0,0 +1,88 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import tensorflow as tf
import os
import asyncio
from fastapi import FastAPI, WebSocket, File, UploadFile
from fastapi.responses import HTMLResponse, FileResponse
import websockets
import copy
import numpy as np
import uuid

app = FastAPI()

datasets = []
num_clients = 1
model = None

@app.get('/')
async def root():
    return 'root'

@app.get("/{model}/")
async def update_model(model: tf.keras.Model):
return FileResponse('consolidated.h5')

@app.post("/weights")
async def load_weights(image: UploadFile = File(...)): #do we still need to load a file here
model.load_weights(image.filename)
datasets.append(copy.deepcopy(model))
dwc_frame()
return {'consolidated weights': model}

def load_data(path=os.getcwd()):
    # `path` was undefined in the original; default to the working directory.
    # Collect every client checkpoint named 'model-*.h5' under `path`.
    for root, dirs, files in os.walk(path):
        for filename in files:
            if not filename.startswith('model-'):
                continue
            model.load_weights(os.path.join(root, filename))
            datasets.append(copy.deepcopy(model))


async def dwc_frame():
    global model

    # load weights
    load_data()

    # TODO: only run the consolidation once all (or most) client weights
    # have arrived

    # dwc implementation
    # TODO: decide what the priors are; `priors` is still undefined here
    model = distributed_weight_consolidation(datasets, priors)

    model.save_weights(
        'server/consolidated-' + str(uuid.uuid4()) + '.h5',
        save_format='h5')
    # clients pull the new weights via GET /consolidated/ above


# LOOK AT THIS LATER

def distributed_weight_consolidation(model_weights, model_priors):
    # model_weights is a list of per-client weight lists:
    # model_weights = [model1, model2, model3, ...], where even entries
    # hold layer means and odd entries hold layer standard deviations
    num_layers = int(len(model_weights[0]) / 2.0)
    num_datasets = np.shape(model_weights)[0]
    consolidated_model = model_weights[0]
    mean_idx = [i for i in range(len(model_weights[0])) if i % 2 == 0]
    std_idx = [i for i in range(len(model_weights[0])) if i % 2 != 0]
    ep = 1e-5  # guards against division by zero
    for i in range(num_layers):
        num_1 = 0; num_2 = 0
        den_1 = 0; den_2 = 0
        for m in range(num_datasets):
            model = model_weights[m]
            prior = model_priors[m]
            mu_s = model[mean_idx[i]]   # posterior mean
            mu_o = prior[mean_idx[i]]   # prior mean
            sig_s = model[std_idx[i]]   # posterior std
            sig_o = prior[std_idx[i]]   # prior std
            d1 = np.power(sig_s, 2) + ep
            d2 = np.power(sig_o, 2) + ep
            num_1 = num_1 + (mu_s / d1)
            num_2 = num_2 + (mu_o / d2)
            den_1 = den_1 + (1.0 / d1)
            den_2 = den_2 + (1.0 / d2)
        consolidated_model[mean_idx[i]] = (num_1 - num_2) / (den_1 - den_2)
        consolidated_model[std_idx[i]] = 1 / (den_1 - den_2)
    return consolidated_model
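A toy check of the weight layout the function expects (a sketch with made-up numpy arrays; one layer, two clients, and unit-variance priors are assumptions for illustration):

import numpy as np

# one layer per client: [mean, std]
client_a = [np.array([1.0, 2.0]), np.array([0.5, 0.5])]
client_b = [np.array([3.0, 4.0]), np.array([0.5, 0.5])]
# one prior per client: [mean, std]
prior = [np.array([0.0, 0.0]), np.array([1.0, 1.0])]

consolidated = distributed_weight_consolidation(
    [client_a, client_b], [prior, prior])
print(consolidated[0])  # consolidated means
print(consolidated[1])  # consolidated variances (note: stored in the std slot)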

63 changes: 63 additions & 0 deletions fedavg.py
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import tensorflow as tf
from tensorflow_federated.python.learning import federated_averaging
# model_examples and model_utils are TFF helper modules referenced by the
# snippets copied from the TFF test suite below (import path assumed to
# match the federated_averaging import above)
from tensorflow_federated.python.learning import model_examples, model_utils
import os
import asyncio
from fastapi import FastAPI, WebSocket, File, UploadFile
from fastapi.responses import HTMLResponse
import websockets
import copy

app = FastAPI()

datasets = []
num_clients = 1
model = None
clients = [0] #this would be a list of all client ids

@app.get('/')
async def root():
    return 'root'

@app.get("/{model}/")
async def update_model(model: tf.keras.Model):
return model.load_weights('consolidated.h5')

@app.post("/weights")
async def load_weights(image: UploadFile = File(...)):
model.load_weights(image.filename)
datasets.append(copy.deepcopy(model))
load_data()
return {'consolidated weights': model}

async def load_data():
    # TODO: load_weights expects an uploaded file, not a client id; this
    # loop still needs to fetch each client's latest checkpoint
    for client_id in clients:
        await load_weights(client_id)

async def fed_avg():
    await load_data()

[Review comment — Contributor]
Suggested change: remove `await load_data()`.

[Review comment — Contributor]
you can load the weights in the for loop below.


    iterative_process = federated_averaging.build_federated_averaging_process(
        model_fn=model_examples.LinearRegression,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1))

    model = iterative_process.initialize()

    # assertions copied from the TFF test suite; they need a test harness
    # (`self`) to run, so they are commented out here
    # self.assertIsInstance(
    #     iterative_process.get_model_weights(model), model_utils.ModelWeights)
    # self.assertAllClose(model.model.trainable,
    #                     iterative_process.get_model_weights(model).trainable)

    for _ in range(num_clients):

[Review comment — Contributor]
load the weights for the latest submitted model from each client inside this loop.


        model, _ = iterative_process.next(model, datasets)

    # TODO: `model` is the TFF server state here, not a Keras model; the
    # weights would come from iterative_process.get_model_weights(model)
    model.save_weights('consolidated.h5', save_format='h5')
    await update_model()
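For intuition, federated averaging reduces to an example-count-weighted mean of the client weight tensors. A plain-numpy sketch of that step (function name and the two-client data are made up for illustration; this is not the TFF API):

import numpy as np

def fedavg_step(client_weights, client_sizes):
    # weighted mean of each weight tensor across clients
    total = float(sum(client_sizes))
    return [
        sum((n / total) * w[i] for n, w in zip(client_sizes, client_weights))
        for i in range(len(client_weights[0]))
    ]

# two clients, one weight tensor each; the larger client dominates the average
w_a = [np.array([1.0, 1.0])]
w_b = [np.array([3.0, 3.0])]
print(fedavg_step([w_a, w_b], client_sizes=[1, 3]))  # [array([2.5, 2.5])]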

76 changes: 76 additions & 0 deletions nobrainer_train.py
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import nobrainer
import tensorflow as tf
import os

def load_data_and_train(path, dataset_size, model, name):
    train_records = []
    eval_records = []
    for root, dirs, files in os.walk(path):
        for filename in files:
            if 'tfrec' not in filename:
                continue
            if 'train' in filename:
                # join with `root`, not `path`, so nested directories work
                train_records.append(os.path.join(root, filename))
            elif 'evaluate' in filename:
                eval_records.append(os.path.join(root, filename))

    print("read raw records")
    #print(str(len(train_records))+" in training set")
    #print(str(len(eval_records))+" in testing set")

    train_records = train_records[:dataset_size]

    dataset = tf.data.TFRecordDataset(train_records, compression_type="GZIP")
    print(dataset.element_spec)

    train_data = nobrainer.dataset.tfrecord_dataset(
        file_pattern=train_records,
        volume_shape=(256, 256, 256),
        shuffle=None,
        scalar_label=False)

    # split each 256^3 volume into eight 128^3 blocks, then add a channel axis
    block_shape = (128, 128, 128)

    train_data = train_data.map(lambda x, y: (nobrainer.volume.to_blocks(x, block_shape),
                                              nobrainer.volume.to_blocks(y, block_shape)))

    train_data = train_data.map(lambda x, y: (tf.reshape(x, x.shape + (1,)),
                                              tf.reshape(y, y.shape + (1,))))
    train_data = train_data.batch(256)

    #x, y = next(iter(train_data))

    num_epochs = 1

    _op = 'adam'
    _loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    _metrics = ['accuracy']

    model.compile(optimizer=_op, loss=_loss, metrics=_metrics)

    print("initialized and compiled model")
    for x, y in train_data.as_numpy_iterator():
        # fit takes x and y separately; the original passed the (x, y) tuple
        model.fit(x, y, epochs=num_epochs, verbose=2)
        model.save_weights('saved_models/' + name + '.h5', save_format='h5')


# example of how I use the function
num_classes = 2
shape = (128, 128, 128, 1)

model, name = (nobrainer.models.meshnet(num_classes, shape), 'meshnet')
path = os.getcwd()
size = 1
load_data_and_train(path, size, model, name)
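As a quick sanity check on the block pipeline above (a sketch, not part of the diff): a 256^3 volume should split into eight 128^3 blocks.

import nobrainer
import tensorflow as tf

vol = tf.zeros((256, 256, 256))
blocks = nobrainer.volume.to_blocks(vol, (128, 128, 128))
print(blocks.shape)  # (8, 128, 128, 128)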