added past code #1

Open · wants to merge 2 commits into base: main
Changes from 1 commit
63 changes: 63 additions & 0 deletions fedavg.py
@@ -0,0 +1,63 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import tensorflow as tf
from tensorflow_federated.python.learning import federated_averaging, model_examples, model_utils
import os
import asyncio
from fastapi import FastAPI, WebSocket, File, UploadFile
from fastapi.responses import HTMLResponse
import websockets
import copy

app = FastAPI()

datasets = []
num_clients = 1
model = None
clients = [0]  # this would be a list of all client ids

@app.get('/')
async def root():
    return 'root'

@app.get("/{model}/")
async def update_model(model: tf.keras.Model):
return model.load_weights('consolidated.h5')

@app.post("/weights")
async def load_weights(image: UploadFile = File(...)):
model.load_weights(image.filename)
datasets.append(copy.deepcopy(model))
load_data()
return {'consolidated weights': model}

async def load_data():
    for client_id in clients:
        await load_weights(client_id)  # BUG: load_weights expects an UploadFile, not a client id

async def fed_avg():
    await load_data()
Contributor (suggested change, removing this line):

    await load_data()

Contributor:
you can load the weights in the for loop below.


    iterative_process = federated_averaging.build_federated_averaging_process(
        model_fn=model_examples.LinearRegression,
        client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.1))

    model = iterative_process.initialize()

    # leftover assertions from a TFF unit test (`self` does not exist at module scope):
    # self.assertIsInstance(
    #     iterative_process.get_model_weights(model), model_utils.ModelWeights)
    # self.assertAllClose(model.model.trainable,
    #                     iterative_process.get_model_weights(model).trainable)

    for _ in range(num_clients):
Contributor:

load the weights for the latest submitted model from each client inside this loop.
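
A minimal sketch of that restructuring, assuming a hypothetical per-client file naming scheme weights_<client_id>.h5 and a separate Keras model instance keras_model to read them with (both are assumptions, not part of this PR):

    for _ in range(num_clients):
        # refresh the client submissions just before each averaging round
        for client_id in clients:
            keras_model.load_weights(f'weights_{client_id}.h5')  # hypothetical path
            datasets.append(copy.deepcopy(keras_model))
        model, _ = iterative_process.next(model, datasets)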

        model, _ = iterative_process.next(model, datasets)
        # NOTE: next() expects a list of client tf.data.Datasets here, while
        # `datasets` currently holds deep-copied models
        # self.assertIsInstance(
        #     iterative_process.get_model_weights(model), model_utils.ModelWeights)
        # self.assertAllClose(model.model.trainable,
        #                     iterative_process.get_model_weights(model).trainable)

    # NOTE: `model` is the TFF server state at this point, not a Keras model,
    # so save_weights is not available on it directly
    model.save_weights('consolidated.h5', save_format='h5')
    await update_model()
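
For reference, the documented TFF flow feeds next() a list of client tf.data.Datasets each round; a minimal sketch, assuming federated_train_data is such a list and num_rounds is chosen by the caller (both assumptions, not part of this PR):

state = iterative_process.initialize()
for round_num in range(num_rounds):
    # one round of federated averaging over the sampled clients' datasets
    state, metrics = iterative_process.next(state, federated_train_data)
    print('round {:2d}, metrics={}'.format(round_num, metrics))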

76 changes: 76 additions & 0 deletions nobrainer_train.py
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import nobrainer
import tensorflow as tf
import os

def load_data_and_train(path, dataset_size, model, name):
    train_records = []
    eval_records = []
    for root, dirs, files in os.walk(path):
        for filename in files:
            if 'tfrec' not in filename:
                continue
            # join with `root`, not `path`, so records in subdirectories resolve
            if 'train' in filename:
                train_records.append(os.path.join(root, filename))
            elif 'evaluate' in filename:
                eval_records.append(os.path.join(root, filename))

print("read raw records")
#print(str(len(train_records))+" in training set")
#print(str(len(eval_records))+" in testing set")

train_records = train_records[:dataset_size]

dataset = tf.data.TFRecordDataset(train_records, compression_type="GZIP")
print(dataset.element_spec)

train_data = nobrainer.dataset.tfrecord_dataset(
file_pattern = train_records,
volume_shape = (256, 256, 256),
shuffle = None,
scalar_label = False)


block_shape = (128, 128, 128)

train_data = train_data.map(lambda x, y: (nobrainer.volume.to_blocks(x, block_shape),
nobrainer.volume.to_blocks(y, block_shape)))

train_data = train_data.map(lambda x, y: (tf.reshape(x, x.shape+(1,)), tf.reshape(y, y.shape+(1,))))
train_data = train_data.batch(256)

    #x, y = next(iter(train_data))

    num_epochs = 1

    _op = 'adam'
    _loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    _metrics = ['accuracy']

    model.compile(optimizer=_op, loss=_loss, metrics=_metrics)

    print("initialized and compiled model")
    # fit() takes features and labels separately, so unpack each numpy batch
    for x, y in train_data.as_numpy_iterator():
        model.fit(x, y, epochs=num_epochs, verbose=2)

    os.makedirs('saved_models', exist_ok=True)  # save_weights does not create the directory
    model.save_weights('saved_models/' + name + '.h5', save_format='h5')


# Example of how I use the function
num_classes = 2
shape = (128, 128, 128, 1)

model, name = (nobrainer.models.meshnet(num_classes, shape), 'meshnet')
path = os.getcwd()
size = 1
load_data_and_train(path, size, model, name)
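
To tie this script back to the server in fedavg.py, the saved weights can be posted to the /weights endpoint; a minimal sketch using requests, assuming the FastAPI app runs at http://localhost:8000 (the address is an assumption):

import requests

# upload the saved weights; the multipart field is named "image" to match
# the load_weights endpoint's parameter in fedavg.py
with open('saved_models/' + name + '.h5', 'rb') as f:
    resp = requests.post('http://localhost:8000/weights', files={'image': f})
print(resp.json())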