Skip to content

Commit

Permalink
Merge branch 'master' of https://github.com/ContinualAI/avalanche into newdocs
Browse files Browse the repository at this point in the history
  • Loading branch information
AntonioCarta committed Feb 27, 2024
2 parents aa880fd + 373c4cc commit 4bace17
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 23 deletions.
24 changes: 13 additions & 11 deletions avalanche/training/plugins/early_stopping.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,18 +115,20 @@ def _update_best(self, strategy):
f"Metric {self.metric_name} used by the EarlyStopping plugin "
f"is not computed yet. EarlyStopping will not be triggered."
)
if self.best_val is None or self.operator(val_acc, self.best_val):

if self.best_val is None:
self.best_state = deepcopy(strategy.model.state_dict())
self.best_val = val_acc
self.best_step = self._get_strategy_counter(strategy)
return None

delta_val = float(val_acc - self.best_val)
if self.operator(delta_val, 0) and abs(delta_val) >= self.margin:
self.best_state = deepcopy(strategy.model.state_dict())
if self.best_val is None:
self.best_val = val_acc
self.best_step = 0
return None

if self.operator(float(val_acc - self.best_val), self.margin):
self.best_step = self._get_strategy_counter(strategy)
self.best_val = val_acc
if self.verbose:
print("EarlyStopping: new best value:", val_acc)
self.best_val = val_acc
self.best_step = self._get_strategy_counter(strategy)
if self.verbose:
print("EarlyStopping: new best value:", val_acc)

return self.best_val

Expand Down
20 changes: 9 additions & 11 deletions avalanche/training/supervised/lamaml_v2.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,6 @@ def __init__(
buffer_mb_size=buffer_mb_size,
device=device,
)

self.model.apply(init_kaiming_normal)

def _before_training_exp(self, **kwargs):
Expand Down Expand Up @@ -305,16 +304,15 @@ def __len__(self):

def get_buffer_batch(self):
    """Sample a random replay mini-batch from the stored buffer.

    Draws ``self.buffer_mb_size`` distinct random indices and gathers the
    corresponding (input, target, task-label) triplets.

    Returns:
        Tuple ``(buff_x, buff_y, buff_t)`` of tensors moved to
        ``self.device``: stacked inputs, ``long`` targets, and ``long``
        task labels.
    """
    # Distinct random positions into the buffer (at most buffer_mb_size).
    rnd_ind = torch.randperm(len(self))[: self.buffer_mb_size]

    # NOTE(review): the merged text contained a second, dead implementation
    # above this point (per-index torch.cat gathering) whose results were
    # immediately overwritten below; it has been removed. Materializing the
    # sampled subset once avoids repeated per-item buffer lookups.
    buff = self.storage_policy.buffer.subset(rnd_ind)
    buff_x, buff_y, buff_t = [], [], []
    for bx, by, bt in buff:
        buff_x.append(bx)
        buff_y.append(by)
        buff_t.append(bt)
    buff_x = torch.stack(buff_x, dim=0).to(self.device)
    buff_y = torch.tensor(buff_y).to(self.device).long()
    buff_t = torch.tensor(buff_t).to(self.device).long()
    return buff_x, buff_y, buff_t


Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class SCR(SupervisedTemplate):

def __init__(
self,
*,
model: SCRModel,
optimizer: Optimizer,
augmentations=Compose([Lambda(lambda el: el)]),
Expand Down
2 changes: 2 additions & 0 deletions docs/benchmarks.rst
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ Scenarios

NCScenario
NIScenario
benchmark_with_validation_stream

Streams
"""""""""
Expand Down Expand Up @@ -279,6 +280,7 @@ Train/Validation splits for streams:

.. currentmodule:: avalanche.benchmarks.utils


Utils (Data Loading and AvalancheDataset)
--------------------------------------------------------------------------------

Expand Down
2 changes: 1 addition & 1 deletion extra_dependencies.txt
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@
# package[version_required]: tag1, tag2, ...

higher: extra
ctrl-benchmark: extra
torchaudio: extra
ctrl-benchmark: ctrl
gym: rl
pycocotools: detection
lvis: detection
Expand Down

0 comments on commit 4bace17

Please sign in to comment.