fix(example): add more examples
LongxingTan authored Jan 21, 2025
1 parent 541e399 commit 6852412
Showing 32 changed files with 219 additions and 144 deletions.
22 changes: 15 additions & 7 deletions examples/README.md
@@ -1,16 +1,24 @@
-# TFTS examples
+# 🚀 TFTS Examples
 
-**Basic Usage**
-- [time series prediction](./run_prediction_simple.py)
-- [time series anomaly detection](./run_anomaly.py)
+## 🛠️ Basic Usage
+Get started with these basic examples:
+- [Time Series Prediction](./run_prediction_simple.py): Learn how to predict future values in a time series.
+- [Time Series Classification](./run_classification.py): Explore how to classify time series data.
+- [Time Series Anomaly Detection](./run_anomaly.py): Detect anomalies in time series data.
 
 
-## Notebooks
-- [single step prediction](https://nbviewer.org/github/LongxingTan/Time-series-prediction/blob/master/examples/notebooks/demo_single_step_prediction.ipynb)
+## 📓 Notebooks
+Dive deeper with these interactive notebooks:
+- [single step prediction](https://nbviewer.org/github/LongxingTan/Time-series-prediction/blob/master/examples/notebooks/demo_single_step_prediction.ipynb): A step-by-step guide to single-step time series prediction.
 
 
-## More examples
+## 🏆 More examples
+Check out these advanced examples and competition-winning implementations:
 
 **Multiple steps prediction**
 - [TFTS-Bert](https://github.com/LongxingTan/KDDCup2022-Baidu) wins the **3rd place** in KDD Cup 2022 wind power forecasting
 - [TFTS-Seq2seq](https://github.com/LongxingTan/Data-competitions/tree/master/tianchi-enso-prediction) wins the **4th place** in Tianchi ENSO prediction 2021
+
+
+## 🤝 Contributing
+We welcome contributions! If you have an example, notebook, or improvement to share, please follow [these steps](../CONTRIBUTING.md)
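To make the README's Basic Usage list concrete, here is a minimal prediction sketch assembled from the tfts names that appear in the diffs below (`AutoConfig`, `AutoModel`, `KerasTrainer`); the toy data and shapes are assumptions, not part of the commit:

```python
import numpy as np
from tfts import AutoConfig, AutoModel, KerasTrainer

# Toy windows: 24 past steps -> 12 future steps (shapes are assumed).
x = np.random.rand(32, 24, 1).astype("float32")
y = np.random.rand(32, 12, 1).astype("float32")

config = AutoConfig.for_model("rnn")
model = AutoModel.from_config(config, predict_sequence_length=12)

trainer = KerasTrainer(model)
trainer.train((x, y), (x, y), epochs=2)  # (train_dataset, valid_dataset), as in run_anomaly.py
pred = trainer.predict(x)
```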
31 changes: 13 additions & 18 deletions examples/run_anomaly.py
@@ -16,10 +16,9 @@ def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--seed", type=int, default=315, required=False, help="seed")
     parser.add_argument("--use_model", type=str, default="rnn", help="model for train")
-    parser.add_argument("--use_data", type=str, default="ecg", help="dataset: sine or airpassengers")
     parser.add_argument("--train_length", type=int, default=12, help="sequence length for train")
     parser.add_argument("--predict_sequence_length", type=int, default=1, help="sequence length for predict")
-    parser.add_argument("--epochs", type=int, default=1, help="Number of training epochs")
+    parser.add_argument("--epochs", type=int, default=5, help="Number of training epochs")
     parser.add_argument("--batch_size", type=int, default=16, help="Batch size for training")
     parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning rate for training")
     parser.add_argument("--output_dir", type=str, default="./weights", help="saved model weights")
@@ -34,39 +33,35 @@ def create_subseq(ts, train_length, pred_length):
     return sub_seq, next_values
 
 
-def build_data(data_name="ecg"):
-    if data_name == "ecg":
-        df = pd.read_csv("http://www.cs.ucr.edu/~eamonn/discords/qtdbsel102.txt", header=None, delimiter="\t")
-        ecg = df.iloc[:, 2].values
-        ecg = ecg.reshape(len(ecg), -1)
-        print("length of ECG data : ", len(ecg))
+def build_data():
+    df = pd.read_csv("http://www.cs.ucr.edu/~eamonn/discords/qtdbsel102.txt", header=None, delimiter="\t")
+    ecg = df.iloc[:, 2].values
+    ecg = ecg.reshape(len(ecg), -1)
+    print("length of ECG data : ", len(ecg))
 
-        scaler = StandardScaler()
-        std_ecg = scaler.fit_transform(ecg)
-        std_ecg = std_ecg[:5000]
+    scaler = StandardScaler()
+    std_ecg = scaler.fit_transform(ecg)
+    std_ecg = std_ecg[:5000]
 
-        sub_seq, next_values = create_subseq(std_ecg, args.train_length, args.predict_sequence_length)
-        return np.array(sub_seq), np.array(next_values), std_ecg
-    else:
-        raise ValueError()
+    sub_seq, next_values = create_subseq(std_ecg, args.train_length, args.predict_sequence_length)
+    return np.array(sub_seq), np.array(next_values), std_ecg
 
 
 def run_train(args):
-    x_test, y_test, sig = build_data("ecg")
+    x_test, y_test, sig = build_data()
 
     config = AutoConfig.for_model(args.use_model)
     config.train_sequence_length = args.train_length
     model = AutoModelForAnomaly.from_config(config, predict_sequence_length=1)
 
     trainer = KerasTrainer(model)
     trainer.train((x_test, y_test), (x_test, y_test), epochs=args.epochs)
-    # model.save_weights(args.output_dir)
     trainer.save_model(args.output_dir)
     return
 
 
 def run_inference(args):
-    x_test, y_test, sig = build_data("ecg")
+    x_test, y_test, sig = build_data()
 
     config = AutoConfig.for_model(args.use_model)
     config.train_sequence_length = args.train_length
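In short, the train-and-save flow of `run_anomaly.py` now reduces to roughly the following sketch, using only names visible in this diff; the random stand-in for the ECG windows is an assumption:

```python
import numpy as np
from tfts import AutoConfig, AutoModelForAnomaly, KerasTrainer

# Stand-in for the ECG subsequences: 12 past steps -> 1 next value.
x = np.random.rand(64, 12, 1).astype("float32")
y = np.random.rand(64, 1, 1).astype("float32")

config = AutoConfig.for_model("rnn")
config.train_sequence_length = 12
model = AutoModelForAnomaly.from_config(config, predict_sequence_length=1)

trainer = KerasTrainer(model)
trainer.train((x, y), (x, y), epochs=5)
trainer.save_model("./weights")  # replaces the commented-out model.save_weights call
```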
65 changes: 57 additions & 8 deletions examples/run_classification.py
@@ -2,29 +2,78 @@

 import argparse
 
-import tfts
+import numpy as np
+from sklearn.metrics import confusion_matrix
+from sklearn.model_selection import train_test_split
+import tensorflow as tf
+
+from tfts import AutoConfig, AutoModel, AutoModelForClassification, KerasTrainer
 
 
 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--seed", type=int, default=315, required=False, help="seed")
-    parser.add_argument("--use_model", type=str, default="rnn", help="model for train")
-    parser.add_argument("--use_data", type=str, default="sine", help="dataset: sine or airpassengers")
-    parser.add_argument("--train_length", type=int, default=24, help="sequence length for train")
-    parser.add_argument("--predict_sequence_length", type=int, default=12, help="sequence length for predict")
+    parser.add_argument("--use_model", type=str, default="bert", help="model for train")
+    parser.add_argument("--num_labels", type=int, default=2, help="number of unique labels")
     parser.add_argument("--epochs", type=int, default=100, help="Number of training epochs")
-    parser.add_argument("--batch_size", type=int, default=16, help="Batch size for training")
+    parser.add_argument("--batch_size", type=int, default=64, help="Batch size for training")
     parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning rate for training")
 
     return parser.parse_args()


+def prepare_data():
+    def readucr(filename):
+        data = np.loadtxt(filename, delimiter="\t")
+        y = data[:, 0]
+        x = data[:, 1:]
+        return x, y.astype(int)
+
+    root_url = "https://raw.githubusercontent.com/hfawaz/cd-diagram/master/FordA/"
+
+    x_train, y_train = readucr(root_url + "FordA_TRAIN.tsv")
+    x_test, y_test = readucr(root_url + "FordA_TEST.tsv")
+
+    x_train = x_train.reshape((x_train.shape[0], x_train.shape[1], 1))
+    x_test = x_test.reshape((x_test.shape[0], x_test.shape[1], 1))
+
+    idx = np.random.permutation(len(x_train))
+    x_train = x_train[idx]
+    y_train = y_train[idx]
+
+    y_train[y_train == -1] = 0
+    y_test[y_test == -1] = 0
+    return x_train, y_train, x_test, y_test


 def run_train(args):
-    return
+    x_train, y_train, x_test, y_test = prepare_data()
+
+    x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=42)
+    print(x_train.shape, y_train.shape, x_val.shape, y_val.shape)
+
+    config = AutoConfig.for_model(args.use_model)
+    model = AutoModelForClassification.from_config(config, num_labels=args.num_labels)
+
+    opt = tf.keras.optimizers.Adam(args.learning_rate)
+    loss_fn = "sparse_categorical_crossentropy"
+    trainer = KerasTrainer(model, loss_fn=loss_fn, optimizer=opt)
+    early_stop_callback = tf.keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0, patience=5)
+
+    trainer.train(
+        (x_train, y_train),
+        valid_dataset=(x_val, y_val),
+        epochs=args.epochs,
+        batch_size=args.batch_size,
+        callbacks=[early_stop_callback],
+    )
+
+    y_pred = model(x_val)
+    y_pred_classes = np.argmax(y_pred, axis=1)
+    y_true_classes = y_val  # labels are already sparse integers, so no argmax over a class axis
+
+    cm = confusion_matrix(y_true_classes, y_pred_classes)
+    print(cm)
+    return
 
 
 def run_inference(args):


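The classification path added here mirrors the new test in `tests/test_models/test_auto_model.py` further down; as a minimal sketch (the input shape is taken from that test, not from FordA):

```python
import tensorflow as tf
from tfts import AutoConfig, AutoModelForClassification

config = AutoConfig.for_model("bert")
model = AutoModelForClassification.from_config(config, num_labels=2)

x = tf.random.normal([2, 14, 4])  # (batch, time steps, features)
logits = model(x)                 # -> shape (2, num_labels)
print(logits.shape)
```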
7 changes: 3 additions & 4 deletions examples/run_prediction_simple.py
@@ -5,7 +5,6 @@
 import argparse
 import os
 import random
-import sys
 
 import matplotlib.pyplot as plt
 import numpy as np
@@ -21,12 +20,12 @@ def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument("--seed", type=int, default=315, required=False, help="seed")
     parser.add_argument("--use_model", type=str, default="rnn", help="model for train")
-    parser.add_argument("--use_data", type=str, default="sine", help="dataset: sine or airpassengers")
+    parser.add_argument("--use_data", type=str, default="sine", help="dataset: sine or air passengers")
     parser.add_argument("--train_length", type=int, default=24, help="sequence length for train")
     parser.add_argument("--predict_sequence_length", type=int, default=12, help="sequence length for predict")
     parser.add_argument("--epochs", type=int, default=100, help="Number of training epochs")
     parser.add_argument("--batch_size", type=int, default=16, help="Batch size for training")
-    parser.add_argument("--learning_rate", type=float, default=1e-4, help="learning rate for training")
+    parser.add_argument("--learning_rate", type=float, default=5e-4, help="learning rate for training")
 
     return parser.parse_args()

@@ -49,7 +48,7 @@ def run_train(args):
     model = AutoModel.from_config(config, predict_sequence_length=args.predict_sequence_length)
 
     trainer = KerasTrainer(model, optimizer=optimizer, loss_fn=loss_fn)
-    trainer.train(train, valid, epochs=args.epochs, early_stopping=EarlyStopping("val_loss", patience=5))
+    trainer.train(train, valid, epochs=args.epochs, callbacks=[EarlyStopping("val_loss", patience=5)])
 
     pred = trainer.predict(valid[0])
     trainer.plot(history=valid[0], true=valid[1], pred=pred)
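The last hunk replaces the bespoke `early_stopping=` argument with a standard Keras `callbacks=` list, so presumably any `tf.keras` callback can be passed; a sketch of that usage (pairing with `ReduceLROnPlateau` is an assumption, not part of this commit):

```python
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

callbacks = [
    EarlyStopping("val_loss", patience=5),
    ReduceLROnPlateau(monitor="val_loss", factor=0.5, patience=3),
]
# trainer.train(train, valid, epochs=args.epochs, callbacks=callbacks)
```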
9 changes: 9 additions & 0 deletions tests/test_models/test_auto_model.py
@@ -25,3 +25,12 @@ def test_auto_model_init(
         output = auto_model(input_data)
 
         self.assertEqual(output.shape, (1, 5, 1))
+
+    def test_auto_model_for_classification(self):
+        num_labels = 3
+        config = AutoConfig.for_model("bert")
+        model = AutoModelForClassification.from_config(config, num_labels=num_labels)
+
+        x = tf.random.normal([2, 14, 4])
+        y = model(x)
+        self.assertEqual(y.shape, (2, num_labels))
1 change: 0 additions & 1 deletion tfts/datasets/get_data.py
@@ -3,7 +3,6 @@
 """
 
 import logging
-import os
 import random
 from typing import List, Optional, Tuple, Union

14 changes: 7 additions & 7 deletions tfts/layers/attention_layer.py
@@ -1,10 +1,10 @@
"""Layer for :py:class:`~tfts.models.transformer` :py:class:`~tfts.models.autoformer`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv1D, Dense, Dropout, LayerNormalization
from tensorflow.keras.layers import Conv1D, Dense, Dropout

from tfts.layers.mask_layer import ProbMask

@@ -52,8 +52,8 @@ def call(
         q: tf.Tensor,
         k: tf.Tensor,
         v: tf.Tensor,
-        past_key_value=None,
         mask: Optional[tf.Tensor] = None,
+        past_key_value=None,
         training: Optional[bool] = None,
         return_attention_scores: bool = False,
         use_causal_mask: bool = False,
@@ -140,7 +140,7 @@ def __init__(
     def build(self, input_shape: Tuple[Optional[int], ...]) -> None:
         super(SelfAttention, self).build(input_shape)
 
-    def call(self, x: tf.Tensor, mask: Optional[tf.Tensor] = None, training=None):
+    def call(self, x: tf.Tensor, mask: Optional[tf.Tensor] = None, training: Optional[bool] = None):
         """Self attention layer
         Parameters
@@ -155,7 +155,7 @@ def call(self, x: tf.Tensor, mask: Optional[tf.Tensor] = None, training=None):
         tf.Tensor
             3D self attention output, (batch_size, sequence_length, attention_hidden_size)
         """
-        return self.attention(x, x, x, mask, training=training)
+        return self.attention(q=x, k=x, v=x, mask=mask, training=training)
 
     def get_config(self):
         base_config = super(SelfAttention, self).get_config()
@@ -234,7 +234,7 @@ def _update_context(self, context_in, v, scores, index, L_Q):
         return tf.convert_to_tensor(context_in)
 
     # @tf.function
-    def call(self, q, k, v, mask=None):
+    def call(self, q, k, v, mask: Optional[tf.Tensor] = None):
         """Prob attention"""
         q = self.dense_q(q)  # project the query/key/value to num_attention_heads * units
         k = self.dense_k(k)
@@ -282,7 +282,7 @@ def __init__(self, hidden_size: int, num_attention_heads: int, attention_probs_d
     def build(self, input_shape: Tuple[Optional[int], ...]):
         super().build(input_shape)
 
-    def call(self, x, mask=None):
+    def call(self, x, mask: Optional[tf.Tensor] = None):
         """Sparse attention
         Parameters
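Two related fixes in this file: `past_key_value` now follows `mask` in the full attention `call` signature, and `SelfAttention` forwards its arguments by keyword. The toy functions below (not the real layer) show why the old positional call `self.attention(x, x, x, mask, training=training)` silently bound the mask to `past_key_value`:

```python
def call_old(q, k, v, past_key_value=None, mask=None):
    return mask  # what the attention actually receives as its mask

def call_new(q, k, v, mask=None, past_key_value=None):
    return mask

m = "causal_mask"
print(call_old(1, 2, 3, m))       # None: the 4th positional arg lands in past_key_value
print(call_new(1, 2, 3, m))       # 'causal_mask': reordering restores the intended binding
print(call_new(1, 2, 3, mask=m))  # keyword form is unambiguous under either signature
```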
2 changes: 1 addition & 1 deletion tfts/layers/autoformer_layer.py
@@ -1,7 +1,7 @@
"""Layer for :py:class:`~tfts.models.autoformer`"""

import math
from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import tensorflow as tf
from tensorflow.keras.layers import AveragePooling1D, Conv1D, Dense, Dropout
2 changes: 1 addition & 1 deletion tfts/layers/cnn_layer.py
@@ -1,6 +1,6 @@
"""Layer for :py:class:`~tfts.models.wavenet`"""

from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, List, Optional, Tuple

import tensorflow as tf
from tensorflow.keras import activations, constraints, initializers, regularizers
2 changes: 1 addition & 1 deletion tfts/layers/dense_layer.py
@@ -4,7 +4,7 @@
 
 import tensorflow as tf
 from tensorflow.keras import activations, constraints, initializers, regularizers
-from tensorflow.keras.layers import Dense, Dropout
+from tensorflow.keras.layers import Dense
 
 
 class DenseTemp(tf.keras.layers.Layer):
7 changes: 3 additions & 4 deletions tfts/layers/embed_layer.py
@@ -1,10 +1,9 @@
"""Layer for :py:class:`~tfts.models.transformer`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import GRU, LSTM, Conv1D, Dense, Dropout, Embedding, LayerNormalization, SpatialDropout1D
from tensorflow.keras.layers import GRU, LSTM, Conv1D, Dense, Dropout, Embedding

from .position_layer import PositionalEmbedding, PositionalEncoding, RelativePositionEmbedding

@@ -35,7 +34,7 @@ def build(self, input_shape: Tuple[Optional[int], ...]):
         )
         super(TokenEmbedding, self).build(input_shape)
 
-    def call(self, x):
+    def call(self, x: tf.Tensor):
         """
         Performs the token embedding.
2 changes: 0 additions & 2 deletions tfts/layers/mask_layer.py
@@ -1,7 +1,5 @@
"""Layer for :py:class:`~tfts.models.transformer`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union

import tensorflow as tf
from tensorflow.keras import activations, constraints, initializers, regularizers

2 changes: 1 addition & 1 deletion tfts/layers/nbeats_layer.py
@@ -1,6 +1,6 @@
"""Layer for :py:class:`~tfts.models.nbeats`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import numpy as np
import tensorflow as tf
2 changes: 1 addition & 1 deletion tfts/layers/position_layer.py
@@ -1,6 +1,6 @@
"""Layer for :py:class:`~tfts.models.transformer`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import numpy as np
import tensorflow as tf
2 changes: 1 addition & 1 deletion tfts/layers/unet_layer.py
@@ -1,6 +1,6 @@
"""Layer for :py:class:`~tfts.models.unet`"""

from typing import Any, Callable, Dict, Optional, Tuple, Type, Union
from typing import Any, Callable, Dict, Optional, Tuple

import tensorflow as tf
from tensorflow.keras.layers import Activation, Add, BatchNormalization, Conv1D, Dense, GlobalAveragePooling1D, Multiply