From 25a5204ebd555796b469ae75fec9529b015cefc2 Mon Sep 17 00:00:00 2001
From: Eddie Mattia
Date: Fri, 30 Aug 2024 08:52:42 -0700
Subject: [PATCH] add workflows

---
 finetune_hf_peft.py         | 36 +++++++++++++++++++-----------------
 hf_peft_args.json           |  6 +++---
 huggingface_model_card.json |  1 -
 my_peft_tools.py            |  4 ++--
 4 files changed, 24 insertions(+), 23 deletions(-)
 delete mode 100644 huggingface_model_card.json

diff --git a/finetune_hf_peft.py b/finetune_hf_peft.py
index 8e5ac0a..9327e88 100644
--- a/finetune_hf_peft.py
+++ b/finetune_hf_peft.py
@@ -1,9 +1,21 @@
 import os
 import json
-from metaflow import FlowSpec, step, IncludeFile, Parameter, secrets, resources, retry, pypi, huggingface_card, nvidia, S3
+from metaflow import FlowSpec, step, IncludeFile, Parameter, secrets, resources, retry, pypi_base, huggingface_card, kubernetes, S3
 from metaflow.profilers import gpu_profile
 from exceptions import GatedRepoError, GATED_HF_ORGS

+@pypi_base(packages={
+    'datasets': '',
+    'torch': '',
+    'transformers': '',
+    'peft': '',
+    'trl': '',
+    'accelerate': '',
+    'bitsandbytes': '',
+    'sentencepiece': '',
+    'safetensors': '',
+    'requests': ''
+})
 class FinetuneLlama3LoRA(FlowSpec):

     script_args_file = IncludeFile(
@@ -19,7 +31,6 @@ class FinetuneLlama3LoRA(FlowSpec):
         help="Flag for a smoke test"
     )

-    @pypi(disabled=True)
     @secrets(sources=["huggingface-token"])
     @step
     def start(self):
@@ -33,25 +44,17 @@ def start(self):
             raise GatedRepoError(self.script_args.dataset_name)
         self.next(self.sft)

-    @pypi(packages={
-        'datasets': '',
-        'torch': '',
-        'transformers': '',
-        'peft': '',
-        'trl': '',
-        'accelerate': '',
-        'bitsandbytes': '',
-        'sentencepiece': '',
-        'safetensors': ''
-    })
     @gpu_profile(interval=1)
     @huggingface_card
-    @nvidia
+    @secrets(sources=["huggingface-token"])
+    @kubernetes(gpu=1)
     @step
     def sft(self):
+        import os
         from my_peft_tools import create_model, create_trainer, save_model, get_tar_bytes
         import huggingface_hub
-        huggingface_hub.login('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')
+
+        huggingface_hub.login(os.environ['HF_TOKEN'])  # read from the huggingface-token secret
         model, tokenizer = create_model(self.script_args)
         trainer = create_trainer(self.script_args, tokenizer, model, smoke=self.smoke, card=True)
         trainer.train()
@@ -62,11 +65,10 @@ def sft(self):
             s3.put('lora_merged.tar.gz', get_tar_bytes(merge_output_dirname))
         self.next(self.end)

-    @pypi(disabled=True)
     @step
     def end(self):
         print("Training completed successfully!")


 if __name__ == '__main__':
-    FinetuneLlama3LoRA()
\ No newline at end of file
+    FinetuneLlama3LoRA()
diff --git a/hf_peft_args.json b/hf_peft_args.json
index 1e87711..e8cfa82 100644
--- a/hf_peft_args.json
+++ b/hf_peft_args.json
@@ -1,7 +1,7 @@
 {
     "local_rank": -1,
-    "per_device_train_batch_size": 1,
-    "per_device_eval_batch_size": 4,
+    "per_device_train_batch_size": 16,
+    "per_device_eval_batch_size": 16,
     "gradient_accumulation_steps": 17,
     "learning_rate": 3e-4,
     "max_grad_norm": 0.3,
@@ -30,4 +30,4 @@
     "logging_steps": 5,
     "merge": false,
     "output_dir": "./lora_checkpoints"
-}
\ No newline at end of file
+}
diff --git a/huggingface_model_card.json b/huggingface_model_card.json
deleted file mode 100644
index 09daf7a..0000000
--- a/huggingface_model_card.json
+++ /dev/null
@@ -1 +0,0 @@
-{"metrics": {"loss": [{"step": 1, "value": 2.0702}, {"step": 2, "value": 2.1332}, {"step": 3, "value": 2.2524}], "grad_norm": [{"step": 1, "value": 0.4016675651073456}, {"step": 2, "value": 0.5151544213294983}, {"step": 3, "value": 0.39521700143814087}], "learning_rate": [{"step": 1,
"value": 0.000225}, {"step": 2, "value": 7.500000000000002e-05}, {"step": 3, "value": 0.0}]}, "model_config": {"vocab_size": 128256, "max_position_embeddings": 131072, "hidden_size": 4096, "intermediate_size": 14336, "num_hidden_layers": 32, "num_attention_heads": 32, "num_key_value_heads": 8, "hidden_act": "silu", "initializer_range": 0.02, "rms_norm_eps": 1e-05, "pretraining_tp": 1, "use_cache": true, "rope_theta": 500000.0, "rope_scaling": {"factor": 8.0, "low_freq_factor": 1.0, "high_freq_factor": 4.0, "original_max_position_embeddings": 8192, "rope_type": "llama3"}, "attention_bias": false, "attention_dropout": 0.0, "mlp_bias": false, "return_dict": true, "output_hidden_states": false, "output_attentions": false, "torchscript": false, "torch_dtype": "bfloat16", "use_bfloat16": false, "tf_legacy_loss": false, "pruned_heads": {}, "tie_word_embeddings": false, "chunk_size_feed_forward": 0, "is_encoder_decoder": false, "is_decoder": false, "cross_attention_hidden_size": null, "add_cross_attention": false, "tie_encoder_decoder": false, "max_length": 20, "min_length": 0, "do_sample": false, "early_stopping": false, "num_beams": 1, "num_beam_groups": 1, "diversity_penalty": 0.0, "temperature": 1.0, "top_k": 50, "top_p": 1.0, "typical_p": 1.0, "repetition_penalty": 1.0, "length_penalty": 1.0, "no_repeat_ngram_size": 0, "encoder_no_repeat_ngram_size": 0, "bad_words_ids": null, "num_return_sequences": 1, "output_scores": false, "return_dict_in_generate": false, "forced_bos_token_id": null, "forced_eos_token_id": null, "remove_invalid_values": false, "exponential_decay_length_penalty": null, "suppress_tokens": null, "begin_suppress_tokens": null, "architectures": ["LlamaForCausalLM"], "finetuning_task": null, "id2label": {"0": "LABEL_0", "1": "LABEL_1"}, "label2id": {"LABEL_0": 0, "LABEL_1": 1}, "tokenizer_class": null, "prefix": null, "bos_token_id": 128000, "pad_token_id": null, "eos_token_id": [128001, 128008, 128009], "sep_token_id": null, "decoder_start_token_id": null, "task_specific_params": null, "problem_type": null, "_name_or_path": "meta-llama/Meta-Llama-3.1-8B-Instruct", "transformers_version": "4.44.2", "model_type": "llama", "quantization_config": {"quant_method": "bitsandbytes", "_load_in_8bit": false, "_load_in_4bit": true, "llm_int8_threshold": 6.0, "llm_int8_skip_modules": null, "llm_int8_enable_fp32_cpu_offload": false, "llm_int8_has_fp16_weight": false, "bnb_4bit_quant_type": "nf4", "bnb_4bit_use_double_quant": false, "bnb_4bit_compute_dtype": "float16", "bnb_4bit_quant_storage": "uint8", "load_in_4bit": true, "load_in_8bit": false}}, "trainer_configuration": {"output_dir": "./lora_checkpoints", "overwrite_output_dir": false, "do_train": false, "do_eval": false, "do_predict": false, "eval_strategy": "no", "prediction_loss_only": false, "per_device_train_batch_size": 1, "per_device_eval_batch_size": 8, "per_gpu_train_batch_size": null, "per_gpu_eval_batch_size": null, "gradient_accumulation_steps": 17, "eval_accumulation_steps": null, "eval_delay": 0, "torch_empty_cache_steps": null, "learning_rate": 0.0003, "weight_decay": 0.0, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1.0, "num_train_epochs": 1, "max_steps": 3, "lr_scheduler_type": "cosine", "lr_scheduler_kwargs": {}, "warmup_ratio": 0.0, "warmup_steps": 0, "log_level": "passive", "log_level_replica": "warning", "log_on_each_node": true, "logging_dir": "./lora_checkpoints/runs/Aug25_22-28-06_new-3090", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 1, 
"logging_nan_inf_filter": true, "save_strategy": "steps", "save_steps": 500, "save_total_limit": null, "save_safetensors": true, "save_on_each_node": false, "save_only_model": false, "restore_callback_states_from_checkpoint": false, "no_cuda": false, "use_cpu": false, "use_mps_device": false, "seed": 42, "data_seed": null, "jit_mode_eval": false, "use_ipex": false, "bf16": true, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": null, "local_rank": 0, "ddp_backend": null, "tpu_num_cores": null, "tpu_metrics_debug": false, "debug": [], "dataloader_drop_last": false, "eval_steps": null, "dataloader_num_workers": 0, "dataloader_prefetch_factor": null, "past_index": -1, "run_name": "./lora_checkpoints", "disable_tqdm": true, "remove_unused_columns": true, "label_names": null, "load_best_model_at_end": false, "metric_for_best_model": null, "greater_is_better": null, "ignore_data_skip": false, "fsdp": [], "fsdp_min_num_params": 0, "fsdp_config": {"min_num_params": 0, "xla": false, "xla_fsdp_v2": false, "xla_fsdp_grad_ckpt": false}, "fsdp_transformer_layer_cls_to_wrap": null, "accelerator_config": {"split_batches": false, "dispatch_batches": null, "even_batches": true, "use_seedable_sampler": true, "non_blocking": false, "gradient_accumulation_kwargs": null}, "deepspeed": null, "label_smoothing_factor": 0.0, "optim": "adamw_torch", "optim_args": null, "adafactor": false, "group_by_length": true, "length_column_name": "length", "report_to": [], "ddp_find_unused_parameters": null, "ddp_bucket_cap_mb": null, "ddp_broadcast_buffers": null, "dataloader_pin_memory": true, "dataloader_persistent_workers": false, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": null, "hub_model_id": null, "hub_strategy": "every_save", "hub_token": "", "hub_private_repo": false, "hub_always_push": false, "gradient_checkpointing": false, "gradient_checkpointing_kwargs": null, "include_inputs_for_metrics": false, "eval_do_concat_batches": true, "fp16_backend": "auto", "evaluation_strategy": null, "push_to_hub_model_id": null, "push_to_hub_organization": null, "push_to_hub_token": "", "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": null, "ray_scope": "last", "ddp_timeout": 1800, "torch_compile": false, "torch_compile_backend": null, "torch_compile_mode": null, "dispatch_batches": null, "split_batches": null, "include_tokens_per_second": false, "include_num_input_tokens_seen": false, "neftune_noise_alpha": null, "optim_target_modules": null, "batch_eval_metrics": false, "eval_on_start": false, "eval_use_gather_object": false, "dataset_text_field": "text", "packing": false, "max_seq_length": 256, "dataset_num_proc": null, "dataset_batch_size": 1000, "model_init_kwargs": null, "dataset_kwargs": {}, "eval_packing": null, "num_of_sequences": 1024, "chars_per_token": ""}, "runtime_info": {"Train runtime": 22.4171, "Train samples / sec": 2.275, "Train steps / sec": 0.134}, "training_state": {"epoch": 0.000980731510326526, "global_step": 3, "max_steps": 3, "num_train_epochs": 1}, "created_on": "2024-08-25T22:28:33.972531"} \ No newline at end of file diff --git a/my_peft_tools.py b/my_peft_tools.py index 3fb3bfd..eb7c318 100644 --- a/my_peft_tools.py +++ b/my_peft_tools.py @@ -186,13 +186,13 @@ def save_model(args, trainer, dirname="final", merge_dirname="final_merged_check if args.merge: """ This conditional block merges the LoRA adapter with 
the original model weights. - NOTE: For use with NIM, we do not need to do the merge, the adapter_config.json + NOTE: For use with NIM, we do not need to do the merge. """ model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16) model = model.merge_and_unload() output_merged_dir = os.path.join(args.output_dir, merge_dirname) model.save_pretrained(output_merged_dir, safe_serialization=True) - return output_dir, merge_dirname + return output_dir, output_merged_dir else: return output_dir, None
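
Postscript: with this patch applied, the sft step uploads the merged LoRA weights to the run's S3 storage via s3.put('lora_merged.tar.gz', ...), and the flow would be launched with something like `python finetune_hf_peft.py --environment=pypi run` so the @pypi_base environment takes effect. Below is a minimal sketch, not part of the patch, of how a downstream consumer might fetch and unpack that artifact with Metaflow's client API; the run ID is a placeholder, and it assumes the run completed with merge enabled so lora_merged.tar.gz exists.

    import tarfile

    from metaflow import Run, S3

    # Placeholder run ID; substitute a real FinetuneLlama3LoRA run.
    run = Run('FinetuneLlama3LoRA/1234')

    # S3(run=...) scopes keys to the same per-run location used by s3.put() in sft.
    with S3(run=run) as s3:
        obj = s3.get('lora_merged.tar.gz')           # downloads to a local temp file
        with tarfile.open(obj.path, 'r:gz') as tar:  # tarball produced by get_tar_bytes()
            tar.extractall('lora_merged')            # extracted merged checkpoint directory

Reading the artifact back through S3(run=...) rather than a hardcoded bucket path keeps the consumer code portable across datastores configured for the Metaflow deployment.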