finetune.py

import os
import sys
from typing import List

import fire
import torch
import transformers
from datasets import load_dataset

"""
Unused imports:
import torch.nn as nn
import bitsandbytes as bnb
"""

from peft import (
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
    set_peft_model_state_dict,
)
from transformers import LlamaForCausalLM, LlamaTokenizer

from utils.prompter import Prompter


def train(
    # model/data params
    base_model: str = "",  # the only required argument
    data_path: str = "yahma/alpaca-cleaned",
    output_dir: str = "./lora-alpaca",
    # training hyperparams
    batch_size: int = 128,
    micro_batch_size: int = 4,
    num_epochs: int = 3,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 2000,
    # lora hyperparams
    lora_r: int = 8,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: List[str] = [
        "q_proj",
        "v_proj",
    ],
    # llm hyperparams
    train_on_inputs: bool = True,  # if False, masks out inputs in loss
    group_by_length: bool = False,  # faster, but produces an odd training loss curve
    # wandb params
    wandb_project: str = "",
    wandb_run_name: str = "",
    wandb_watch: str = "",  # options: false | gradients | all
    wandb_log_model: str = "",  # options: false | true
    resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
    prompt_template_name: str = "alpaca",  # the prompt template to use; defaults to alpaca
):
    if int(os.environ.get("LOCAL_RANK", 0)) == 0:
        print(
            f"Training Alpaca-LoRA model with params:\n"
            f"base_model: {base_model}\n"
            f"data_path: {data_path}\n"
            f"output_dir: {output_dir}\n"
            f"batch_size: {batch_size}\n"
            f"micro_batch_size: {micro_batch_size}\n"
            f"num_epochs: {num_epochs}\n"
            f"learning_rate: {learning_rate}\n"
            f"cutoff_len: {cutoff_len}\n"
            f"val_set_size: {val_set_size}\n"
            f"lora_r: {lora_r}\n"
            f"lora_alpha: {lora_alpha}\n"
            f"lora_dropout: {lora_dropout}\n"
            f"lora_target_modules: {lora_target_modules}\n"
            f"train_on_inputs: {train_on_inputs}\n"
            f"group_by_length: {group_by_length}\n"
            f"wandb_project: {wandb_project}\n"
            f"wandb_run_name: {wandb_run_name}\n"
            f"wandb_watch: {wandb_watch}\n"
            f"wandb_log_model: {wandb_log_model}\n"
            f"resume_from_checkpoint: {resume_from_checkpoint or False}\n"
            f"prompt template: {prompt_template_name}\n"
        )
    assert (
        base_model
    ), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
    gradient_accumulation_steps = batch_size // micro_batch_size
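    # Note: with the defaults (batch_size=128, micro_batch_size=4) this yields
    # 32 accumulation steps, so each optimizer step sees an effective batch of 128.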

    prompter = Prompter(prompt_template_name)

    device_map = "auto"
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if ddp:
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
        gradient_accumulation_steps = gradient_accumulation_steps // world_size
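        # Under DDP each rank already contributes micro_batch_size examples per
        # forward pass, so accumulation steps are divided by world_size to keep
        # the effective global batch at batch_size.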

    # Check if parameter passed or if set within environ
    use_wandb = len(wandb_project) > 0 or (
        "WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
    )
    # Only overwrite environ if wandb param passed
    if len(wandb_project) > 0:
        os.environ["WANDB_PROJECT"] = wandb_project
    if len(wandb_watch) > 0:
        os.environ["WANDB_WATCH"] = wandb_watch
    if len(wandb_log_model) > 0:
        os.environ["WANDB_LOG_MODEL"] = wandb_log_model

    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map=device_map,
    )

    tokenizer = LlamaTokenizer.from_pretrained(base_model)

    tokenizer.pad_token_id = (
        0  # unk. we want this to be different from the eos token
    )
    tokenizer.padding_side = "left"  # Allow batched inference
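    # LlamaTokenizer ships without a pad token; id 0 (<unk>) is used here so that
    # padding can never be confused with eos (id 2) when building the labels.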

    def tokenize(prompt, add_eos_token=True):
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        result["labels"] = result["input_ids"].copy()

        return result
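
    # Illustrative shape of the output (the token ids below are hypothetical):
    #   tokenize("Hi") -> {"input_ids": [1, 6324, 2],
    #                      "attention_mask": [1, 1, 1],
    #                      "labels": [1, 6324, 2]}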

    def generate_and_tokenize_prompt(data_point):
        full_prompt = prompter.generate_prompt(
            data_point["instruction"],
            data_point["input"],
            data_point["output"],
        )
        tokenized_full_prompt = tokenize(full_prompt)
        if not train_on_inputs:
            user_prompt = prompter.generate_prompt(
                data_point["instruction"], data_point["input"]
            )
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt["input_ids"])

            tokenized_full_prompt["labels"] = [
                -100
            ] * user_prompt_len + tokenized_full_prompt["labels"][
                user_prompt_len:
            ]  # could be sped up, probably
        return tokenized_full_prompt
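
    # -100 is the ignore_index of PyTorch's cross-entropy loss, so the masked
    # prompt tokens contribute nothing to the gradient; with train_on_inputs=False
    # the loss is computed on the response tokens only.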

    model = prepare_model_for_int8_training(model)

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, config)
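    # With r=8 on q_proj/v_proj, LoRA adds roughly 4.2M trainable parameters to a
    # 7B LLaMA model (on the order of 0.06% of the total); the 8-bit base weights
    # stay frozen.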

    if data_path.endswith(".json") or data_path.endswith(".jsonl"):
        data = load_dataset("json", data_files=data_path)
    else:
        data = load_dataset(data_path)

    if resume_from_checkpoint:
        # Check the available weights and load them
        checkpoint_name = os.path.join(
            resume_from_checkpoint, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                resume_from_checkpoint, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
            resume_from_checkpoint = (
                False  # So the trainer won't try loading its state
            )
        # The two files above have a different name depending on how they were
        # saved, but are actually the same.
        if os.path.exists(checkpoint_name):
            print(f"Restarting from {checkpoint_name}")
            adapters_weights = torch.load(checkpoint_name)
            set_peft_model_state_dict(model, adapters_weights)  # loads in place
        else:
            print(f"Checkpoint {checkpoint_name} not found")

    model.print_trainable_parameters()  # Be more transparent about the % of trainable params.
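
    # Hold out val_set_size examples for evaluation. An integer test_size is an
    # absolute example count (not a fraction), and seed=42 keeps the split reproducible.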
    if val_set_size > 0:
        train_val = data["train"].train_test_split(
            test_size=val_set_size, shuffle=True, seed=42
        )
        train_data = (
            train_val["train"].shuffle().map(generate_and_tokenize_prompt)
        )
        val_data = (
            train_val["test"].shuffle().map(generate_and_tokenize_prompt)
        )
    else:
        train_data = data["train"].shuffle().map(generate_and_tokenize_prompt)
        val_data = None

    if not ddp and torch.cuda.device_count() > 1:
        # keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
        model.is_parallelizable = True
        model.model_parallel = True

    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=10,
            optim="adamw_torch",
            evaluation_strategy="steps" if val_set_size > 0 else "no",
            save_strategy="steps",
            eval_steps=200 if val_set_size > 0 else None,
            save_steps=200,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True if val_set_size > 0 else False,
            ddp_find_unused_parameters=False if ddp else None,
            group_by_length=group_by_length,
            report_to="wandb" if use_wandb else None,
            run_name=wandb_run_name if use_wandb else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
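    # DataCollatorForSeq2Seq pads each batch dynamically to its longest sequence
    # (rounded up to a multiple of 8 for tensor-core efficiency) and pads labels
    # with -100, so padding never contributes to the loss.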

    model.config.use_cache = False

    old_state_dict = model.state_dict
    model.state_dict = (
        lambda self, *_, **__: get_peft_model_state_dict(
            self, old_state_dict()
        )
    ).__get__(model, type(model))
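    # Monkey-patch state_dict so that Trainer checkpoints (and save_pretrained
    # below) serialize only the small LoRA adapter weights, not the full base model.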

    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    trainer.train(resume_from_checkpoint=resume_from_checkpoint)

    model.save_pretrained(output_dir)

    print(
        "\n If there's a warning about missing keys above, please disregard :)"
    )


if __name__ == "__main__":
    fire.Fire(train)
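
# Example invocation (illustrative; adjust the model and data paths to your setup):
#   python finetune.py \
#       --base_model 'huggyllama/llama-7b' \
#       --data_path 'yahma/alpaca-cleaned' \
#       --output_dir './lora-alpaca'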