finetune.py
import os
import sys
from typing import List

import fire
import torch
import transformers
from datasets import load_dataset

"""
Unused imports:
import torch.nn as nn
import bitsandbytes as bnb
"""

# Catch when user should re-install transformers library
assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"  # noqa: E501

from peft import (  # noqa: E402
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
    set_peft_model_state_dict,
)
from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: F402


def train(
    # model/data params
    base_model: str = "",  # the only required argument
    data_path: str = "./alpaca_data_cleaned.json",
    output_dir: str = "./lora-alpaca",
    # training hyperparams
    batch_size: int = 128,
    micro_batch_size: int = 4,
    num_epochs: int = 3,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 2000,
    # lora hyperparams
    lora_r: int = 8,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: List[str] = [
        "q_proj",
        "v_proj",
    ],
    # llm hyperparams
    train_on_inputs: bool = True,  # if False, masks out inputs in loss
    group_by_length: bool = False,  # faster, but produces an odd training loss curve
    # wandb params
    wandb_project: str = "",
    wandb_run_name: str = "",
    wandb_watch: str = "",  # options: false | gradients | all
    wandb_log_model: str = "",  # options: false | true
    resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
):
    print(
        f"Training Alpaca-LoRA model with params:\n"
        f"base_model: {base_model}\n"
        f"data_path: {data_path}\n"
        f"output_dir: {output_dir}\n"
        f"batch_size: {batch_size}\n"
        f"micro_batch_size: {micro_batch_size}\n"
        f"num_epochs: {num_epochs}\n"
        f"learning_rate: {learning_rate}\n"
        f"cutoff_len: {cutoff_len}\n"
        f"val_set_size: {val_set_size}\n"
        f"lora_r: {lora_r}\n"
        f"lora_alpha: {lora_alpha}\n"
        f"lora_dropout: {lora_dropout}\n"
        f"lora_target_modules: {lora_target_modules}\n"
        f"train_on_inputs: {train_on_inputs}\n"
        f"group_by_length: {group_by_length}\n"
        f"wandb_project: {wandb_project}\n"
        f"wandb_run_name: {wandb_run_name}\n"
        f"wandb_watch: {wandb_watch}\n"
        f"wandb_log_model: {wandb_log_model}\n"
        f"resume_from_checkpoint: {resume_from_checkpoint}\n"
    )
    assert (
        base_model
    ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"

    gradient_accumulation_steps = batch_size // micro_batch_size

    device_map = "auto"
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if ddp:
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
        gradient_accumulation_steps = gradient_accumulation_steps // world_size
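
    # Note: the effective global batch size works out to
    # micro_batch_size * gradient_accumulation_steps * world_size. With the
    # defaults (batch_size=128, micro_batch_size=4) on a single GPU, that is
    # 4 examples per forward pass, accumulated over 32 steps, for 128
    # examples per optimizer update.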

    # Check if parameter passed or if set within environ
    use_wandb = len(wandb_project) > 0 or (
        "WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
    )
    # Only overwrite environ if wandb param passed
    if len(wandb_project) > 0:
        os.environ["WANDB_PROJECT"] = wandb_project
    if len(wandb_watch) > 0:
        os.environ["WANDB_WATCH"] = wandb_watch
    if len(wandb_log_model) > 0:
        os.environ["WANDB_LOG_MODEL"] = wandb_log_model

    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=True,
        device_map=device_map,
    )

    tokenizer = LlamaTokenizer.from_pretrained(base_model)

    tokenizer.pad_token_id = (
        0  # unk. we want this to be different from the eos token
    )
    tokenizer.padding_side = "left"  # Allow batched inference

    def tokenize(prompt, add_eos_token=True):
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        result["labels"] = result["input_ids"].copy()

        return result
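
    # For reference: truncation at cutoff_len can drop the trailing EOS
    # token, which is why the check above only re-appends EOS when the
    # sequence is still shorter than cutoff_len. "labels" is a plain copy of
    # "input_ids" because LlamaForCausalLM shifts the labels internally when
    # computing the causal-LM loss.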

    def generate_and_tokenize_prompt(data_point):
        full_prompt = generate_prompt(data_point)
        tokenized_full_prompt = tokenize(full_prompt)
        if not train_on_inputs:
            user_prompt = generate_prompt({**data_point, "output": ""})
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt["input_ids"])

            tokenized_full_prompt["labels"] = [
                -100
            ] * user_prompt_len + tokenized_full_prompt["labels"][
                user_prompt_len:
            ]  # could be sped up, probably
        return tokenized_full_prompt
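
    # Note: -100 is the ignore_index of PyTorch's CrossEntropyLoss, so the
    # masked prompt tokens contribute nothing to the loss; with
    # train_on_inputs=False only the response tokens are learned from.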

    model = prepare_model_for_int8_training(model)

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
    )
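    # Note: LoRA scales each adapter update by lora_alpha / r, so the
    # defaults (alpha=16, r=8) give a scaling factor of 2. "q_proj" and
    # "v_proj" are the attention query/value projections of the LLaMA
    # architecture, the modules the adapters are injected into here.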
    model = get_peft_model(model, config)

    if data_path.endswith(".json"):  # todo: support jsonl
        data = load_dataset("json", data_files=data_path)
    else:
        data = load_dataset(data_path)

    if resume_from_checkpoint:
        # Check the available weights and load them
        checkpoint_name = os.path.join(
            resume_from_checkpoint, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                resume_from_checkpoint, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
            resume_from_checkpoint = (
                False  # So the trainer won't try loading its state
            )
        # The two files above have a different name depending on how they were saved, but are actually the same.
        if os.path.exists(checkpoint_name):
            print(f"Restarting from {checkpoint_name}")
            adapters_weights = torch.load(checkpoint_name)
            model = set_peft_model_state_dict(model, adapters_weights)
        else:
            print(f"Checkpoint {checkpoint_name} not found")

    model.print_trainable_parameters()  # Be more transparent about the % of trainable params.

    if val_set_size > 0:
        train_val = data["train"].train_test_split(
            test_size=val_set_size, shuffle=True, seed=42
        )
        train_data = (
            train_val["train"].shuffle().map(generate_and_tokenize_prompt)
        )
        val_data = (
            train_val["test"].shuffle().map(generate_and_tokenize_prompt)
        )
    else:
        train_data = data["train"].shuffle().map(generate_and_tokenize_prompt)
        val_data = None

    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=10,
            optim="adamw_torch",
            evaluation_strategy="steps" if val_set_size > 0 else "no",
            save_strategy="steps",
            eval_steps=200 if val_set_size > 0 else None,
            save_steps=200,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True if val_set_size > 0 else False,
            ddp_find_unused_parameters=False if ddp else None,
            group_by_length=group_by_length,
            report_to="wandb" if use_wandb else None,
            run_name=wandb_run_name if use_wandb else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
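    # Note: load_best_model_at_end requires save_steps to be a round
    # multiple of eval_steps, which holds here (both are 200).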
    model.config.use_cache = False

    old_state_dict = model.state_dict
    model.state_dict = (
        lambda self, *_, **__: get_peft_model_state_dict(
            self, old_state_dict()
        )
    ).__get__(model, type(model))
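
    # The monkey-patch above makes Trainer checkpoints save only the LoRA
    # adapter weights (via get_peft_model_state_dict) rather than the full
    # base model, keeping checkpoints small.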

    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    trainer.train(resume_from_checkpoint=resume_from_checkpoint)

    model.save_pretrained(output_dir)

    print(
        "\n If there's a warning about missing keys above, please disregard :)"
    )


def generate_prompt(data_point):
    # sorry about the formatting disaster gotta move fast
    # (the `# noqa: E501` markers previously sat *inside* these f-strings and
    # leaked into every training prompt; they have been removed)
    if data_point["input"]:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
{data_point["instruction"]}

### Input:
{data_point["input"]}

### Response:
{data_point["output"]}"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{data_point["instruction"]}

### Response:
{data_point["output"]}"""
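

# For illustration, a data point like
# {"instruction": "Name a color.", "input": "", "output": "Blue."}
# renders through the second template as:
#
#   Below is an instruction that describes a task. Write a response that
#   appropriately completes the request.
#
#   ### Instruction:
#   Name a color.
#
#   ### Response:
#   Blue.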


if __name__ == "__main__":
    fire.Fire(train)
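
# Example invocation (illustrative; adjust the model name and paths to your
# setup):
#   python finetune.py \
#       --base_model 'decapoda-research/llama-7b-hf' \
#       --data_path './alpaca_data_cleaned.json' \
#       --output_dir './lora-alpaca'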