# finetune.py
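# Fine-tunes a LLaMA base model with LoRA adapters on an Alpaca-style instruction
# dataset, loading the base weights in 8-bit and training with the Hugging Face Trainer.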

import os
import sys
from typing import List

import fire
import torch
import transformers
from datasets import load_dataset

"""
Unused imports:
import torch.nn as nn
import bitsandbytes as bnb
"""

from peft import (  # noqa: E402
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
    set_peft_model_state_dict,
)
from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: F402
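# NOTE: prepare_model_for_int8_training comes from the peft version this script
# targets; newer peft releases rename it to prepare_model_for_kbit_training.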


def train(
    # model/data params
    base_model: str = "",  # the only required argument
    data_path: str = "./alpaca_data_cleaned.json",
    output_dir: str = "./lora-alpaca",
    # training hyperparams
    batch_size: int = 128,
    micro_batch_size: int = 4,
    num_epochs: int = 3,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 2000,
    # lora hyperparams
    lora_r: int = 8,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: List[str] = [
        "q_proj",
        "v_proj",
    ],
    # llm hyperparams
    train_on_inputs: bool = True,  # if False, masks out inputs in loss
    group_by_length: bool = False,  # faster, but produces an odd training loss curve
    # wandb params
    wandb_project: str = "",
    wandb_run_name: str = "",
    wandb_watch: str = "",  # options: false | gradients | all
    wandb_log_model: str = "",  # options: false | true
    resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
):
    print(
        f"Training Alpaca-LoRA model with params:\n"
        f"base_model: {base_model}\n"
        f"data_path: {data_path}\n"
        f"output_dir: {output_dir}\n"
        f"batch_size: {batch_size}\n"
        f"micro_batch_size: {micro_batch_size}\n"
        f"num_epochs: {num_epochs}\n"
        f"learning_rate: {learning_rate}\n"
        f"cutoff_len: {cutoff_len}\n"
        f"val_set_size: {val_set_size}\n"
        f"lora_r: {lora_r}\n"
        f"lora_alpha: {lora_alpha}\n"
        f"lora_dropout: {lora_dropout}\n"
        f"lora_target_modules: {lora_target_modules}\n"
        f"train_on_inputs: {train_on_inputs}\n"
        f"group_by_length: {group_by_length}\n"
        f"wandb_project: {wandb_project}\n"
        f"wandb_run_name: {wandb_run_name}\n"
        f"wandb_watch: {wandb_watch}\n"
        f"wandb_log_model: {wandb_log_model}\n"
        f"resume_from_checkpoint: {resume_from_checkpoint}\n"
    )
    assert (
        base_model
    ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
    gradient_accumulation_steps = batch_size // micro_batch_size

    device_map = "auto"
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if ddp:
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
        gradient_accumulation_steps = gradient_accumulation_steps // world_size
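    # Under DDP each rank processes micro_batch_size examples per step, so the
    # accumulation steps are divided by world_size to keep the global effective
    # batch size at batch_size.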
    # Check if parameter passed or if set within environ
    use_wandb = len(wandb_project) > 0 or (
        "WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
    )
    # Only overwrite environ if wandb param passed
    if len(wandb_project) > 0:
        os.environ["WANDB_PROJECT"] = wandb_project
    if len(wandb_watch) > 0:
        os.environ["WANDB_WATCH"] = wandb_watch
    if len(wandb_log_model) > 0:
        os.environ["WANDB_LOG_MODEL"] = wandb_log_model
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map=device_map,
    )

    tokenizer = LlamaTokenizer.from_pretrained(base_model)

    tokenizer.pad_token_id = (
        0  # unk. we want this to be different from the eos token
    )
    tokenizer.padding_side = "left"  # Allow batched inference
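    # NOTE: load_in_8bit=True relies on the bitsandbytes package at runtime,
    # even though it is not imported in this file.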
    def tokenize(prompt, add_eos_token=True):
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        result["labels"] = result["input_ids"].copy()

        return result
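    # tokenize() sets labels to a copy of input_ids: for causal LM training the
    # model shifts the labels internally, so no manual offset is needed here.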
    def generate_and_tokenize_prompt(data_point):
        full_prompt = generate_prompt(data_point)
        tokenized_full_prompt = tokenize(full_prompt)
        if not train_on_inputs:
            user_prompt = generate_prompt({**data_point, "output": ""})
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt["input_ids"])

            tokenized_full_prompt["labels"] = [
                -100
            ] * user_prompt_len + tokenized_full_prompt["labels"][
                user_prompt_len:
            ]  # could be sped up, probably
        return tokenized_full_prompt
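    # -100 is the ignore_index of PyTorch's cross-entropy loss, so when
    # train_on_inputs is False the prompt portion contributes nothing to the loss.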
    model = prepare_model_for_int8_training(model)

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, config)
    if data_path.endswith(".json"):  # todo: support jsonl
        data = load_dataset("json", data_files=data_path)
    else:
        data = load_dataset(data_path)
    if resume_from_checkpoint:
        # Check the available weights and load them
        checkpoint_name = os.path.join(
            resume_from_checkpoint, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                resume_from_checkpoint, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
            resume_from_checkpoint = (
                False  # So the trainer won't try loading its state
            )
        # The two files above have a different name depending on how they were saved, but are actually the same.
        if os.path.exists(checkpoint_name):
            print(f"Restarting from {checkpoint_name}")
            adapters_weights = torch.load(checkpoint_name)
            model = set_peft_model_state_dict(model, adapters_weights)
        else:
            print(f"Checkpoint {checkpoint_name} not found")

    model.print_trainable_parameters()  # Be more transparent about the % of trainable params.
    if val_set_size > 0:
        train_val = data["train"].train_test_split(
            test_size=val_set_size, shuffle=True, seed=42
        )
        train_data = (
            train_val["train"].shuffle().map(generate_and_tokenize_prompt)
        )
        val_data = (
            train_val["test"].shuffle().map(generate_and_tokenize_prompt)
        )
    else:
        train_data = data["train"].shuffle().map(generate_and_tokenize_prompt)
        val_data = None
    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=10,
            optim="adamw_torch",
            evaluation_strategy="steps" if val_set_size > 0 else "no",
            save_strategy="steps",
            eval_steps=200 if val_set_size > 0 else None,
            save_steps=200,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True if val_set_size > 0 else False,
            ddp_find_unused_parameters=False if ddp else None,
            group_by_length=group_by_length,
            report_to="wandb" if use_wandb else None,
            run_name=wandb_run_name if use_wandb else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
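    # DataCollatorForSeq2Seq pads input_ids with the pad token and labels with -100,
    # so padded positions are ignored by the loss; pad_to_multiple_of=8 suits fp16 kernels.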
    model.config.use_cache = False

    old_state_dict = model.state_dict
    model.state_dict = (
        lambda self, *_, **__: get_peft_model_state_dict(
            self, old_state_dict()
        )
    ).__get__(model, type(model))
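    # Overriding state_dict means checkpoints written by the Trainer contain only
    # the LoRA adapter weights rather than the full base model.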
    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    trainer.train(resume_from_checkpoint=resume_from_checkpoint)

    model.save_pretrained(output_dir)

    print(
        "\n If there's a warning about missing keys above, please disregard :)"
    )


def generate_prompt(data_point):
    # sorry about the formatting disaster gotta move fast
    if data_point["input"]:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.  # noqa: E501

### Instruction:
{data_point["instruction"]}

### Input:
{data_point["input"]}

### Response:
{data_point["output"]}"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.  # noqa: E501

### Instruction:
{data_point["instruction"]}

### Response:
{data_point["output"]}"""


if __name__ == "__main__":
    fire.Fire(train)
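
# Example invocation (paths and model name are illustrative; fire maps CLI flags
# to the train() parameters):
#   python finetune.py \
#       --base_model='decapoda-research/llama-7b-hf' \
#       --data_path='./alpaca_data_cleaned.json' \
#       --output_dir='./lora-alpaca'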