finetune.py

import os
import sys
from typing import List

import fire
import torch
import transformers
from datasets import load_dataset

"""
Unused imports:
import torch.nn as nn
import bitsandbytes as bnb
"""

from peft import (  # noqa: E402
    LoraConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
    set_peft_model_state_dict,
)
from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: F402
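

# Example invocation (illustrative only; the model ID comes from the --base_model
# assertion below and the other values are this script's defaults):
#   python finetune.py \
#       --base_model='decapoda-research/llama-7b-hf' \
#       --data_path='yahma/alpaca-cleaned' \
#       --output_dir='./lora-alpaca'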
def train(
    # model/data params
    base_model: str = "",  # the only required argument
    data_path: str = "yahma/alpaca-cleaned",
    output_dir: str = "./lora-alpaca",
    # training hyperparams
    batch_size: int = 128,
    micro_batch_size: int = 4,
    num_epochs: int = 3,
    learning_rate: float = 3e-4,
    cutoff_len: int = 256,
    val_set_size: int = 2000,
    # lora hyperparams
    lora_r: int = 8,
    lora_alpha: int = 16,
    lora_dropout: float = 0.05,
    lora_target_modules: List[str] = [
        "q_proj",
        "v_proj",
    ],
    # llm hyperparams
    train_on_inputs: bool = True,  # if False, masks out inputs in loss
    group_by_length: bool = False,  # faster, but produces an odd training loss curve
    # wandb params
    wandb_project: str = "",
    wandb_run_name: str = "",
    wandb_watch: str = "",  # options: false | gradients | all
    wandb_log_model: str = "",  # options: false | true
    resume_from_checkpoint: str = None,  # either training checkpoint or final adapter
):
    if int(os.environ.get("LOCAL_RANK", 0)) == 0:
        print(
            f"Training Alpaca-LoRA model with params:\n"
            f"base_model: {base_model}\n"
            f"data_path: {data_path}\n"
            f"output_dir: {output_dir}\n"
            f"batch_size: {batch_size}\n"
            f"micro_batch_size: {micro_batch_size}\n"
            f"num_epochs: {num_epochs}\n"
            f"learning_rate: {learning_rate}\n"
            f"cutoff_len: {cutoff_len}\n"
            f"val_set_size: {val_set_size}\n"
            f"lora_r: {lora_r}\n"
            f"lora_alpha: {lora_alpha}\n"
            f"lora_dropout: {lora_dropout}\n"
            f"lora_target_modules: {lora_target_modules}\n"
            f"train_on_inputs: {train_on_inputs}\n"
            f"group_by_length: {group_by_length}\n"
            f"wandb_project: {wandb_project}\n"
            f"wandb_run_name: {wandb_run_name}\n"
            f"wandb_watch: {wandb_watch}\n"
            f"wandb_log_model: {wandb_log_model}\n"
            f"resume_from_checkpoint: {resume_from_checkpoint}\n"
        )
    assert (
        base_model
    ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"
    gradient_accumulation_steps = batch_size // micro_batch_size

    device_map = "auto"
    world_size = int(os.environ.get("WORLD_SIZE", 1))
    ddp = world_size != 1
    if ddp:
        device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}
        gradient_accumulation_steps = gradient_accumulation_steps // world_size

    # Check if parameter passed or if set within environ
    use_wandb = len(wandb_project) > 0 or (
        "WANDB_PROJECT" in os.environ and len(os.environ["WANDB_PROJECT"]) > 0
    )
    # Only overwrite environ if wandb param passed
    if len(wandb_project) > 0:
        os.environ["WANDB_PROJECT"] = wandb_project
    if len(wandb_watch) > 0:
        os.environ["WANDB_WATCH"] = wandb_watch
    if len(wandb_log_model) > 0:
        os.environ["WANDB_LOG_MODEL"] = wandb_log_model
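
    # Load the base model with 8-bit weights (bitsandbytes) and fp16 activations;
    # device_map is "auto" or pinned to the local rank when running under DDP.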
    model = LlamaForCausalLM.from_pretrained(
        base_model,
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map=device_map,
    )

    tokenizer = LlamaTokenizer.from_pretrained(base_model)

    tokenizer.pad_token_id = (
        0  # unk. we want this to be different from the eos token
    )
    tokenizer.padding_side = "left"  # Allow batched inference
    def tokenize(prompt, add_eos_token=True):
        # there's probably a way to do this with the tokenizer settings
        # but again, gotta move fast
        result = tokenizer(
            prompt,
            truncation=True,
            max_length=cutoff_len,
            padding=False,
            return_tensors=None,
        )
        if (
            result["input_ids"][-1] != tokenizer.eos_token_id
            and len(result["input_ids"]) < cutoff_len
            and add_eos_token
        ):
            result["input_ids"].append(tokenizer.eos_token_id)
            result["attention_mask"].append(1)

        result["labels"] = result["input_ids"].copy()

        return result
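
    # Tokenize the full Alpaca prompt; when train_on_inputs is False, the prompt
    # portion of the labels is replaced with -100 (the ignore index of the
    # causal-LM loss) so only the response tokens contribute to the loss.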
    def generate_and_tokenize_prompt(data_point):
        full_prompt = generate_prompt(data_point)
        tokenized_full_prompt = tokenize(full_prompt)
        if not train_on_inputs:
            user_prompt = generate_prompt({**data_point, "output": ""})
            tokenized_user_prompt = tokenize(user_prompt, add_eos_token=False)
            user_prompt_len = len(tokenized_user_prompt["input_ids"])

            tokenized_full_prompt["labels"] = [
                -100
            ] * user_prompt_len + tokenized_full_prompt["labels"][
                user_prompt_len:
            ]  # could be sped up, probably
        return tokenized_full_prompt
    model = prepare_model_for_int8_training(model)

    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=lora_target_modules,
        lora_dropout=lora_dropout,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, config)

    if data_path.endswith(".json") or data_path.endswith(".jsonl"):
        data = load_dataset("json", data_files=data_path)
    else:
        data = load_dataset(data_path)
    if resume_from_checkpoint:
        # Check the available weights and load them
        checkpoint_name = os.path.join(
            resume_from_checkpoint, "pytorch_model.bin"
        )  # Full checkpoint
        if not os.path.exists(checkpoint_name):
            checkpoint_name = os.path.join(
                resume_from_checkpoint, "adapter_model.bin"
            )  # only LoRA model - LoRA config above has to fit
            resume_from_checkpoint = (
                False  # So the trainer won't try loading its state
            )
        # The two files above have a different name depending on how they were saved, but are actually the same.
        if os.path.exists(checkpoint_name):
            print(f"Restarting from {checkpoint_name}")
            adapters_weights = torch.load(checkpoint_name)
            model = set_peft_model_state_dict(model, adapters_weights)
        else:
            print(f"Checkpoint {checkpoint_name} not found")

    model.print_trainable_parameters()  # Be more transparent about the % of trainable params.
    if val_set_size > 0:
        train_val = data["train"].train_test_split(
            test_size=val_set_size, shuffle=True, seed=42
        )
        train_data = (
            train_val["train"].shuffle().map(generate_and_tokenize_prompt)
        )
        val_data = (
            train_val["test"].shuffle().map(generate_and_tokenize_prompt)
        )
    else:
        train_data = data["train"].shuffle().map(generate_and_tokenize_prompt)
        val_data = None

    if not ddp and torch.cuda.device_count() > 1:
        # keeps Trainer from trying its own DataParallelism when more than 1 gpu is available
        model.is_parallelizable = True
        model.model_parallel = True
    trainer = transformers.Trainer(
        model=model,
        train_dataset=train_data,
        eval_dataset=val_data,
        args=transformers.TrainingArguments(
            per_device_train_batch_size=micro_batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            warmup_steps=100,
            num_train_epochs=num_epochs,
            learning_rate=learning_rate,
            fp16=True,
            logging_steps=10,
            optim="adamw_torch",
            evaluation_strategy="steps" if val_set_size > 0 else "no",
            save_strategy="steps",
            eval_steps=200 if val_set_size > 0 else None,
            save_steps=200,
            output_dir=output_dir,
            save_total_limit=3,
            load_best_model_at_end=True if val_set_size > 0 else False,
            ddp_find_unused_parameters=False if ddp else None,
            group_by_length=group_by_length,
            report_to="wandb" if use_wandb else None,
            run_name=wandb_run_name if use_wandb else None,
        ),
        data_collator=transformers.DataCollatorForSeq2Seq(
            tokenizer, pad_to_multiple_of=8, return_tensors="pt", padding=True
        ),
    )
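
    # Disable the KV cache during training and patch state_dict so that
    # save_pretrained (and Trainer checkpoints) persist only the LoRA adapter
    # weights rather than the full 8-bit base model.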
    model.config.use_cache = False

    old_state_dict = model.state_dict
    model.state_dict = (
        lambda self, *_, **__: get_peft_model_state_dict(
            self, old_state_dict()
        )
    ).__get__(model, type(model))

    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)

    trainer.train(resume_from_checkpoint=resume_from_checkpoint)

    model.save_pretrained(output_dir)

    print(
        "\n If there's a warning about missing keys above, please disregard :)"
    )
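

# Alpaca-format prompt template: an instruction block, an optional "### Input"
# block for extra context, and the expected response appended for supervised
# fine-tuning.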
def generate_prompt(data_point):
    # sorry about the formatting disaster gotta move fast
    if data_point["input"]:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.  # noqa: E501

### Instruction:
{data_point["instruction"]}

### Input:
{data_point["input"]}

### Response:
{data_point["output"]}"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.  # noqa: E501

### Instruction:
{data_point["instruction"]}

### Response:
{data_point["output"]}"""


if __name__ == "__main__":
    fire.Fire(train)