# generate.py

import os
import sys

import fire
import gradio as gr
import torch
import transformers
from peft import PeftModel
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

from utils.prompter import Prompter
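
# Pick the best available device: CUDA if present, Apple MPS on Apple
# silicon, otherwise fall back to CPU.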
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except:  # noqa: E722
    pass
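

# `fire.Fire(main)` at the bottom of this file turns main()'s keyword
# arguments into command-line flags, e.g.:
#   python generate.py --base_model 'decapoda-research/llama-7b-hf'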
def main(
    load_8bit: bool = False,
    base_model: str = "",
    lora_weights: str = "tloen/alpaca-lora-7b",
    prompt_template: str = "",  # The prompt template to use; defaults to alpaca.
    server_name: str = "0.0.0.0",  # Listen on all interfaces by binding "0.0.0.0".
    share_gradio: bool = False,
):
    base_model = base_model or os.environ.get("BASE_MODEL", "")
    assert (
        base_model
    ), "Please specify a --base_model, e.g. --base_model='decapoda-research/llama-7b-hf'"

    prompter = Prompter(prompt_template)
    tokenizer = LlamaTokenizer.from_pretrained(base_model)
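
    # Load the base LLaMA weights, then apply the LoRA adapter on top; the
    # loading arguments differ per device (8-bit loading is CUDA-only here).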
    if device == "cuda":
        model = LlamaForCausalLM.from_pretrained(
            base_model,
            load_in_8bit=load_8bit,
            torch_dtype=torch.float16,
            device_map="auto",
        )
        model = PeftModel.from_pretrained(
            model,
            lora_weights,
            torch_dtype=torch.float16,
        )
    elif device == "mps":
        model = LlamaForCausalLM.from_pretrained(
            base_model,
            device_map={"": device},
            torch_dtype=torch.float16,
        )
        model = PeftModel.from_pretrained(
            model,
            lora_weights,
            device_map={"": device},
            torch_dtype=torch.float16,
        )
    else:
        model = LlamaForCausalLM.from_pretrained(
            base_model, device_map={"": device}, low_cpu_mem_usage=True
        )
        model = PeftModel.from_pretrained(
            model,
            lora_weights,
            device_map={"": device},
        )
    # unwind broken decapoda-research config
    model.config.pad_token_id = tokenizer.pad_token_id = 0  # unk
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2

    if not load_8bit:
        model.half()  # seems to fix bugs for some users.

    model.eval()
    if torch.__version__ >= "2" and sys.platform != "win32":
        model = torch.compile(model)
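
    # Build a prompt from the template, generate with the requested sampling
    # parameters, then strip the prompt back off the decoded output.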
    def evaluate(
        instruction,
        input=None,
        temperature=0.1,
        top_p=0.75,
        top_k=40,
        num_beams=4,
        max_new_tokens=128,
        **kwargs,
    ):
        prompt = prompter.generate_prompt(instruction, input)
        inputs = tokenizer(prompt, return_tensors="pt")
        input_ids = inputs["input_ids"].to(device)
        generation_config = GenerationConfig(
            temperature=temperature,
            top_p=top_p,
            top_k=top_k,
            num_beams=num_beams,
            **kwargs,
        )
        with torch.no_grad():
            generation_output = model.generate(
                input_ids=input_ids,
                generation_config=generation_config,
                return_dict_in_generate=True,
                output_scores=True,
                max_new_tokens=max_new_tokens,
            )
        s = generation_output.sequences[0]
        output = tokenizer.decode(s)
        return prompter.get_response(output)
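
    # Expose evaluate() through a Gradio UI: textboxes for the prompt fields
    # and sliders for the sampling parameters.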
    gr.Interface(
        fn=evaluate,
        inputs=[
            gr.components.Textbox(
                lines=2,
                label="Instruction",
                placeholder="Tell me about alpacas.",
            ),
            gr.components.Textbox(lines=2, label="Input", placeholder="none"),
            gr.components.Slider(
                minimum=0, maximum=1, value=0.1, label="Temperature"
            ),
            gr.components.Slider(
                minimum=0, maximum=1, value=0.75, label="Top p"
            ),
            gr.components.Slider(
                minimum=0, maximum=100, step=1, value=40, label="Top k"
            ),
            gr.components.Slider(
                minimum=1, maximum=4, step=1, value=4, label="Beams"
            ),
            gr.components.Slider(
                minimum=1, maximum=2000, step=1, value=128, label="Max tokens"
            ),
        ],
        outputs=[
            gr.components.Textbox(
                lines=5,
                label="Output",
            )
        ],
        title="🦙🌲 Alpaca-LoRA",
        description="Alpaca-LoRA is a 7B-parameter LLaMA model finetuned to follow instructions. It is trained on the [Stanford Alpaca](https://github.com/tatsu-lab/stanford_alpaca) dataset and makes use of the Huggingface LLaMA implementation. For more information, please visit [the project's website](https://github.com/tloen/alpaca-lora).",  # noqa: E501
    ).launch(server_name=server_name, share=share_gradio)
    # Old testing code follows.
    """
    # testing code for readme
    for instruction in [
        "Tell me about alpacas.",
        "Tell me about the president of Mexico in 2019.",
        "Tell me about the king of France in 2019.",
        "List all Canadian provinces in alphabetical order.",
        "Write a Python program that prints the first 10 Fibonacci numbers.",
        "Write a program that prints the numbers from 1 to 100. But for multiples of three print 'Fizz' instead of the number and for the multiples of five print 'Buzz'. For numbers which are multiples of both three and five print 'FizzBuzz'.",  # noqa: E501
        "Tell me five words that rhyme with 'shock'.",
        "Translate the sentence 'I have no mouth but I must scream' into Spanish.",
        "Count up from 1 to 500.",
    ]:
        print("Instruction:", instruction)
        print("Response:", evaluate(instruction))
        print()
    """


if __name__ == "__main__":
    fire.Fire(main)