
Add Scaled RoPE Embedding

dongzn 2 years ago
parent
commit
e80e48e2da
3 changed files with 114 additions and 6 deletions
  1. finetune.py (+15 −6)
  2. llama_rope_scaled_monkey_patch.py (+64 −0)
  3. utils/lima_prompter.py (+35 −0)

finetune.py (+15 −6)

@@ -23,6 +23,13 @@ from peft import (
 from transformers import LlamaForCausalLM, LlamaTokenizer
 
 from utils.prompter import Prompter
+from utils.lima_prompter import LimaPrompter
+
+from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
+
+
+# Extend the context size to 8k; must run before the model is instantiated
+replace_llama_rope_with_scaled_rope()
 
 
 def train(
@@ -88,7 +95,8 @@ def train(
     ), "Please specify a --base_model, e.g. --base_model='huggyllama/llama-7b'"
     gradient_accumulation_steps = batch_size // micro_batch_size
 
-    prompter = Prompter(prompt_template_name)
+    # prompter = Prompter(prompt_template_name)
+    prompter = LimaPrompter(prompt_template_name)
 
     device_map = "auto"
     world_size = int(os.environ.get("WORLD_SIZE", 1))
@@ -146,11 +154,12 @@ def train(
         return result
 
     def generate_and_tokenize_prompt(data_point):
-        full_prompt = prompter.generate_prompt(
-            data_point["instruction"],
-            data_point["input"],
-            data_point["output"],
-        )
+        # full_prompt = prompter.generate_prompt(
+        #     data_point["instruction"],
+        #     data_point["input"],
+        #     data_point["output"],
+        # )
+        full_prompt = prompter.generate_prompt(data_point["conversations"])
         tokenized_full_prompt = tokenize(full_prompt)
         if not train_on_inputs:
             user_prompt = prompter.generate_prompt(

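For reference, the data format this change assumes: each record carries a "conversations" list of alternating user/assistant turns. The field name is taken from the hunk above; the sample text below is purely illustrative.

    # Illustrative LIMA-style record as consumed by generate_and_tokenize_prompt
    data_point = {
        "conversations": [
            "How do I reverse a list in Python?",         # turn 0 -> User
            "Use reversed() or slicing: my_list[::-1].",  # turn 1 -> Assistant
        ]
    }
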
llama_rope_scaled_monkey_patch.py (+64 −0)

@@ -0,0 +1,64 @@
+import torch
+import transformers
+import transformers.models.llama.modeling_llama
+
+
+class ScaledRotaryEmbedding(torch.nn.Module):
+    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+        super().__init__()
+        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
+        self.register_buffer("inv_freq", inv_freq)
+
+        # Hard-code the extended 8k window, ignoring the caller's value
+        max_position_embeddings = 8192
+
+        # Build here to make `torch.jit.trace` work.
+        self.max_seq_len_cached = max_position_embeddings
+        t = torch.arange(
+            self.max_seq_len_cached,
+            device=self.inv_freq.device,
+            dtype=self.inv_freq.dtype,
+        )
+
+        # Position interpolation: compress indices by 2048 / 8192 = 1/4, so
+        # positions 0..8191 map into the 0..2047 range seen during pretraining
+        self.scale = 1 / 4
+        t *= self.scale
+
+        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+        # Unlike the paper's interleaved layout, the two halves are
+        # concatenated; rotate_half applies the matching permutation,
+        # so the computation is equivalent
+        emb = torch.cat((freqs, freqs), dim=-1)
+        self.register_buffer(
+            "cos_cached", emb.cos()[None, None, :, :], persistent=False
+        )
+        self.register_buffer(
+            "sin_cached", emb.sin()[None, None, :, :], persistent=False
+        )
+
+    def forward(self, x, seq_len=None):
+        # x: [bs, num_attention_heads, seq_len, head_size]
+        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
+        if seq_len > self.max_seq_len_cached:
+            self.max_seq_len_cached = seq_len
+            t = torch.arange(
+                self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
+            )
+            t *= self.scale
+            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+            # Same concatenated (non-interleaved) layout as in __init__
+            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
+            self.register_buffer(
+                "cos_cached", emb.cos()[None, None, :, :], persistent=False
+            )
+            self.register_buffer(
+                "sin_cached", emb.sin()[None, None, :, :], persistent=False
+            )
+        return (
+            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
+        )
+
+
+def replace_llama_rope_with_scaled_rope():
+    # Must be called before the model is built: modules constructed
+    # earlier keep the original rotary embedding
+    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = (
+        ScaledRotaryEmbedding
+    )

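To make the scaling concrete: with scale = 1/4, a token at position t contributes rotation angles as if it sat at position t/4, so the full 0..8191 range lands inside the 0..2047 window the base model was pretrained on. A minimal usage sketch follows; it assumes only what the diff shows (the patch function, plus a HuggingFace LLaMA checkpoint such as huggyllama/llama-7b, which finetune.py already references).

    from llama_rope_scaled_monkey_patch import replace_llama_rope_with_scaled_rope
    from transformers import LlamaForCausalLM

    # Patch first: the swap only affects rotary embeddings built afterward
    replace_llama_rope_with_scaled_rope()

    # Any LLaMA loaded from here on builds its sin/cos tables over 8192
    # interpolated positions instead of the original 2048
    model = LlamaForCausalLM.from_pretrained("huggyllama/llama-7b")
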
utils/lima_prompter.py (+35 −0)

@@ -0,0 +1,35 @@
+"""
+A dedicated helper to manage templates and prompt building.
+"""
+
+from typing import List
+
+
+class LimaPrompter(object):
+    __slots__ = ("template", "_verbose")
+
+    def __init__(self, template_name: str = "", verbose: bool = False):
+        self._verbose = verbose
+        if not template_name:
+            template_name = "Lima"
+        # Populate the declared `template` slot; the prompt format itself
+        # is hard-coded in generate_prompt below
+        self.template = template_name
+
+    def generate_prompt(
+        self,
+        conversations: List[str]
+    ) -> str:
+        c = []
+        for turn, text in enumerate(conversations):
+            role = 'User' if turn % 2 == 0 else 'Assistant'
+            c.append(f'### {role}: {text}')
+        res = '\n'.join(c)
+        if self._verbose:
+            print(res)
+        return res
+
+    def get_response(self, output: str) -> str:
+        res = output.split('### Assistant:')[-1].strip()
+        if self._verbose:
+            print(res)
+        return res
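
A quick round trip with the new prompter (conversation text is illustrative):

    prompter = LimaPrompter()
    prompt = prompter.generate_prompt(["What is 2 + 2?", "4."])
    # prompt == "### User: What is 2 + 2?\n### Assistant: 4."

    reply = prompter.get_response(prompt)
    # reply == "4.", everything after the last "### Assistant:" marker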