llama_rope_scaled_monkey_patch.py

import torch
import transformers
import transformers.models.llama.modeling_llama
from einops import rearrange
import random


class ScaledRotaryEmbedding(torch.nn.Module):
    def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim))
        self.register_buffer("inv_freq", inv_freq)

        # Override the passed-in value: cache positions for an 8192-token context.
        max_position_embeddings = 8192

        # Build here to make `torch.jit.trace` work.
        self.max_seq_len_cached = max_position_embeddings
        t = torch.arange(
            self.max_seq_len_cached,
            device=self.inv_freq.device,
            dtype=self.inv_freq.dtype,
        )

        # Position interpolation: compress positions by 4x so that 8192 tokens
        # fall inside the 2048-position range the model was trained on.
        self.scale = 1 / 4
        t *= self.scale

        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        # Different from paper, but it uses a different permutation in order to obtain the same calculation
        emb = torch.cat((freqs, freqs), dim=-1)
        self.register_buffer(
            "cos_cached", emb.cos()[None, None, :, :], persistent=False
        )
        self.register_buffer(
            "sin_cached", emb.sin()[None, None, :, :], persistent=False
        )

    def forward(self, x, seq_len=None):
        # x: [bs, num_attention_heads, seq_len, head_size]
        # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case.
        if seq_len > self.max_seq_len_cached:
            self.max_seq_len_cached = seq_len
            t = torch.arange(
                self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype
            )
            t *= self.scale
            freqs = torch.einsum("i,j->ij", t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            self.register_buffer(
                "cos_cached", emb.cos()[None, None, :, :], persistent=False
            )
            self.register_buffer(
                "sin_cached", emb.sin()[None, None, :, :], persistent=False
            )
        return (
            self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
            self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
        )
def replace_llama_rope_with_scaled_rope():
    transformers.models.llama.modeling_llama.LlamaRotaryEmbedding = (
        ScaledRotaryEmbedding
    )
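

# --------------------------------------------------------------------------
# Usage sketch (not part of the original patch; the checkpoint path and tensor
# sizes below are illustrative placeholders). The patch must be applied
# *before* the model is instantiated, because LlamaRotaryEmbedding is looked
# up when the attention layers are constructed.
if __name__ == "__main__":
    # Quick sanity check of the scaled embedding on a dummy query tensor
    # (batch=1, heads=32, seq_len=4096, head_dim=128).
    rope = ScaledRotaryEmbedding(dim=128)
    dummy = torch.zeros(1, 32, 4096, 128)
    cos, sin = rope(dummy, seq_len=4096)
    print(cos.shape, sin.shape)  # both torch.Size([1, 1, 4096, 128])

    # Apply the monkey patch, then load a LLaMA checkpoint as usual.
    replace_llama_rope_with_scaled_rope()
    # from transformers import LlamaForCausalLM
    # model = LlamaForCausalLM.from_pretrained("path/to/llama-checkpoint")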