export_state_dict_checkpoint.py

import os
import json

import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
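
# Load the base weights on CPU in fp16; load_in_8bit stays off so the
# exported tensors remain plain half-precision.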
base_model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)
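
# Apply the trained LoRA adapter on top of the base model.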
lora_model = PeftModel.from_pretrained(
    base_model,
    "tloen/alpaca-lora-7b",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)
# Merge the LoRA weights into the base model so the exported checkpoint
# needs no PEFT at load time; eval() alone does not perform the merge.
lora_model.eval()
lora_model = lora_model.merge_and_unload()

lora_model_sd = lora_model.state_dict()
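
# Architecture hyperparameters for LLaMA-7B, written out as params.json
# so Meta's original inference code can interpret the checkpoint.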
params = {
    "dim": 4096,
    "multiple_of": 256,
    "n_heads": 32,
    "n_layers": 32,
    "norm_eps": 1e-06,
    "vocab_size": -1,
}
n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
dims_per_head = dim // n_heads
# Standard rotary-embedding frequencies; computed for reference but not
# used in the conversion below.
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
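
# The HF LLaMA port stores the rotary halves of each attention head in a
# different interleaving than Meta's checkpoint. unpermute() converts a
# q/k projection matrix back to the original layout; permute() is its
# inverse, as used when converting in the other direction.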
def permute(w):
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
    )


def unpermute(w):
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim).transpose(1, 2).reshape(dim, dim)
    )
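
# Map Hugging Face parameter names to the names used in Meta's original
# consolidated checkpoint; returns None for keys that should be dropped.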
def translate_state_dict_key(k):
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError
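
# Build the converted state dict; q/k projections need their rotary
# layout restored, everything else copies over unchanged.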
new_state_dict = {}
for k, v in lora_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        if "wq" in new_k or "wk" in new_k:
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v
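
# Write the single-shard checkpoint in Meta's format: consolidated.00.pth
# alongside a params.json describing the architecture.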
os.makedirs("./ckpt", exist_ok=True)
torch.save(new_state_dict, "./ckpt/consolidated.00.pth")
with open("./ckpt/params.json", "w") as f:
    json.dump(params, f)
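
# Sanity check (a minimal sketch): reload the export and confirm a few
# expected keys and the fp16 dtype survived the conversion.
sd = torch.load("./ckpt/consolidated.00.pth", map_location="cpu")
assert "tok_embeddings.weight" in sd
assert f"layers.{n_layers - 1}.attention.wq.weight" in sd
assert all(v.dtype == torch.float16 for v in sd.values())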