export_state_dict_checkpoint.py

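"""Export the tloen/alpaca-lora-7b adapter merged into a LLaMA base model.

The merged weights are written in the original Meta checkpoint layout
(consolidated.00.pth plus params.json) under ./ckpt. The base model is chosen
via the BASE_MODEL environment variable, e.g.

    BASE_MODEL=huggyllama/llama-7b python export_state_dict_checkpoint.py
"""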
import json
import os

import torch
import transformers
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: E402

BASE_MODEL = os.environ.get("BASE_MODEL", None)
assert (
    BASE_MODEL
), "Please specify a value for BASE_MODEL environment variable, e.g. `export BASE_MODEL=huggyllama/llama-7b`"  # noqa: E501
# Load the tokenizer and the base model on CPU in fp16, then attach the
# alpaca-lora adapter weights on top of it.
tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

base_model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

lora_model = PeftModel.from_pretrained(
    base_model,
    "tloen/alpaca-lora-7b",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)
# Merge the LoRA weights into the wrapped q_proj/v_proj linear layers, then
# switch to eval mode so the merged weights are what state_dict() returns.
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

lora_model.train(False)

lora_model_sd = lora_model.state_dict()
# Hyperparameters for the 7B LLaMA model, written out later as params.json in
# the original Meta checkpoint format.
params = {
    "dim": 4096,
    "multiple_of": 256,
    "n_heads": 32,
    "n_layers": 32,
    "norm_eps": 1e-06,
    "vocab_size": -1,
}
n_layers = params["n_layers"]
n_heads = params["n_heads"]
dim = params["dim"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (
    base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head)
)
def permute(w):
    # Reorder the rows of a (dim, dim) attention projection into the
    # interleaved layout the Hugging Face LLaMA conversion uses for q/k.
    return (
        w.view(n_heads, dim // n_heads // 2, 2, dim)
        .transpose(1, 2)
        .reshape(dim, dim)
    )


def unpermute(w):
    # Inverse of permute(): restore the row order expected by the original
    # Meta checkpoint for wq/wk.
    return (
        w.view(n_heads, 2, dim // n_heads // 2, dim)
        .transpose(1, 2)
        .reshape(dim, dim)
    )
def translate_state_dict_key(k):  # noqa: C901
    # Map a Hugging Face parameter name onto the corresponding key in the
    # original Meta LLaMA checkpoint; return None for keys that should be
    # dropped (rotary buffers and LoRA-specific tensors).
    k = k.replace("base_model.model.", "")
    if k == "model.embed_tokens.weight":
        return "tok_embeddings.weight"
    elif k == "model.norm.weight":
        return "norm.weight"
    elif k == "lm_head.weight":
        return "output.weight"
    elif k.startswith("model.layers."):
        layer = k.split(".")[2]
        if k.endswith(".self_attn.q_proj.weight"):
            return f"layers.{layer}.attention.wq.weight"
        elif k.endswith(".self_attn.k_proj.weight"):
            return f"layers.{layer}.attention.wk.weight"
        elif k.endswith(".self_attn.v_proj.weight"):
            return f"layers.{layer}.attention.wv.weight"
        elif k.endswith(".self_attn.o_proj.weight"):
            return f"layers.{layer}.attention.wo.weight"
        elif k.endswith(".mlp.gate_proj.weight"):
            return f"layers.{layer}.feed_forward.w1.weight"
        elif k.endswith(".mlp.down_proj.weight"):
            return f"layers.{layer}.feed_forward.w2.weight"
        elif k.endswith(".mlp.up_proj.weight"):
            return f"layers.{layer}.feed_forward.w3.weight"
        elif k.endswith(".input_layernorm.weight"):
            return f"layers.{layer}.attention_norm.weight"
        elif k.endswith(".post_attention_layernorm.weight"):
            return f"layers.{layer}.ffn_norm.weight"
        elif k.endswith("rotary_emb.inv_freq") or "lora" in k:
            return None
        else:
            print(layer, k)
            raise NotImplementedError
    else:
        print(k)
        raise NotImplementedError
# Rename every tensor, un-permuting wq/wk back into the original layout, and
# write the result as a single consolidated checkpoint plus params.json.
new_state_dict = {}
for k, v in lora_model_sd.items():
    new_k = translate_state_dict_key(k)
    if new_k is not None:
        if "wq" in new_k or "wk" in new_k:
            new_state_dict[new_k] = unpermute(v)
        else:
            new_state_dict[new_k] = v

os.makedirs("./ckpt", exist_ok=True)

torch.save(new_state_dict, "./ckpt/consolidated.00.pth")

with open("./ckpt/params.json", "w") as f:
    json.dump(params, f)