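# Export a merged Hugging Face checkpoint: load the base LLaMA weights,
# apply the tloen/alpaca-lora-7b adapter, fold the LoRA deltas into the
# base weights, and save the result as a standalone HF checkpoint.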
import os

import torch
import transformers
from peft import PeftModel

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"  # noqa: E501

from transformers import LlamaForCausalLM, LlamaTokenizer  # noqa: F402

BASE_MODEL = os.environ.get("BASE_MODEL", None)
assert (
    BASE_MODEL
), "Please specify a value for BASE_MODEL environment variable, e.g. `export BASE_MODEL=decapoda-research/llama-7b-hf`"  # noqa: E501

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

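# Load the base weights in fp16 on CPU. 8-bit loading is kept off so the
# LoRA deltas can be merged into plain fp16 tensors and saved as-is.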
base_model = LlamaForCausalLM.from_pretrained(
    BASE_MODEL,
    load_in_8bit=False,
    torch_dtype=torch.float16,
    device_map={"": "cpu"},
)

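# Snapshot one attention weight so we can check later that merging
# actually changed the base weights.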
first_weight = base_model.model.layers[0].self_attn.q_proj.weight
first_weight_old = first_weight.clone()

lora_model = PeftModel.from_pretrained(
    base_model,
    "tloen/alpaca-lora-7b",
    device_map={"": "cpu"},
    torch_dtype=torch.float16,
)

lora_weight = lora_model.base_model.model.model.layers[
    0
].self_attn.q_proj.weight

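# Loading the adapter must leave the base weights untouched; the merge
# itself only happens below.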
assert torch.allclose(first_weight_old, first_weight)

# Merge the LoRA weights into the base weights: flag the q_proj and v_proj
# projections (the modules the adapter targets) for merging; the merge runs
# when the model is switched out of training mode below.
for layer in lora_model.base_model.model.model.layers:
    layer.self_attn.q_proj.merge_weights = True
    layer.self_attn.v_proj.merge_weights = True

lora_model.train(False)
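# (This merge-on-eval behavior is specific to older peft releases; newer
# versions expose lora_model.merge_and_unload() for the same purpose.)
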
# did we do anything?
assert not torch.allclose(first_weight_old, first_weight)

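# Build a plain LlamaForCausalLM state dict: strip the "base_model.model."
# prefix that peft adds to every key and drop the lora_A/lora_B adapter
# tensors, which are no longer needed once merged.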
lora_model_sd = lora_model.state_dict()
deloreanized_sd = {
    k.replace("base_model.model.", ""): v
    for k, v in lora_model_sd.items()
    if "lora" not in k
}

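# Save the merged model as a sharded fp16 checkpoint under ./hf_ckpt.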
LlamaForCausalLM.save_pretrained(
    base_model, "./hf_ckpt", state_dict=deloreanized_sd, max_shard_size="400MB"
)
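
# Example invocation (the script filename here is an assumption, not given
# anywhere above):
#   BASE_MODEL=decapoda-research/llama-7b-hf python export_hf_checkpoint.py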