
initial commit

Eric Wang 3 years ago
commit 26f64780ad
6 changed files with 1935 additions and 0 deletions
  1. .gitignore  +6 -0
  2. alpaca_data.json  +1324 -0
  3. conversion.py  +276 -0
  4. finetune.py  +116 -0
  5. iteration.ipynb  +125 -0
  6. lengths.ipynb  +88 -0

+ 6 - 0
.gitignore

@@ -0,0 +1,6 @@
+out/
+7B/
+13B/
+__pycache__/
+checkpoint**
+minimal-llama**

File diff suppressed because it is too large
+ 1324 - 0
alpaca_data.json


+ 276 - 0
conversion.py

@@ -0,0 +1,276 @@
+# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import json
+import os
+import shutil
+
+import torch
+
+
+"""
+Sample usage:
+
+    ```
+    python src/transformers/models/llama/convert_llama_weights_to_hf.py \
+        --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
+    ```
+
+Thereafter, models can be loaded via:
+
+    ```
+    tokenizer = transformers.LLaMATokenizer.from_pretrained("/output/path/tokenizer/")
+
+    model = transformers.LLaMAForCausalLM.from_pretrained("/output/path/llama-7b/")
+    ```
+"""
+
+INTERMEDIATE_SIZE_MAP = {
+    "7B": 11008,
+    "13B": 13824,
+    "30B": 17920,
+    "65B": 22016,
+}
+NUM_SHARDS = {
+    "7B": 1,
+    "13B": 2,
+    "30B": 4,
+    "65B": 8,
+}
+
+
+def read_json(path):
+    with open(path, "r") as f:
+        return json.load(f)
+
+
+def write_json(text, path):
+    with open(path, "w") as f:
+        json.dump(text, f)
+
+
+def write_model(model_path, input_base_path, model_size):
+    assert model_size in INTERMEDIATE_SIZE_MAP
+    os.makedirs(model_path, exist_ok=True)
+
+    params = read_json(os.path.join(input_base_path, "params.json"))
+    num_shards = NUM_SHARDS[model_size]
+    n_layers = params["n_layers"]
+    n_heads = params["n_heads"]
+    n_heads_per_shard = n_heads // num_shards
+    dim = params["dim"]
+    dims_per_head = dim // n_heads
+    base = 10000.0
+    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
+
+    # permute for sliced rotary
+    def permute(w):
+        return w.view(n_heads, dim // n_heads // 2, 2, dim).transpose(1, 2).reshape(dim, dim)
+
+    # Load weights
+    if model_size == "7B":
+        # Not sharded
+        # (The sharded implementation would also work, but this is simpler.)
+        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
+    else:
+        # Sharded
+        loaded = [
+            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
+            for i in range(num_shards)
+        ]
+    param_count = 0
+    index_dict = {"weight_map": {}}
+    for layer_i in range(n_layers):
+        filename = "pytorch_model-{:05d}-of-{:05d}.bin".format(
+            layer_i + 1,
+            n_layers + 1,
+        )
+        if model_size == "7B":
+            # Unsharded
+            state_dict = {
+                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
+                    loaded[f"layers.{layer_i}.attention.wq.weight"]
+                ),
+                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
+                    loaded[f"layers.{layer_i}.attention.wk.weight"]
+                ),
+                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
+                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
+                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
+                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
+                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
+                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
+                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
+            }
+        else:
+            # Sharded
+            state_dict = {
+                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][f"layers.{layer_i}.attention_norm.weight"],
+                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
+                    f"layers.{layer_i}.ffn_norm.weight"
+                ],
+            }
+            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
+                torch.cat(
+                    [
+                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                        for i in range(num_shards)
+                    ],
+                    dim=0,
+                ).reshape(dim, dim)
+            )
+            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
+                torch.cat(
+                    [
+                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                        for i in range(num_shards)
+                    ],
+                    dim=0,
+                ).reshape(dim, dim)
+            )
+            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
+                [
+                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(n_heads_per_shard, dims_per_head, dim)
+                    for i in range(num_shards)
+                ],
+                dim=0,
+            ).reshape(dim, dim)
+
+            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
+                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
+            )
+            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
+                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
+            )
+            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
+                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
+            )
+            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
+                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
+            )
+
+        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
+        for k, v in state_dict.items():
+            index_dict["weight_map"][k] = filename
+            param_count += v.numel()
+        torch.save(state_dict, os.path.join(model_path, filename))
+
+    filename = "pytorch_model-{:05d}-of-{:05d}.bin".format(
+        n_layers + 1,
+        n_layers + 1,
+    )
+    if model_size == "7B":
+        # Unsharded
+        state_dict = {
+            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
+            "model.norm.weight": loaded["norm.weight"],
+            "lm_head.weight": loaded["output.weight"],
+        }
+    else:
+        state_dict = {
+            "model.norm.weight": loaded[0]["norm.weight"],
+            "model.embed_tokens.weight": torch.cat(
+                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
+            ),
+            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
+        }
+
+    for k, v in state_dict.items():
+        index_dict["weight_map"][k] = filename
+        param_count += v.numel()
+    torch.save(state_dict, os.path.join(model_path, filename))
+
+    # Write configs
+    index_dict["metadata"] = {"total_size": param_count * 2}
+    write_json(index_dict, os.path.join(model_path, "pytorch_model.bin.index.json"))
+    config_out = {
+        "architectures": ["LLaMAForCausalLM"],
+        "bos_token_id": 0,
+        "eos_token_id": 1,
+        "hidden_act": "silu",
+        "hidden_size": params["dim"],
+        "intermediate_size": INTERMEDIATE_SIZE_MAP[model_size],
+        "initializer_range": 0.02,
+        "max_sequence_length": 2048,
+        "model_type": "llama",
+        "num_attention_heads": params["n_heads"],
+        "num_hidden_layers": params["n_layers"],
+        "pad_token_id": -1,
+        "rms_norm_eps": params["norm_eps"],
+        "torch_dtype": "float16",
+        "transformers_version": "4.27.0.dev0",
+        "use_cache": True,
+        "vocab_size": 32000,
+    }
+    write_json(
+        config_out,
+        os.path.join(model_path, "config.json"),
+    )
+    generation_config = {
+        "_from_model_config": True,
+        "bos_token_id": 0,
+        "eos_token_id": 1,
+        "pad_token_id": 0,
+        "transformers_version": "4.27.0.dev0",
+    }
+    write_json(
+        generation_config,
+        os.path.join(model_path, "generation_config.json"),
+    )
+
+
+def write_tokenizer(tokenizer_path, input_tokenizer_path):
+    os.makedirs(tokenizer_path, exist_ok=True)
+    write_json({}, os.path.join(tokenizer_path, "special_tokens_map.json"))
+    write_json(
+        {
+            "bos_token": "",
+            "eos_token": "",
+            "model_max_length": int(1e30),
+            "tokenizer_class": "LLaMATokenizer",
+            "unk_token": "",
+        },
+        os.path.join(tokenizer_path, "tokenizer_config.json"),
+    )
+    shutil.copyfile(input_tokenizer_path, os.path.join(tokenizer_path, "tokenizer.model"))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--input_dir",
+        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
+    )
+    parser.add_argument(
+        "--model_size",
+        choices=["7B", "13B", "30B", "65B"],
+    )
+    parser.add_argument(
+        "--output_dir",
+        help="Location to write HF model and tokenizer",
+    )
+    args = parser.parse_args()
+    write_model(
+        model_path=os.path.join(args.output_dir, "llama-{}".format(args.model_size).lower()),
+        input_base_path=os.path.join(args.input_dir, args.model_size),
+        model_size=args.model_size,
+    )
+    write_tokenizer(
+        tokenizer_path=os.path.join(args.output_dir, "tokenizer"),
+        input_tokenizer_path=os.path.join(args.input_dir, "tokenizer.model"),
+    )
+
+
+if __name__ == "__main__":
+    main()
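
Once conversion.py has run, the Hugging Face checkpoint lands in `<output_dir>/llama-7b` and the tokenizer in `<output_dir>/tokenizer`, matching the sample usage in the docstring above. A minimal loading sketch, assuming the script was invoked with `--model_size 7B --output_dir ./7B` (the same paths finetune.py below reads from) and the pre-release transformers 4.27.0.dev0 class names used throughout this commit:

```
# Assumed invocation (not part of the commit):
#   python conversion.py --input_dir /path/to/llama --model_size 7B --output_dir ./7B
from transformers import LLaMAForCausalLM, LLaMATokenizer

tokenizer = LLaMATokenizer.from_pretrained("./7B/tokenizer")
model = LLaMAForCausalLM.from_pretrained(
    "./7B/llama-7b",
    torch_dtype="auto",  # picks up the float16 weights written by the converter
    device_map="auto",   # requires accelerate; spreads layers across available devices
)
```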

+ 116 - 0
finetune.py

@@ -0,0 +1,116 @@
+import os
+
+# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+import torch
+import torch.nn as nn
+import bitsandbytes as bnb
+from datasets import load_dataset
+import transformers
+from transformers import AutoTokenizer, AutoConfig, LLaMAForCausalLM, LLaMATokenizer
+from peft import prepare_model_for_int8_training, LoraConfig, get_peft_model
+
+model = LLaMAForCausalLM.from_pretrained(
+    "./7B/llama-7b",
+    load_in_8bit=True,
+    max_sequence_length=128,  # data length
+    device_map="auto",
+)
+
+
+tokenizer = LLaMATokenizer.from_pretrained("./7B/tokenizer")
+
+
+def print_trainable_parameters(model):
+    """
+    Prints the number of trainable parameters in the model.
+    """
+    trainable_params = 0
+    all_param = 0
+    for _, param in model.named_parameters():
+        all_param += param.numel()
+        if param.requires_grad:
+            trainable_params += param.numel()
+    print(
+        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
+    )
+
+
+print_trainable_parameters(model)
+model = prepare_model_for_int8_training(model)
+
+config = LoraConfig(
+    r=4,
+    lora_alpha=16,
+    target_modules=["q_proj", "v_proj"],
+    lora_dropout=0.05,
+    bias="none",
+    task_type="CAUSAL_LM",
+)
+model = get_peft_model(model, config)
+
+print_trainable_parameters(model)
+
+tokenizer.pad_token = tokenizer.eos_token
+tokenizer.pad_token_id = tokenizer.eos_token_id
+
+data = load_dataset("json", data_files="alpaca_data.json")
+
+
+def generate_prompt(data_point):
+    # sorry about the formatting disaster gotta move fast
+    if data_point["instruction"]:
+        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
+
+### Instruction:
+{data_point["instruction"]}
+
+### Input:
+{data_point["input"]}
+
+### Response:"""
+    else:
+        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+### Instruction:
+{data_point["instruction"]}
+
+### Response:"""
+
+
+data = data.map(
+    lambda data_point: tokenizer(
+        generate_prompt(data_point),
+        truncation=True,
+        max_length=128,
+        padding="max_length",
+    )
+)
+
+DATA_SIZE = 51368
+MICRO_BATCH_SIZE = 12
+BATCH_SIZE = 36
+GRADIENT_ACCUMULATION_STEPS = BATCH_SIZE // MICRO_BATCH_SIZE
+EPOCHS = 3
+LEARNING_RATE = 2e-5
+
+
+trainer = transformers.Trainer(
+    model=model,
+    train_dataset=data["train"],
+    args=transformers.TrainingArguments(
+        per_device_train_batch_size=MICRO_BATCH_SIZE,
+        gradient_accumulation_steps=GRADIENT_ACCUMULATION_STEPS,
+        warmup_steps=100,
+        num_train_epochs=EPOCHS,
+        learning_rate=LEARNING_RATE,
+        fp16=True,
+        logging_steps=1,
+        output_dir="lora-alpaca",
+        save_total_limit=3,
+    ),
+    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
+)
+model.config.use_cache = False
+trainer.train(resume_from_checkpoint=False)
+
+model.save_pretrained("lora-alpaca")
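
finetune.py saves only the LoRA adapter weights to `lora-alpaca`, so generation requires re-attaching the adapter to the 8-bit base model. A minimal inference sketch, not part of this commit, assuming the converted weights sit under `./7B` as loaded above and using peft's `PeftModel` to restore the adapter; the prompt follows the template from `generate_prompt`:

```
import torch
from peft import PeftModel
from transformers import LLaMAForCausalLM, LLaMATokenizer

tokenizer = LLaMATokenizer.from_pretrained("./7B/tokenizer")
model = LLaMAForCausalLM.from_pretrained(
    "./7B/llama-7b", load_in_8bit=True, device_map="auto"
)
# Attach the LoRA weights written by finetune.py to the directory "lora-alpaca".
model = PeftModel.from_pretrained(model, "lora-alpaca")
model.eval()

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nName three primary colors.\n\n### Response:"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```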

File diff suppressed because it is too large
+ 125 - 0
iteration.ipynb


File diff suppressed because it is too large
+ 88 - 0
lengths.ipynb


Some files were not shown because too many files changed in this diff