main_opt.py (forked from locuslab/wanda; repository archived and read-only since Aug 28, 2024)
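
# Example invocation (illustrative; the model name and output path are
# placeholders, and lib/prune_opt.py / lib/eval.py must be present from the
# wanda repository):
#
#   python main_opt.py --model facebook/opt-1.3b --prune_method wanda \
#       --sparsity_ratio 0.5 --sparsity_type unstructured \
#       --save out/opt-1.3b/unstructured/wanda/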
import argparse
import os
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from importlib.metadata import version

from lib.prune_opt import prune_wanda, prune_magnitude, prune_sparsegpt, prune_ablate, check_sparsity, find_layers
from lib.eval import eval_ppl, eval_zero_shot

print('torch', version('torch'))
print('transformers', version('transformers'))
print('accelerate', version('accelerate'))
print('# of gpus: ', torch.cuda.device_count())

def get_llm(model_name, cache_dir="llm_weights"):
    # Load the checkpoint in fp16; device_map="auto" shards large models
    # across all visible GPUs.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        cache_dir=cache_dir,
        low_cpu_mem_usage=True,
        device_map="auto"
    )

    # The pruning and evaluation code reads model.seqlen as the sequence
    # length for calibration and perplexity evaluation.
    model.seqlen = model.config.max_position_embeddings
    return model

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, help='OPT model')
    parser.add_argument('--seed', type=int, default=0, help='Seed for sampling the calibration data.')
    parser.add_argument('--nsamples', type=int, default=128, help='Number of calibration samples.')
    parser.add_argument('--sparsity_ratio', type=float, default=0, help='Sparsity level')
    parser.add_argument("--sparsity_type", type=str, choices=["unstructured", "4:8", "2:4"])
    parser.add_argument("--prune_method", type=str, choices=["magnitude", "wanda", "sparsegpt",
                        "ablate_mag_seq", "ablate_wanda_seq", "ablate_mag_iter", "ablate_wanda_iter", "search"])
    parser.add_argument("--cache_dir", default="llm_weights", type=str)
    parser.add_argument('--use_variant', action="store_true", help="whether to use the wanda variant described in the appendix")
    parser.add_argument('--save', type=str, default=None, help='Path to save results.')
    parser.add_argument('--save_model', type=str, default=None, help='Path to save the pruned model.')
    parser.add_argument("--eval_zero_shot", action="store_true")
    args = parser.parse_args()

    # Setting seeds for reproducibility
    np.random.seed(args.seed)
    torch.random.manual_seed(args.seed)

    # Handling n:m sparsity
    prune_n, prune_m = 0, 0
    if args.sparsity_type != "unstructured":
        assert args.sparsity_ratio == 0.5, "sparsity ratio must be 0.5 for structured N:M sparsity"
        prune_n, prune_m = map(int, args.sparsity_type.split(":"))
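    # In N:M sparsity (e.g. "2:4"), N of every M consecutive weights are
    # zeroed out, so both supported patterns fix the overall sparsity at
    # N/M = 0.5; hence the assertion above.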

    model_name = args.model.split("/")[-1]
    print(f"loading llm model {args.model}")
    model = get_llm(args.model, args.cache_dir)
    model.eval()
    tokenizer = AutoTokenizer.from_pretrained(args.model, use_fast=False)

    device = torch.device("cuda:0")
    if "30b" in args.model or "66b" in args.model:  # for 30b and 66b we use device_map to load onto multiple A6000 GPUs, thus the processing here
        device = model.hf_device_map["lm_head"]
    print("use device ", device)

    if args.sparsity_ratio != 0:
        print("pruning starts")
        if args.prune_method == "wanda":
            prune_wanda(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
        elif args.prune_method == "magnitude":
            prune_magnitude(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
        elif args.prune_method == "sparsegpt":
            prune_sparsegpt(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)
        elif "ablate" in args.prune_method:
            prune_ablate(args, model, tokenizer, device, prune_n=prune_n, prune_m=prune_m)

    ################################################################
    print("*"*30)
    sparsity_ratio = check_sparsity(model)
    print(f"sparsity sanity check {sparsity_ratio:.4f}")
    print("*"*30)
    ################################################################
    ppl_test = eval_ppl(args, model, tokenizer, device)
    print(f"wikitext perplexity {ppl_test}")

    if args.save:  # guard: --save defaults to None, so only write a log when a path is given
        if not os.path.exists(args.save):
            os.makedirs(args.save)
        save_filepath = os.path.join(args.save, f"log_{args.prune_method}.txt")
        with open(save_filepath, "w") as f:
            print("method\tactual_sparsity\tppl_test", file=f, flush=True)
            print(f"{args.prune_method}\t{sparsity_ratio:.4f}\t{ppl_test:.4f}", file=f, flush=True)

    if args.eval_zero_shot:
        accelerate = False
        if "30b" in args.model or "66b" in args.model:
            accelerate = True

        task_list = ["boolq", "rte", "hellaswag", "winogrande", "arc_easy", "arc_challenge", "openbookqa"]
        num_shot = 0
        results = eval_zero_shot(args.model, model, tokenizer, task_list, num_shot, accelerate)
        print("********************************")
        print("zero_shot evaluation results")
        print(results)

    if args.save_model:
        model.save_pretrained(args.save_model)
        tokenizer.save_pretrained(args.save_model)
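        # The pruned model is written in standard Hugging Face format, so it
        # can be reloaded later with the usual API, e.g.:
        #   model = AutoModelForCausalLM.from_pretrained(args.save_model, torch_dtype=torch.float16)
        #   tokenizer = AutoTokenizer.from_pretrained(args.save_model)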

if __name__ == '__main__':
    main()