Dataset columns (type and value statistics):

  path             stringlengths   9 to 117
  type             stringclasses   2 values
  project          stringclasses   10 values
  commit_hash      stringlengths   40 to 40
  commit_message   stringlengths   1 to 137
  ground_truth     stringlengths   0 to 2.74k
  main_code        stringlengths   102 to 3.37k
  context          stringlengths   0 to 14.7k

path: RWKV-v4neo.src.dataset/MyDataset.__init__
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: 1945cb58ed29ed1c353453bb93a7ee72d563d3bc
commit_message: pile v2
<6>:<del> if args.data_file.endswith('/'): <7>:<del> d_all = [] <8>:<del> for p in os.listdir(args.data_file): <9>:<del> if p.endswith(".idx"): <10>:<del> d_all += [p[:-4]] <11>:<del> d_all.sort() <12>:<del> rank_zero_info(d_all) <13>:<del> exit(0) <14>:<add> if args.my_pile_version == 1: <add> self.data = MMapIndexedDataset(args.data_file) <add> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <add> rank_zero_info(f"Data has {self.data_size} tokens.") <15>:<add> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <add> data_list = [i.strip().split(' ') for i in data_list] <add> self.data = [] <add> self.data_size = int(data_list[-1][-1]) <del> self.data = MMapIndexedDataset(args.data_file) <16>:<del> self.data_size = len(self.data._bin_buffer) // 2 <17>:<add> rank_zero_info(f"Data has {self.data_size} chunks.") <del> rank_zero_info(f"Data has {self.data_size} tokens.") <18>:<add> for d in data_list: <add> data = MMapIndexedDataset(d[0]) <add> data_size = len(data._bin
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.data_file.endswith('/'): <7> d_all = [] <8> for p in os.listdir(args.data_file): <9> if p.endswith(".idx"): <10> d_all += [p[:-4]] <11> d_all.sort() <12> rank_zero_info(d_all) <13> exit(0) <14> else: <15> self.data = MMapIndexedDataset(args.data_file) <16> self.data_size = len(self.data._bin_buffer) // 2 <17> rank_zero_info(f"Data has {self.data_size} tokens.") <18> <19> if args.my_qa_mask > 0: <20> self.data_pile = MMapIndexedDataset('/fsx/BlinkDL/pile/pile_20B_tokenizer_text_document') <21> self.data_pile_size = len(self.data_pile._bin_buffer) // 2 <22> <23> if args.my_pile_stage > 0: <24> # assert self.data_size == 332115325534 and self.vocab_size == 50277 <25> self.samples_per_epoch = args.epoch_steps * args.real_bsz <26> assert self.samples_per_epoch == 40320 <27> rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") <28> dataset_slot = self.data_size // args.ctx_len <29> if args.my_pile_stage != 4: <30> assert MaybeIsPrime(args.magic_prime) <31> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s> rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)} ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.BufferedRandom read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s>
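The row above introduces a second data layout (a "my_pile_version" other than 1) that is read from a plain-text list file instead of a single binidx dataset. As an illustration only, here is a minimal Python sketch of the parsing the diff performs; the field meanings (shard path, chunk count, cumulative end) are inferred from how the values are used and are assumptions, since the file format is not documented in the row itself.

def parse_data_list(list_path: str):
    # Each non-empty line is whitespace-separated; the last field of the last
    # line gives the total size, mirroring: self.data_size = int(data_list[-1][-1])
    with open(list_path, "r", encoding="utf-8") as f:
        rows = [line.strip().split(" ") for line in f.read().strip().split("\n")]
    total_chunks = int(rows[-1][-1])
    entries = []
    for row in rows:
        binidx_path = row[0]          # the real code opens this with MMapIndexedDataset
        chunk_count = int(row[1])     # checked against (token_count - ctx_len) in the diff
        cumulative_end = int(row[-1])
        entries.append((cumulative_end, chunk_count, binidx_path))
    return total_chunks, entries

# total, entries = parse_data_list("my_pile_v2.list")   # hypothetical file name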

path: RWKV-v4neo.src.dataset/MyDataset.__init__
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: 79915b3696fc744d410b69a09510b7161bda835b
commit_message: better
<24>:<add> self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <del> self.data_pile = MMapIndexedDataset('/fsx/BlinkDL/pile/pile_20B_tokenizer_text_document') <25>:<add> # self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document')
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> else: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> self.data_pile = MMapIndexedDataset('/fsx/BlinkDL/pile/pile_20B_tokenizer_text_document') <25> self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size <26> <27> if args.my_pile_stage > 0</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 </s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s> = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)} ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.BufferedReader read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s>

path: RWKV-v4neo.src.dataset/MyDataset.__init__
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: 725327d667167a23ce6100e7d4c7b6fb9d6b1a40
commit_message: Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<24>:<add> # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <del> self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25>:<add> self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <del> # self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document')
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> else: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25> # self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <26> self.data_pile_size = len</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 </s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)} ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.FileIO read(self, size: int=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s>

path: RWKV-v4neo.src.trainer/train_callback.on_train_batch_start
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: 725327d667167a23ce6100e7d4c7b6fb9d6b1a40
commit_message: Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<9>:<add> if trainer.global_step < w_step: <add> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step)
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> else: <10> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <11> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <12> progress = (decay_step - w_step + 1) / (decay_total - w_step) <13> progress = min(1, max(0, progress)) <14> <15> if args.lr_final == 0 or args.lr_init == 0: # linear decay <16> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <17> else: # exp decay <18> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <19> <20> if trainer.global_step < w_step: <21> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <22> # if trainer.is_global_zero: <23> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <24> <25> for param_group in trainer.optimizers[0].param_groups: <26> if args.layerwise_lr > 0: <27> param_group["lr"] = lr * param_group["my_lr_scale"] <28> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 else: param_group["lr"] = lr trainer.my_lr = lr # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: math exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) -> float at: wandb init = wandb_sdk.init ===========changed ref 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): self.args = args if args.data_type == "binidx": self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") if args.my_pile_version == 1: self.data = MMapIndexedDataset(args.data_file) self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size rank_zero_info(f"Data has {self.data_size} tokens.") else: data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') data_list = [i.strip().split(' ') for i in data_list] self.data = [] self.data_size = int(data_list[-1][-1]) rank_zero_info(f"Data has {self.data_size} chunks.") for d in data_list: data = MMapIndexedDataset(d[0]) data_size = len(data._bin_buffer) // data._index._dtype_size assert (data_size - args.ctx_len) == int(d[1]) self.data += [[int(d[-1]), int(d[1]), data]] # rank_zero_info(self.data) if args.my_qa_mask > 0: + # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') - self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') + self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') - </s> ===========changed ref 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 <s>Dataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') - # self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size + else: + self.data_pile = None + self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) 
rank_zero_info(f"Data has {self.data_size} tokens.") elif args</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le</s>

path: RWKV-v4neo.src.trainer/train_callback.on_train_batch_end
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: a8ec3151b6e2160b9c46527a71ccc6f47b5125df
commit_message: fix
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): <0> args = self.args <1> if trainer.is_global_zero: # logging <2> t_now = time.time_ns() <3> token_per_step = args.ctx_len * args.real_bsz <4> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <5> kt_s = 0 <6> try: <7> t_cost = (t_now - trainer.my_time_ns) / 1e9 <8> kt_s = token_per_step / t_cost / 1000 <9> self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) <10> self.log("Kt/s", kt_s, prog_bar=True, on_step=True) <11> except: <12> pass <13> trainer.my_time_ns = t_now <14> trainer.my_loss = trainer.my_loss_all.float().mean().item() <15> trainer.my_loss_sum += trainer.my_loss <16> trainer.my_loss_count += 1 <17> trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count <18> self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) <19> self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) <20> # self.log("s", real_step, prog_bar=True, on_step=True) <21> <22> if len(args.wandb) > 0: <23> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token_per_step / 1e9} <24> if kt_s > 0: <25> lll["kt/</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1: to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: time time_ns() -> int
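For reference, the throughput figures this callback logs ("REAL it/s" and "Kt/s") reduce to a few lines. The sketch below is illustrative only; it keeps the same arithmetic as the row but is not repository code.

import time

def step_throughput(prev_ns, ctx_len, real_bsz):
    """Return (now_ns, iterations/sec, kilo-tokens/sec) for one training step."""
    now_ns = time.time_ns()
    t_cost = (now_ns - prev_ns) / 1e9            # seconds spent on this step
    tokens_per_step = ctx_len * real_bsz
    return now_ns, 1.0 / t_cost, tokens_per_step / t_cost / 1000.0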

path: RWKV-v4neo.src.trainer/train_callback.on_train_epoch_end
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: 2f57660839017c3a3138cd2199df3232bf3a9a8c
commit_message: misc
<2>:<add> if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or (trainer.current_epoch == args.epoch_count - 1): <del> if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or trainer.current_epoch == args.epoch_count - 1: <23>:<add> if (args.epoch_begin + trainer.current_epoch) >= args.my_exit: <add> exit(0)
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): <0> args = self.args <1> if trainer.is_global_zero: # logging & save state_dict <2> if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or trainer.current_epoch == args.epoch_count - 1: <3> if args.data_type == 'wds_img': <4> raw_dict = pl_module.state_dict() <5> to_save_dict = {} <6> for k in raw_dict: <7> if k.startswith('encoder.') or k.startswith('decoder.'): <8> to_save_dict[k] = raw_dict[k] <9> else: <10> to_save_dict = pl_module.state_dict() <11> try: <12> my_save( <13> to_save_dict, <14> f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth", <15> ) <16> except Exception as e: <17> print('Error\n\n', e, '\n\n') <18> trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n") <19> trainer.my_log.flush() <20> <21> trainer.my_loss_sum = 0 <22> trainer.my_loss_count = 0 <23>
===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: datetime datetime() at: datetime.datetime __slots__ = date.__slots__ + time.__slots__ now(tz: Optional[_tzinfo]=...) -> _S __radd__ = __add__ at: math exp(x: SupportsFloat, /) -> float ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s> ===========changed ref 1=========== # module: RWKV-v4neo.train # offset: 1 <s>_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: fine-tune RWKV 1.5B using 8xA100 40G = 1.76it/s = 115k token/s, VRAM 37477M # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 1000 --epoch_count 1000 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 8 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 8 --precision bf16 --strategy deepspeed_stage_2 --grad_cp 0 # example: fine-tune RWKV 1.5B using 1 GPU fp16 (VRAM 16G) NOTE: fp16 might overflow # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx</s> ===========changed ref 2=========== # module: RWKV-v4neo.train # offset: 2 <s> 1024 --epoch_steps 200 --epoch_count 1000 --epoch_begin 0 --epoch_save 1 \ # --micro_bsz 11 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision fp16 --strategy deepspeed_stage_2_offload --grad_cp 1 parser = 
ArgumentParser() parser.add_argument("--load_model", default="", type=str) # full path, with .pth parser.add_argument("--wandb", default="", type=str) # wandb project name. if "" then don't use wandb parser.add_argument("--proj_dir", default="out", type=str) parser.add_argument("--random_seed", default="-1", type=int) parser.add_argument("--data_file", default="", type=str) parser.add_argument("--data_type", default="utf-8", type=str) parser.add_argument("--vocab_size", default=0, type=int) # vocab_size = 0 means auto (for char-level LM and .txt data) parser.add_argument("--ctx_len", default=1024, type=int) parser.add_argument("--epoch_steps", default=1000, type=int) # a mini "epoch" has [epoch_steps] steps parser.add_argument("--epoch_count", default=500, type=int) # train for this many "epochs". will continue afterwards with lr = lr_final parser.add_argument("--epoch_begin", default=0, type=int) # if you load a model trained</s>
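The epoch-end hook above now also exits once a target epoch is reached. A sketch of just the two conditions (when to save a checkpoint, when to stop), kept separate from the Lightning callback for clarity; the argument names mirror the args object in the diff, but the helper functions themselves are hypothetical.

def should_save(current_epoch, epoch_save, epoch_count):
    periodic = epoch_save > 0 and current_epoch % epoch_save == 0
    last_epoch = current_epoch == epoch_count - 1
    return periodic or last_epoch

def should_exit(epoch_begin, current_epoch, my_exit):
    # matches: if (args.epoch_begin + trainer.current_epoch) >= args.my_exit: exit(0)
    return (epoch_begin + current_epoch) >= my_exit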

path: RWKV-v4neo.src.dataset/MyDataset.__init__
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: f8c5f6c3dea1d720c5774fb0352bee373e3bc244
commit_message: misc
<10>:<add> elif args.my_pile_version == 2: <del> else:
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> else: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25> self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <26> self.data_pile_size = len</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "w</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.FileIO read(self, size: int=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s>

path: RWKV-v4neo.src.trainer/my_save
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: f8c5f6c3dea1d720c5774fb0352bee373e3bc244
commit_message: misc
<0>:<add> if '14b-run1' in ff: <del> if '14b-run1' not in ff: <1>:<del> torch.save(dd, ff) <2>:<del> else: <7>:<add> elif ('world/14b' in ff) or ('world/7b' in ff): <add> aa = ff.split('/')[1] <add> fn = ff.split('/')[-1] <add> fff = f'/dev/shm/{aa}-{fn}' <add> torch.save(dd, fff) <add> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) <add> else: <add> torch.save(dd, ff)
# module: RWKV-v4neo.src.trainer def my_save(dd, ff): <0> if '14b-run1' not in ff: <1> torch.save(dd, ff) <2> else: <3> fn = ff.split('/')[-1] <4> fff = '/dev/shm/' + fn <5> torch.save(dd, fff) <6> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) <7>
===========unchanged ref 0=========== at: subprocess Popen() at: torch.serialization save(obj: object, f: FILE_LIKE, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool=True, _disable_byteorder_record: bool=False) -> None ===========changed ref 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): self.args = args if args.data_type == "binidx": self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") if args.my_pile_version == 1: self.data = MMapIndexedDataset(args.data_file) self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size rank_zero_info(f"Data has {self.data_size} tokens.") + elif args.my_pile_version == 2: - else: data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') data_list = [i.strip().split(' ') for i in data_list] self.data = [] self.data_size = int(data_list[-1][-1]) rank_zero_info(f"Data has {self.data_size} chunks.") for d in data_list: data = MMapIndexedDataset(d[0]) data_size = len(data._bin_buffer) // data._index._dtype_size assert (data_size - args.ctx_len) == int(d[1]) self.data += [[int(d[-1]), int(d[1]), data]] # rank_zero_info(self.data) if args.my_qa_mask > 0: # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 <s>uped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size </s> ===========changed ref 2=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." 
else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.</s> ===========changed ref 3=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 3 <s>size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)}
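my_save now special-cases large runs: the checkpoint is written to /dev/shm and handed to a background `aws s3 mv`, so training does not block on the upload and the local copy is removed once the move succeeds. A generic sketch of that pattern with a placeholder bucket name (the real code hard-codes run-specific buckets such as s3://rwkv-14b-4k):

import os
import subprocess
import torch

def save_and_upload(state_dict, filename, bucket="s3://my-bucket"):  # placeholder bucket
    local_path = os.path.join("/dev/shm", filename)   # fast tmpfs write
    torch.save(state_dict, local_path)
    # background upload; `mv` deletes the local copy after a successful transfer
    subprocess.Popen(f"aws s3 mv {local_path} {bucket}/{filename} --quiet", shell=True)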

path: RWKV-v4neo.src.dataset/MyDataset.__init__
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: cca1b5e8e597cf40675882bb10b46287c844e35c
commit_message: misc
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> elif args.my_pile_version == 2: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25> self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <26> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "w</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.TextIOWrapper read(self, size: Optional[int]=..., /) -> str at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s>

path: RWKV-v4neo.src.trainer/train_callback.on_train_batch_start
type: Modified
project: BlinkDL~RWKV-LM
commit_hash: cca1b5e8e597cf40675882bb10b46287c844e35c
commit_message: misc
<26>:<add> <add> if args.my_exit_tokens > 0: # cosine decay <add> if trainer.global_step < w_step: <add> lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) <add> else: <add> real_tokens = real_step * args.ctx_len * args.real_bsz <add> warmup_tokens = w_step * args.ctx_len * args.real_bsz <add> progress = (real_tokens - warmup_tokens) / (args.my_exit_tokens - warmup_tokens) <add> progress = max(0, min(1, progress)) <add> lr_final_factor = 0.1 <add> lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) <add> lr = args.lr_init * lr_
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> if trainer.global_step < w_step: <10> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <11> else: <12> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <13> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <14> progress = (decay_step - w_step + 1) / (decay_total - w_step) <15> progress = min(1, max(0, progress)) <16> <17> if args.lr_final == 0 or args.lr_init == 0: # linear decay <18> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <19> else: # exp decay <20> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <21> <22> if trainer.global_step < w_step: <23> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <24> # if trainer.is_global_zero: <25> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <26> <27> for param_group in trainer.optimizers[0].param_</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: math pi: float cos(x: SupportsFloat, /) -> float exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) -> float ===========changed ref 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): self.args = args if args.data_type == "binidx": self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") if args.my_pile_version == 1: self.data = MMapIndexedDataset(args.data_file) self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size rank_zero_info(f"Data has {self.data_size} tokens.") elif args.my_pile_version == 2: data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') data_list = [i.strip().split(' ') for i in data_list] self.data = [] self.data_size = int(data_list[-1][-1]) rank_zero_info(f"Data has {self.data_size} chunks.") for d in data_list: data = MMapIndexedDataset(d[0]) data_size = len(data._bin_buffer) // data._index._dtype_size assert (data_size - args.ctx_len) == int(d[1]) self.data += [[int(d[-1]), int(d[1]), data]] # rank_zero_info(self.data) if args.my_qa_mask > 0: # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 <s>') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size + rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") - 
rank_zero_info("Current vocab size =", self.vocab_size, "(make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape</s>
RWKV-v4neo.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
a637aea61c77cedd290054449d819da5e7b19d44
weight decay (very important for small dataset)
<1>:<add> <add> lr_decay = set() <del> if args.layerwise_lr > 0: <2>:<add> lr_1x = set() <del> lr_1x = set() <3>:<add> lr_2x = set() <del> lr_2x = set() <4>:<add> lr_3x = set() <del> lr_3x = set() <5>:<add> for n, p in self.named_parameters(): <del> for n, p in self.named_parameters(): <6>:<add> if ("time_mix" in n) and (args.layerwise_lr > 0): <del> if "time_mix" in n: <7>:<add> if args.my_pile_stage == 2: <del> if args.my_pile_stage == 2: <8>:<add> lr_2x.add(n) <del> lr_2x.add(n) <9>:<add> else: <del> else: <10>:<add> lr_1x.add(n) <del> lr_1x.add(n) <11>:<add> elif ("time_decay" in n) and (args.layerwise_lr > 0): <del> elif "time_decay" in n: <12>:<add> if args.my_pile_stage == 2: <del> if args.my_pile_stage == 2: <13>:<del> lr_3x.add(n) <14>:<del> else: <15>:<del> lr_2x.add(n) <16>:<del> elif "time_first" in n: <19>:<add> lr_2x.add(n) <del> lr_1x.add(n) <20>:<add> elif ("time_first" in n) and (args.layerwise_lr > 0): <add> lr_3x.add(n) <add> elif (len(p.squeeze
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> if args.layerwise_lr > 0: <2> lr_1x = set() <3> lr_2x = set() <4> lr_3x = set() <5> for n, p in self.named_parameters(): <6> if "time_mix" in n: <7> if args.my_pile_stage == 2: <8> lr_2x.add(n) <9> else: <10> lr_1x.add(n) <11> elif "time_decay" in n: <12> if args.my_pile_stage == 2: <13> lr_3x.add(n) <14> else: <15> lr_2x.add(n) <16> elif "time_first" in n: <17> lr_3x.add(n) <18> else: <19> lr_1x.add(n) <20> lr_1x = sorted(list(lr_1x)) <21> lr_2x = sorted(list(lr_2x)) <22> lr_3x = sorted(list(lr_3x)) <23> # print('1x', lr_1x) <24> # print('2x', lr_2x) <25> # print('3x', lr_3x) <26> param_dict = {n: p for n, p in self.named_parameters()} <27> if args.my_pile_stage == 2: <28> optim_groups = [ <29> {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, <30> {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, <31> {"params": [param_dict[n] for n in lr_3x], "weight_</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [ {"params": [p for n, p in self.named_parameters()], "weight_decay": 0.0}, ] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV.__init__ self.args = args ===========changed ref 0=========== # module: RWKV-v4neo.train ######################################################################################################## # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM ######################################################################################################## if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0</s> ===========changed ref 1=========== # module: RWKV-v4neo.train # offset: 1 <s>_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: fine-tune RWKV 1.5B using 8xA100 40G = 1.76it/s = 115k token/s, VRAM 37477M # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 1000 --epoch_count 1000 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 8 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 8 --precision bf16 --strategy deepspeed_stage_2 --grad_cp 0 # example: fine-tune RWKV 1.5B using 1 GPU fp16 
(VRAM 16G) NOTE: fp16 might overflow # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx</s> ===========changed ref 2=========== # module: RWKV-v4neo.train # offset: 2 <s> 1024 --epoch_steps 200 --epoch_count 1000 --epoch_begin 0 --epoch_save 1 \ # --micro_bsz 11 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision fp16 --strategy deepspeed_stage_2_offload --grad_cp 1 parser = ArgumentParser() parser.add_argument("--load_model", default="", type=str) # full path, with .pth parser.add_argument("--wandb", default="", type=str) # wandb project name. if "" then don't use wandb parser.add_argument("--proj_dir", default="out", type=str) parser.add_argument("--random_seed", default="-1", type=int) parser.add_argument("--data_file", default="", type=str) parser.add_argument("--data_type", default="utf-8", type=str) parser.add_argument("--vocab_size", default=0, type=int) # vocab_size = 0 means auto (for char-level LM and .txt data) parser.add_argument("--ctx_len", default=1024, type=int) parser.add_argument("--epoch_steps", default=1000, type=int) # a mini "epoch" has [epoch_steps] steps parser.add_argument("--epoch_count", default=500, type=int) # train for this many "epochs". will continue afterwards with lr = lr_final parser.add_argument("--epoch_begin", default=0, type=int) # if you load a model trained</s>
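The grouping in configure_optimizers above relies on the fact that PyTorch optimizers keep unknown keys in each param group, so a custom my_lr_scale tag can ride along and be read back by the training callback each step. A minimal stand-alone sketch of that mechanism, using a toy module and plain torch.optim.Adam instead of FusedAdam/DeepSpeedCPUAdam; all names here are illustrative, and the actual commit additionally introduces an lr_decay set so that some matrices receive weight decay (the diff above is truncated at that point).

import torch
import torch.nn as nn

model = nn.ModuleDict({
    "time_decay": nn.Linear(4, 4),   # stands in for parameters whose name contains 'time_decay'
    "ffn":        nn.Linear(4, 4),
})

lr_1x, lr_2x = [], []
for name, p in model.named_parameters():
    (lr_2x if "time_decay" in name else lr_1x).append(p)

optim_groups = [
    {"params": lr_1x, "weight_decay": 0.0, "my_lr_scale": 1.0},
    {"params": lr_2x, "weight_decay": 0.0, "my_lr_scale": 2.0},
]
opt = torch.optim.Adam(optim_groups, lr=6e-4)

# what the per-step callback does with the tag:
for g in opt.param_groups:
    g["lr"] = 6e-4 * g["my_lr_scale"]
print([g["lr"] for g in opt.param_groups])   # [0.0006, 0.0012]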
RWKV-v4neo.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
9143748f8079e7d3c726c2b98a83681242da30f7
RWKV5 preview
<16>:<add> if 'r' in os.environ["RWKV_MY_TESTING"]: <add> self.att = RWKV_TimeMix_RWKV5_Preview(args, layer_id) <add> else: <add> self.att = RWKV_TimeMix(args, layer_id) <del> self.att = RWKV_TimeMix(args, layer_id)
# module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> self.att = RWKV_TimeMix(args, layer_id) <17> <18> if 'g' in os.environ["RWKV_MY_TESTING"]: <19> self.ffn = MishGLU(args, layer_id) <20> else: <21> self.ffn = RWKV_ChannelMix(args, layer_id) <22> <23> if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: <24> self.tiny_ln = nn.LayerNorm(args.n_embd) <25> self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) <26> self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) ===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV_TimeMix.__init__ self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) at: RWKV-v4neo.src.model.RWKV_TimeMix.jit_func xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = self.key(xk) at: torch._C._VariableFunctions sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] ===========changed ref 0=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.ctx_len = args.ctx_len + self.n_embd = args.n_embd + + try: + self.n_head = self.n_embd // 96 + assert self.n_embd % self.n_head == 0 + except: + self.n_head = self.n_embd // 128 + assert self.n_embd % self.n_head == 0 + self.head_size = self.n_embd // self.n_head + + with torch.no_grad(): + ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 + ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 + ddd = torch.ones(1, 1, args.n_embd) + for i in range(args.n_embd): + ddd[0, 0, i] = i / args.n_embd + + # fancy time_mix + self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) + self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) + self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) + + # fancy time_decay + decay_speed = torch.ones(self.n_head) + for h in range</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def __init__(self, args, layer_id): # offset: 1 <s> # fancy time_decay + decay_speed = torch.ones(self.n_head) + for h in range(self.n_head): + decay_speed[h] = -5 + 8 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) + self.time_decay = nn.Parameter(decay_speed) + # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) + + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) + + self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) + ===========changed ref 2=========== # module: RWKV-v4neo.src.model + 
######################################################################################################## + + class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def forward(self, x): + B, TT, C = x.size() # x = (Batch,Time,Channel) + H = self.n_head + S = self.head_size + + xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + + r = self.receptance(xr).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) + k = self.key(xk).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) + v = self.value(xv).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) + + ######################################## + + T = 512 # chunk length + assert TT % T == 0 + + ww = torch.exp(-torch.exp(self.time_decay.float())) + + att_mask = torch.zeros(H, T, T, device=r.device) + rows, cols = torch.tril_indices(T, T, device=r.device) + powers = rows - cols + for h in range(H): + att_mask[h</s>
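The att_mask built in the preview forward above (changed ref 2) is a per-head causal decay matrix: entry (i, j) is w**(i-j) for j <= i and 0 otherwise, where w = exp(-exp(time_decay)) for that head; later commits in this section replace the diagonal with a learned bonus term. A tiny illustration for one head and chunk length 4 (w = 0.9 is an arbitrary example decay, values in the comment are rounded):

import torch

T = 4
w = 0.9                                     # w = exp(-exp(time_decay)) for one head

mask = torch.zeros(T, T)
rows, cols = torch.tril_indices(T, T)
mask[rows, cols] = w ** (rows - cols).float()
print(mask)
# [[1.000, 0.000, 0.000, 0.000],
#  [0.900, 1.000, 0.000, 0.000],
#  [0.810, 0.900, 1.000, 0.000],
#  [0.729, 0.810, 0.900, 1.000]]
# entry (i, j) = w**(i-j): older keys are discounted geometrically, future keys are masked.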
RWKV-v4neo.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
9143748f8079e7d3c726c2b98a83681242da30f7
RWKV5 preview
<24>:<add> if 'r' in os.environ["RWKV_MY_TESTING"]: <add> zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <add> else: <add> zero = [".att.key.", ".att.receptance.", ".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <del> for kk in [".att.key.", ".att.receptance.", ".att.output.", ".att.key.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.']: <25>:<add> for kk in zero:
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> for n in self.state_dict(): <11> p = self.state_dict()[n] <12> shape = p.shape <13> <14> gain = 1.0 <15> scale = 1.0 <16> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <17> m[n] = p <18> else: <19> if n == "emb.weight": <20> scale = -1 * self.args.lr_init <21> else: <22> if shape[0] > shape[1]: <23> gain = math.sqrt(shape[0] / shape[1]) <24> for kk in [".att.key.", ".att.receptance.", ".att.output.", ".att.key.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.']: <25> if kk in n: <26> scale = 0 <27> if n == "head.weight": <28> scale = 0.5 <29> if "head_k." in n: <30> scale = 0.1 <31> if "head_q." in n: <32> scale = 0 <33> <34> print(f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {n}") <35> <36> if self.args.accelerator.upper() == "GPU": <37> m[n] = torch.empty((shape[0], shape[1]), device="cuda") <38> else</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() # if n == "emb.weight": # print(m[n]) gc.collect() torch.cuda.empty_cache() return m ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) if self.layer_id == 0: self.ln0 = nn.LayerNorm(args.n_embd) if args.my_pos_emb > 0: self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) if self.layer_id == 0 and self.args.pre_ffn > 0: self.ffnPre = RWKV_ChannelMix(args, 0) else: + if 'r' in os.environ["RWKV_MY_TESTING"]: + self.att = RWKV_TimeMix_RWKV5_Preview(args, layer_id) + else: + self.att = RWKV_TimeMix(args, layer_id) - self.att = RWKV_TimeMix(args, layer_id) if 'g' in os.environ["RWKV_MY_TESTING"]: self.ffn = MishGLU(args, layer_id) else: self.ffn = RWKV_ChannelMix(args, layer_id) if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 <s>.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) ===========changed ref 2=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.ctx_len = args.ctx_len + self.n_embd = args.n_embd + + try: + self.n_head = self.n_embd // 96 + assert self.n_embd % self.n_head == 0 + except: + self.n_head = self.n_embd // 128 + assert self.n_embd % self.n_head == 0 + self.head_size = self.n_embd // self.n_head + + with torch.no_grad(): + ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 + ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 + ddd = torch.ones(1, 1, args.n_embd) + for i in range(args.n_embd): + ddd[0, 0, i] = i / args.n_embd + + # fancy time_mix + self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) + self.time_mix_v = nn.Parameter(torch.pow(ddd, 
ratio_1_to_almost0) + 0.3 * ratio_0_to_1) + self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) + + # fancy time_decay + decay_speed = torch.ones(self.n_head) + for h in range</s> ===========changed ref 3=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def __init__(self, args, layer_id): # offset: 1 <s> # fancy time_decay + decay_speed = torch.ones(self.n_head) + for h in range(self.n_head): + decay_speed[h] = -5 + 8 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) + self.time_decay = nn.Parameter(decay_speed) + # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) + + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) + self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) + + self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) +
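A condensed sketch of the per-matrix rules in generate_init_weight above: the listed projections are zero-initialized, the embedding gets small uniform noise of width lr_init, and everything else is orthogonal with gain sqrt(out/in) for tall matrices (the head gets an extra 0.5 scale). The helper below is illustrative only and omits the remaining special cases (head_k, head_q, the 'r' flag).

import math
import torch
import torch.nn as nn

def init_matrix(shape, kind="ortho", lr_init=6e-4):
    w = torch.empty(shape)
    if kind == "zero":                    # e.g. .att.output., .ffn.value., .ffn.receptance.
        nn.init.zeros_(w)
    elif kind == "emb":                   # emb.weight: uniform in [-lr_init, lr_init]
        nn.init.uniform_(w, a=-lr_init, b=lr_init)
    else:                                 # default: scaled orthogonal
        gain = math.sqrt(shape[0] / shape[1]) if shape[0] > shape[1] else 1.0
        scale = 0.5 if kind == "head" else 1.0
        nn.init.orthogonal_(w, gain=gain * scale)
    return w

w = init_matrix((2048, 512))              # tall matrix: gain = sqrt(2048/512) = 2
print(w.shape, w.norm(dim=0)[:3])         # every column has norm equal to the gain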
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.__init__
Modified
BlinkDL~RWKV-LM
686c962008676809f17cf2424c193d9dc217c0e4
rwkv5 with time_first
<6>:<add> self.head_size = 64 <del> try: <7>:<add> self.n_head = self.n_embd // self.head_size <del> self.n_head = self.n_embd // 96 <8>:<add> assert self.n_embd % self.n_head == 0 <del> assert self.n_embd % self.n_head == 0 <9>:<add> <add> self.chunk_len = 512 <del> except: <10>:<del> self.n_head = self.n_embd // 128 <11>:<add> assert self.ctx_len % self.chunk_len == 0 <del> assert self.n_embd % self.n_head == 0 <12>:<del> self.head_size = self.n_embd // self.n_head
# module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.ctx_len = args.ctx_len <4> self.n_embd = args.n_embd <5> <6> try: <7> self.n_head = self.n_embd // 96 <8> assert self.n_embd % self.n_head == 0 <9> except: <10> self.n_head = self.n_embd // 128 <11> assert self.n_embd % self.n_head == 0 <12> self.head_size = self.n_embd // self.n_head <13> <14> with torch.no_grad(): <15> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <16> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <17> ddd = torch.ones(1, 1, args.n_embd) <18> for i in range(args.n_embd): <19> ddd[0, 0, i] = i / args.n_embd <20> <21> # fancy time_mix <22> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <23> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <24> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <25> <26> # fancy time_decay <27> decay_speed = torch.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): def __init__(self, args, layer_id): # offset: 1 for h in range(self.n_head): decay_speed[h] = -5 + 8 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========unchanged ref 0=========== at: math log(x: SupportsFloat, base: SupportsFloat=...) -> float at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() ===========unchanged ref 1=========== at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
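The decay_speed loop in this __init__ spreads the heads over a range of time constants; the effective per-step decay used in the forward pass is w = exp(-exp(time_decay)), so very negative values mean long memory and large values mean the head forgets almost immediately. Illustrative numbers for one mid-depth layer (ratio_0_to_1 = 0.5) and 8 heads, using the constants of this particular commit (later commits in this section adjust them):

import torch

n_head, ratio_0_to_1 = 8, 0.5
decay_speed = torch.tensor(
    [-5 + 8 * (h / (n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) for h in range(n_head)]
)
w = torch.exp(-torch.exp(decay_speed))   # effective per-step decay factor per head

print(decay_speed[0].item(), decay_speed[-1].item())   # -5.0 and 3.0
print(w[0].item(), w[-1].item())                       # ~0.993 (long memory) and ~1.9e-9 (no memory)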
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.forward
Modified
BlinkDL~RWKV-LM
686c962008676809f17cf2424c193d9dc217c0e4
rwkv5 with time_first
<0>:<del> B, TT, C = x.size() # x = (Batch,Time,Channel) <2>:<add> T = self.chunk_len <del> S = self.head_size <4>:<del> xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr <5>:<del> xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) <6>:<del> xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) <7>:<del> xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) <8>:<add> r, k, v = self.jit_func(x) <9>:<del> r = self.receptance(xr).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <10>:<del> k = self.key(xk).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <11>:<del> v = self.value(xv).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <12>:<add> w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) <add> u = torch.exp(self.time_first.float()).unsqueeze(-1) <13>:<add> ################################################################################ <add> ######## <add> ws = w.pow(T).reshape(1, H, 1, 1) <del> ######################################## <15>:<add> ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) <add> w = w.repeat(1, T).pow(ind) <del> T = 512 # chunk length <16>:<del> assert TT % T == 0 <18>:<add> wk = w.reshape(1, H, 1, T) <add> wb = wk.transpose(-2, -1).flip(2) <del> ww = torch.exp(-torch.exp(self.time_decay.float())) <20>:<add> w = torch.cat([w[:, 1:], u], dim=1) <add>
# module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def forward(self, x): <0> B, TT, C = x.size() # x = (Batch,Time,Channel) <1> H = self.n_head <2> S = self.head_size <3> <4> xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr <5> xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) <6> xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) <7> xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) <8> <9> r = self.receptance(xr).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <10> k = self.key(xk).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <11> v = self.value(xv).view(B, TT, self.n_head, self.head_size).transpose(1, 2) # (B, T, C) -> (B, H, T, S) <12> <13> ######################################## <14> <15> T = 512 # chunk length <16> assert TT % T == 0 <17> <18> ww = torch.exp(-torch.exp(self.time_decay.float())) <19> <20> att_mask = torch.zeros(H, T, T, device=r.device) <21> rows, cols = torch.tril_indices(T, T, device=r.device) <22> powers = rows - cols</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def forward(self, x): # offset: 1 att_mask[h][rows, cols] = ww[h] ** powers ww = ww.unsqueeze(-1) wo = ww.pow(T).reshape(1, H, 1,1) ww = ww.repeat(1, T) ind = torch.arange(T, device=r.device).flip(0).unsqueeze(0).repeat(H, 1) wa = ww.pow(ind) wa = F.pad(wa, (0, T)) wa = torch.tile(wa, [T]) wa = wa[:, :-T].reshape(-1, T, 2 * T - 1) wa = wa[:, :, T-1:].unsqueeze(0) wc = ww.pow(1 + ind).unsqueeze(-2).unsqueeze(0) wa = wa.to(dtype=r.dtype) wc = wc.to(dtype=r.dtype) wo = wo.to(dtype=r.dtype) ######################################## s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) for i in range(TT // T): rr = r[:,:,i*T:i*T+T,:] kk = k[:,:,i*T:i*T+T,:].transpose(-2, -1) vv = v[:,:,i*T:i*T+T,:] y = ((rr @ kk) * wa) @ vv y2 = (rr @ s) * wa[:,:,:,0:1] y = y</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): + def forward(self, x): # offset: 2 <s>) @ vv y2 = (rr @ s) * wa[:,:,:,0:1] y = y + y2 x[:,:,i*T:i*T+T,:] = y s = wo * s + (kk * wc) @ vv x = x.transpose(1, 2).contiguous().view(B, TT, C) # (B, H, T, S) -> (B, T, H, S) -> (B, T, C) x = self.ln_x(x.transpose(-2, -1)).transpose(-2, -1) return self.output(x) ===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5_Preview.__init__ self.head_size = 64 self.n_head = self.n_embd // self.head_size self.chunk_len = 512 self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_decay = nn.Parameter(decay_speed) self.time_first = nn.Parameter(torch.ones(self.n_head) * math.log(0.3)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) at: torch.jit._script.ScriptModule forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## + class RWKV_TimeMix_RWKV5_Preview(MyModule): - class RWKV_TimeMix_RWKV5_Preview(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ctx_len = args.ctx_len self.n_embd = args.n_embd + self.head_size = 64 - try: + self.n_head = self.n_embd // self.head_size - self.n_head = self.n_embd // 96 + assert self.n_embd % self.n_head == 0 - assert self.n_embd % self.n_head == 0 + + self.chunk_len = 512 - except: - self.n_head = self.n_embd // 128 + assert self.ctx_len % self.chunk_len == 0 - assert self.n_embd % self.n_head == 0 - self.head_size = self.n_embd // self.n_head with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * 
ratio_0</s>
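Mathematically, the chunked path in this entry (jit_func plus jit_func_2) evaluates a plain token-by-token recurrence per head: carry an S x S state, decay it by w each step, add the outer product of the current key and value, and read it out with the receptance, with the current token weighted by the bonus u = exp(time_first) instead of the decay. A single-head, unbatched sketch of that recurrence (the GroupNorm ln_x and the output projection are omitted, and the names are mine):

import torch

def rwkv5_head(r, k, v, w, u):
    T, S = r.shape
    s = torch.zeros(S, S)                                 # running state
    out = torch.zeros(T, S)
    for t in range(T):
        out[t] = r[t] @ s + u * (r[t] @ k[t]) * v[t]      # past state + current-token bonus
        s = w * s + torch.outer(k[t], v[t])               # decay state, add k_t v_t^T
    return out

T, S = 8, 4
r, k, v = torch.randn(T, S), torch.randn(T, S), torch.randn(T, S)
print(rwkv5_head(r, k, v, w=0.95, u=0.3).shape)           # torch.Size([8, 4])

The chunked version computes the same recurrence 512 tokens at a time, so the Python-level loop runs ctx_len/512 times instead of once per token.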
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.__init__
Modified
BlinkDL~RWKV-LM
9b38a54c8e9e0cdc01e3d461a299485d3c1be6c3
BUG FIX: rwkv5 groupnorm was wrong
<28>:<add> decay_speed[h] = -10 + 9 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) <del> decay_speed[h] = -9 + 8 * (h / (self.
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.ctx_len = args.ctx_len <4> self.n_embd = args.n_embd <5> <6> self.head_size = 64 <7> self.n_head = self.n_embd // self.head_size <8> assert self.n_embd % self.n_head == 0 <9> <10> self.chunk_len = 512 <11> assert self.ctx_len % self.chunk_len == 0 <12> <13> with torch.no_grad(): <14> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <15> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <16> ddd = torch.ones(1, 1, args.n_embd) <17> for i in range(args.n_embd): <18> ddd[0, 0, i] = i / args.n_embd <19> <20> # fancy time_mix <21> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <22> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <23> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <24> <25> # fancy time_decay <26> decay_speed = torch.ones(self.n_head) <27> for h in range(self.n_head): <28> decay_speed[h] = -9 + 8 * (h / (self.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) self.time_first = nn.Parameter(torch.ones(self.n_head) * math.log(0.3)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() ===========unchanged ref 1=========== at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.jit_func_2
Modified
BlinkDL~RWKV-LM
9b38a54c8e9e0cdc01e3d461a299485d3c1be6c3
BUG FIX: rwkv5 groupnorm was wrong
<19>:<add> x = x.transpose(1, 2).contiguous().view(B * TT, H*S) # BHTS -> BTHS -> BTC <del> x = x.transpose(1, 2).contiguous().view(B, TT, H*S) # BHTS -> BTHS -> BTC <20>:<add> x = self.ln_x(x).view(B, TT, H*S) <del> x = self.ln_x(x.transpose(-2, -1)).transpose(-2, -1)
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): @MyFunction def jit_func_2(self, r, k, v, w, wk, wb, ws): <0> B, H, TT, S = r.size() <1> T = self.chunk_len <2> <3> s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state <4> x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output <5> <6> ################################################################################ <7> ######## <8> for i in range(TT // T): <9> rr = r[:, :, i*T:i*T+T, :] <10> kk = k[:, :, :, i*T:i*T+T] <11> vv = v[:, :, i*T:i*T+T, :] <12> <13> x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb <14> <15> s = ws * s + (kk * wk) @ vv <16> ######## <17> ################################################################################ <18> <19> x = x.transpose(1, 2).contiguous().view(B, TT, H*S) # BHTS -> BTHS -> BTC <20> x = self.ln_x(x.transpose(-2, -1)).transpose(-2, -1) <21> return self.output(x) <22>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5_Preview.__init__ self.chunk_len = 512 ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ctx_len = args.ctx_len self.n_embd = args.n_embd self.head_size = 64 self.n_head = self.n_embd // self.head_size assert self.n_embd % self.n_head == 0 self.chunk_len = 512 assert self.ctx_len % self.chunk_len == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) # fancy time_decay decay_speed = torch.ones(self.n_head) for h in range(self.n_head): + decay_speed[h] = -10 + 9 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) - decay</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 <s>self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) - decay_speed[h] = -9 + 8 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) + self.time_first = nn.Parameter(torch.ones(self.n_head) * (-2.0)) - self.time_first = nn.Parameter(torch.ones(self.n_head) * math.log(0.3)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd)
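The reshape in this fix matters because nn.GroupNorm normalizes the channel groups of each sample along the batch dimension. Feeding it (B*T, C) makes every token its own sample, so each head's channels are normalized per token; the previous (B, C, T) layout pooled the statistics over the whole sequence. A toy check (shapes and names are mine, and the input is already laid out as (B, T, C) for simplicity):

import torch
import torch.nn as nn

B, T, H, S = 2, 5, 4, 16
C = H * S
gn = nn.GroupNorm(H, C)
x = torch.randn(B, T, C)

per_token = gn(x.reshape(B * T, C)).view(B, T, C)      # fixed layout: stats per token, per head
per_sequence = gn(x.transpose(1, 2)).transpose(1, 2)   # old layout: stats pooled over T

print(torch.allclose(per_token, per_sequence))          # False: the two normalizations differ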
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.__init__
Modified
BlinkDL~RWKV-LM
468e608107632743c5ea3a9c23d9a18b4ee2ab59
rwkv5 working
<28>:<add> decay_speed[h] = -8 + 7 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) <del> decay_speed[h] = -10 + 9 * (h / (self.
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.ctx_len = args.ctx_len <4> self.n_embd = args.n_embd <5> <6> self.head_size = 64 <7> self.n_head = self.n_embd // self.head_size <8> assert self.n_embd % self.n_head == 0 <9> <10> self.chunk_len = 512 <11> assert self.ctx_len % self.chunk_len == 0 <12> <13> with torch.no_grad(): <14> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <15> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <16> ddd = torch.ones(1, 1, args.n_embd) <17> for i in range(args.n_embd): <18> ddd[0, 0, i] = i / args.n_embd <19> <20> # fancy time_mix <21> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <22> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <23> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <24> <25> # fancy time_decay <26> decay_speed = torch.ones(self.n_head) <27> for h in range(self.n_head): <28> decay_speed[h] = -10 + 9 * (h / (self.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) self.time_first = nn.Parameter(torch.ones(self.n_head) * (-2.0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) ===========unchanged ref 1=========== at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) at: torch.nn.modules.normalization GroupNorm(num_groups: int, num_channels: int, eps: 
float=1e-5, affine: bool=True, device=None, dtype=None) at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) ===========unchanged ref 2=========== at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
RWKV-v4neo.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
bff997a64972a343ccc40afffdb850c4a89c6b56
better rwkv5
<17>:<add> if 'ln_x.weight' in n: <add> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <add> m[n] = (p * 0.0) + (layer_scale ** 0.5) <add> else: <add> m[n] = p <del> m[n] = p
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> for n in self.state_dict(): <11> p = self.state_dict()[n] <12> shape = p.shape <13> <14> gain = 1.0 <15> scale = 1.0 <16> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <17> m[n] = p <18> else: <19> if n == "emb.weight": <20> scale = -1 * self.args.lr_init <21> else: <22> if shape[0] > shape[1]: <23> gain = math.sqrt(shape[0] / shape[1]) <24> if 'r' in os.environ["RWKV_MY_TESTING"]: <25> zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <26> else: <27> zero = [".att.key.", ".att.receptance.", ".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <28> for kk in zero: <29> if kk in n: <30> scale = 0 <31> if n == "head.weight": <32> scale = 0.5 <33> if "head_k." in n: <34> scale = 0.1 <35> if "head_q." in n: <36> scale = 0 <37> <38> print(f"{str(</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() # if n == "emb.weight": # print(m[n]) gc.collect() torch.cuda.empty_cache() return m ===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV.__init__ self.args = args at: math sqrt(x: SupportsFloat, /) -> float at: os environ = _createenviron() at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.init uniform_(tensor: Tensor, a: float=0., b: float=1.) -> Tensor zeros_(tensor: Tensor) -> Tensor
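The new ln_x.weight rule in this diff gives the per-block GroupNorm a depth-dependent starting gain: the layer index is read from the parameter name via n.split('.')[1] (e.g. a name like 'blocks.3.att.ln_x.weight' yields 3), and the weight is filled with sqrt((layer_id + 1) / n_layer), so early layers start small and the last layer starts at 1.0. Illustrative values for a hypothetical 24-layer model:

import torch

n_layer, n_embd = 24, 2048
for layer_id in (0, 11, 23):
    layer_scale = (1 + layer_id) / n_layer
    w = torch.zeros(n_embd) + layer_scale ** 0.5     # what (p * 0.0) + layer_scale ** 0.5 produces
    print(layer_id, round(w[0].item(), 3))           # 0 -> 0.204, 11 -> 0.707, 23 -> 1.0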
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.__init__
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.ctx_len = args.ctx_len <4> self.n_embd = args.n_embd <5> <6> self.head_size = 64 <7> self.n_head = self.n_embd // self.head_size <8> assert self.n_embd % self.n_head == 0 <9> <10> self.chunk_len = 512 <11> assert self.ctx_len % self.chunk_len == 0 <12> <13> with torch.no_grad(): <14> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <15> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <16> ddd = torch.ones(1, 1, args.n_embd) <17> for i in range(args.n_embd): <18> ddd[0, 0, i] = i / args.n_embd <19> <20> # fancy time_mix <21> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <22> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <23> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <24> <25> # fancy time_decay <26> decay_speed = torch.ones(self.n_head) <27> for h in range(self.n_head): <28> decay_speed[h] = -8 + 7 * (h / (self.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========unchanged ref 0=========== at: os environ = _createenviron() at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() ===========unchanged ref 1=========== at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) 
===========changed ref 0=========== # module: RWKV-v4neo.train - ######################################################################################################## - # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM - ######################################################################################################## + logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5</s> ===========changed ref 1=========== # module: RWKV-v4neo.train # offset: 1 <s>_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: fine-tune RWKV 1.5B using 8xA100 40G = 1.76it/s = 115k token/s, VRAM 37477M # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 1000 --epoch_count 1000 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 8 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 8 --precision bf16 --strategy deepspeed_stage_2 --grad_cp 0 # example: fine-tune RWKV 1.5B using 1 GPU fp16 (VRAM 16G) NOTE: fp16 might overflow # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type</s>
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.forward
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<6>:<add> <add> if 'r2' in os.environ["RWKV_MY_TESTING"]: <add> u = self.time_faaaa.float().unsqueeze(-1) <add> else: <add> u = torch.exp(self.time_first.float()).unsqueeze(-1) <del> u = torch.exp(self.time_first.float()).unsqueeze(-1)
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): <0> H = self.n_head <1> T = self.chunk_len <2> <3> r, k, v = self.jit_func(x) <4> <5> w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) <6> u = torch.exp(self.time_first.float()).unsqueeze(-1) <7> <8> ################################################################################ <9> ######## <10> ws = w.pow(T).reshape(1, H, 1, 1) <11> <12> ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) <13> w = w.repeat(1, T).pow(ind) <14> <15> wk = w.reshape(1, H, 1, T) <16> wb = wk.transpose(-2, -1).flip(2) <17> <18> w = torch.cat([w[:, 1:], u], dim=1) <19> w = F.pad(w, (0, T)) <20> w = torch.tile(w, [T]) <21> w = w[:, :-T].reshape(-1, T, 2 * T - 1) <22> w = w[:, :, T-1:].reshape(1, H, T, T) <23> ######## <24> ################################################################################ <25> <26> w = w.to(dtype=r.dtype) <27> wk = wk.to(dtype=r.dtype) <28> wb = wb.to(dtype=r.dtype) <29> ws = ws.to(dtype=r.dtype) <30> return self.jit_func_2(r, k, v, w, wk, wb, ws) <31>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5_Preview.__init__ self.n_head = self.n_embd // self.head_size self.chunk_len = 512 self.time_decay = nn.Parameter(decay_speed) self.time_faaaa = nn.Parameter(torch.ones(self.n_head) * 0.05) self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) at: os environ = _createenviron() at: torch.jit._script.ScriptModule forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ctx_len = args.ctx_len self.n_embd = args.n_embd self.head_size = 64 self.n_head = self.n_embd // self.head_size assert self.n_embd % self.n_head == 0 self.chunk_len = 512 assert self.ctx_len % self.chunk_len == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) # fancy time_decay decay_speed = torch.ones(self.n_head) for h in range(self.n_head): decay_speed[h] = -8 + 7 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 <s>.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) + if 'r2' in os.environ["RWKV_MY_TESTING"]: + self.time_faaaa = nn.Parameter(torch.ones(self.n_head) * 0.05) + else: + self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) - self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========changed ref 2=========== # module: RWKV-v4neo.train - ######################################################################################################## - # The RWKV Language Model - https://github.com/BlinkDL/RWKV-LM - ######################################################################################################## + logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" 
--wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5</s>
RWKV-v4neo.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<13>:<add> if args.my_pile_stage == 2: <add> lr_3x.add(n) <add> else: <add> lr_2x.add(n) <add> elif ("time_faaaa" in n) and (args.layerwise_lr > 0):
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> if ("time_mix" in n) and (args.layerwise_lr > 0): <8> if args.my_pile_stage == 2: <9> lr_2x.add(n) <10> else: <11> lr_1x.add(n) <12> elif ("time_decay" in n) and (args.layerwise_lr > 0): <13> if args.my_pile_stage == 2: <14> lr_3x.add(n) <15> else: <16> lr_2x.add(n) <17> elif ("time_first" in n) and (args.layerwise_lr > 0): <18> lr_3x.add(n) <19> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <20> lr_decay.add(n) <21> else: <22> lr_1x.add(n) <23> <24> lr_decay = sorted(list(lr_decay)) <25> lr_1x = sorted(list(lr_1x)) <26> lr_2x = sorted(list(lr_2x)) <27> lr_3x = sorted(list(lr_3x)) <28> # print('decay', lr_decay) <29> # print('1x', lr_1x) <30> # print('2x', lr_2x) <31> # print('3x', lr_3x) <32> param_dict = {n: p for n, p in self.named_parameters()} <33> <34> if args.layerwise_lr > 0: <35> if args.my_pile_stage == 2: <36> optim_groups = [ <37> {"params</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps,</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s>=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): H = self.n_head T = self.chunk_len r, k, v = self.jit_func(x) w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) + + if 'r2' in os.environ["RWKV_MY_TESTING"]: + u = self.time_faaaa.float().unsqueeze(-1) + else: + u = torch.exp(self.time_first.float()).unsqueeze(-1) - u = torch.exp(self.time_first.float()).unsqueeze(-1) 
################################################################################ ######## ws = w.pow(T).reshape(1, H, 1, 1) ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) w = w.repeat(1, T).pow(ind) wk = w.reshape(1, H, 1, T) wb = wk.transpose(-2, -1).flip(2) w = torch.cat([w[:, 1:], u], dim=1) w = F.pad(w, (0, T)) w = torch.tile(w, [T]) w = w[:, :-T].reshape(-1, T, 2 * T - 1) w = w[:, :, T-1:].reshape(1, H, T, T) ######## ################################################################################ w = w.to(dtype=r.dtype) wk = wk.to(dtype=r.dtype) wb = wb.to(dtype=r.dtype) ws = ws.to(dtype=r.dtype) return self.jit_func_2(r, k, v, w, wk, wb, ws)
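===========illustrative example===========
Not part of the commit: a simplified, runnable sketch of the name-based learning-rate bucketing shown above and of the 1x/2x/3x optimizer groups from the non-pile branch. Plain strings stand in for model.named_parameters(); the new time_faaaa rule is left out because the diff summary only shows its condition, not which bucket it lands in.
# Dummy parameter names; assume layerwise_lr > 0 and my_pile_stage == 0.
names = ["blocks.0.att.time_mix_k", "blocks.0.att.time_decay",
         "blocks.0.att.time_first", "emb.weight", "head.weight"]

lr_1x, lr_2x, lr_3x = set(), set(), set()
for n in names:
    if "time_mix" in n:
        lr_1x.add(n)      # would be lr_2x when my_pile_stage == 2
    elif "time_decay" in n:
        lr_2x.add(n)      # would be lr_3x when my_pile_stage == 2
    elif "time_first" in n:
        lr_3x.add(n)
    else:
        lr_1x.add(n)

# The buckets become optimizer groups whose LR is scaled per group via "my_lr_scale".
optim_groups = [
    {"params": sorted(lr_1x), "weight_decay": 0.0, "my_lr_scale": 1.0},
    {"params": sorted(lr_2x), "weight_decay": 0.0, "my_lr_scale": 2.0},
    {"params": sorted(lr_3x), "weight_decay": 0.0, "my_lr_scale": 3.0},
]
for g in optim_groups:
    print(g)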
RWKV-v4neo.src.trainer/train_callback.on_train_batch_start
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<27>:<add> if args.my_exit_tokens != 0: # cosine decay <del> if args.my_exit_tokens > 0: # cosine decay
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> if trainer.global_step < w_step: <10> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <11> else: <12> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <13> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <14> progress = (decay_step - w_step + 1) / (decay_total - w_step) <15> progress = min(1, max(0, progress)) <16> <17> if args.lr_final == 0 or args.lr_init == 0: # linear decay <18> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <19> else: # exp decay <20> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <21> <22> if trainer.global_step < w_step: <23> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <24> # if trainer.is_global_zero: <25> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <26> <27> if args.my_exit_tokens > 0: # cosine decay</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) else: real_tokens = real_step * args.ctx_len * args.real_bsz warmup_tokens = w_step * args.ctx_len * args.real_bsz progress = (real_tokens - warmup_tokens) / (args.my_exit_tokens - warmup_tokens) progress = max(0, min(1, progress)) lr_final_factor = 0.1 lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) lr = args.lr_init * lr_mult if progress >= 1: my_save( pl_module.state_dict(), f"{args.proj_dir}/rwkv-final.pth", ) exit(0) for param_group in trainer.optimizers[0].param_groups: if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 2 <s>txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: math pi: float cos(x: SupportsFloat, /) -> float exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) 
-> float ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): H = self.n_head T = self.chunk_len r, k, v = self.jit_func(x) w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) + + if 'r2' in os.environ["RWKV_MY_TESTING"]: + u = self.time_faaaa.float().unsqueeze(-1) + else: + u = torch.exp(self.time_first.float()).unsqueeze(-1) - u = torch.exp(self.time_first.float()).unsqueeze(-1) ################################################################################ ######## ws = w.pow(T).reshape(1, H, 1, 1) ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) w = w.repeat(1, T).pow(ind) wk = w.reshape(1, H, 1, T) wb = wk.transpose(-2, -1).flip(2) w = torch.cat([w[:, 1:], u], dim=1) w = F.pad(w, (0, T)) w = torch.tile(w, [T]) w = w[:, :-T].reshape(-1, T, 2 * T - 1) w = w[:, :, T-1:].reshape(1, H, T, T) ######## ################################################################################ w = w.to(dtype=r.dtype) wk = wk.to(dtype=r.dtype) wb = wb.to(dtype=r.dtype) ws = ws.to(dtype=r.dtype) return self.jit_func_2(r, k, v, w, wk, wb, ws) ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ctx_len = args.ctx_len self.n_embd = args.n_embd self.head_size = 64 self.n_head = self.n_embd // self.head_size assert self.n_embd % self.n_head == 0 self.chunk_len = 512 assert self.ctx_len % self.chunk_len == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) # fancy time_decay decay_speed = torch.ones(self.n_head) for h in range(self.n_head): decay_speed[h] = -8 + 7 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time</s>
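===========illustrative example===========
Not part of the commit: a runnable sketch of the token-based cosine LR decay introduced in the changed ref above. Argument names mirror the args fields; the numeric values are made up, and only the my_exit_tokens > 0 branch is reproduced.
import math

def cosine_lr(real_tokens, warmup_tokens, exit_tokens, lr_init, lr_final):
    # The negative my_exit_tokens branch above averages this value with the exp-decay lr instead.
    progress = (real_tokens - warmup_tokens) / (abs(exit_tokens) - warmup_tokens)
    progress = max(0.0, min(1.0, progress))
    lr_final_factor = lr_final / lr_init
    lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress)
    return lr_init * lr_mult

print(cosine_lr(0, 0, 100_000_000, 6e-4, 6e-5))            # 6e-4 (start of decay)
print(cosine_lr(100_000_000, 0, 100_000_000, 6e-4, 6e-5))  # 6e-5 (lr_final at the exit-token budget)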
RWKV-v4neo.src.trainer/train_callback.on_train_batch_end
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<23>:<add> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} <del> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token_per_step / 1e9}
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): <0> args = self.args <1> if trainer.is_global_zero: # logging <2> t_now = time.time_ns() <3> token_per_step = args.ctx_len * args.real_bsz <4> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <5> kt_s = 0 <6> try: <7> t_cost = (t_now - trainer.my_time_ns) / 1e9 <8> kt_s = token_per_step / t_cost / 1000 <9> self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) <10> self.log("Kt/s", kt_s, prog_bar=True, on_step=True) <11> except: <12> pass <13> trainer.my_time_ns = t_now <14> trainer.my_loss = trainer.my_loss_all.float().mean().item() <15> trainer.my_loss_sum += trainer.my_loss <16> trainer.my_loss_count += 1 <17> trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count <18> self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) <19> self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) <20> # self.log("s", real_step, prog_bar=True, on_step=True) <21> <22> if len(args.wandb) > 0: <23> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token_per_step / 1e9} <24> if kt_s > 0: <25> lll["kt/</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: RWKV-v4neo.src.trainer.train_callback.on_train_batch_start args = self.args at: time time_ns() -> int ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): args = self.args # if args.cuda_cleanup > 0: # torch.cuda.empty_cache() real_step = trainer.global_step + args.epoch_begin * args.epoch_steps # LR schedule w_step = args.warmup_steps if args.lr_final == args.lr_init or args.epoch_count == 0: lr = args.lr_init if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) else: decay_step = real_step - args.my_pile_edecay * args.epoch_steps decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps progress = (decay_step - w_step + 1) / (decay_total - w_step) progress = min(1, max(0, progress)) if args.lr_final == 0 or args.lr_init == 0: # linear decay lr = args.lr_init + (args.lr_final - args.lr_init) * progress else: # exp decay lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) # if trainer.is_global_zero: # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) + if args.my_exit_tokens != 0: # cosine decay - if args.my_exit_tokens > 0: # cosine decay if trainer.global_step < w</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 <s> # cosine decay - if args.my_exit_tokens > 0: # cosine decay if trainer.global_step < w_step: lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) else: real_tokens = real_step * args.ctx_len * args.real_bsz warmup_tokens = w_step * args.ctx_len * args.real_bsz + progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens) - progress = (real_tokens - warmup_tokens) / (args.my_exit_tokens - warmup_tokens) progress = max(0, min(1, progress)) + lr_final_factor = args.lr_final / args.lr_init - lr_final_factor = 0.1 lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) + if args.my_exit_tokens > 0: + lr = args.lr_init * lr_mult - lr = args.lr_init * lr_mult + else: + lr = (lr + args.lr_init * lr_mult) / 2 if progress >= 1: my_save( pl_module.state_dict(), f"{args.proj_dir}/rwkv-final.pth", ) exit(0) + if args.weight_decay_final > 0: + wd_now = args.weight_decay * math.exp(math.log(args.weight_</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 2 <s>final / args.weight_decay) * progress) + else: + wd_now = args.weight_decay + for param_group in trainer.optimizers[0].param_groups: + if 
param_group["weight_decay"] > 0: + param_group["weight_decay"] = wd_now if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr + trainer.my_wd = wd_now # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp,
RWKV-v4neo.src.trainer/generate_init_weight
Modified
BlinkDL~RWKV-LM
69e6c50001e8da742dcfdd7e53064f155a6c9ad1
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM into main
<7>:<add> try: <add> assert k in mm <del> assert k in mm <8>:<add> except: <add> print('missing', k) <add> exit(0)
# module: RWKV-v4neo.src.trainer @rank_zero_only def generate_init_weight(model, init_weight_name): <0> mm = model.generate_init_weight() <1> <2> if model.args.my_pile_stage == 1: <3> if len(model.args.load_model) > 0: <4> print(f"Combine weights from {model.args.load_model}...") <5> load_dict = torch.load(model.args.load_model, map_location="cpu") <6> for k in load_dict: <7> assert k in mm <8> src = load_dict[k] <9> try: <10> mm[k] = src.reshape(mm[k].shape) <11> except: <12> tmp = mm[k].squeeze().clone() <13> print(k, src.shape, '-->', mm[k].shape) <14> ss = src.shape[0] <15> dd = tmp.shape[0] <16> for i in range(dd): <17> pos = i / dd * ss <18> if pos >= ss - 1: <19> tmp[i] = src[ss-1] <20> else: <21> p0 = int(math.floor(pos)) <22> ii = pos - p0 <23> tmp[i] = src[p0] * (1-ii) + src[p0+1] * (ii) <24> mm[k] = tmp.reshape(mm[k].shape) <25> sss = src.squeeze().float().cpu().numpy() <26> print(sss[:10], '...', sss[-10:]) <27> mmm = mm[k].squeeze().float().cpu().numpy() <28> print(mmm[:10], '...', mmm[-10:]) <29> <30> print(f"Save to {init_weight_name}...") <31> torch.save(mm, init_weight_name) <32> <33> if model.args.my_pile_stage == 1: <34> print("Done. Now go for stage 2.") <35> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer @rank_zero_only def generate_init_weight(model, init_weight_name): # offset: 1 ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer.train_callback.on_train_epoch_end args = self.args at: datetime datetime() at: datetime.datetime __slots__ = date.__slots__ + time.__slots__ now(tz: Optional[_tzinfo]=...) -> _S __radd__ = __add__ at: math exp(x: SupportsFloat, /) -> float at: torch.serialization load(f: FILE_LIKE, map_location: MAP_LOCATION=None, pickle_module: Any=None, *, weights_only: bool=False, mmap: Optional[bool]=None, pickle_file='data.pkl', encoding: str="bytes", fix_imports: bool=..., errors: str=...) -> Any ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): args = self.args if trainer.is_global_zero: # logging t_now = time.time_ns() token_per_step = args.ctx_len * args.real_bsz real_step = trainer.global_step + args.epoch_begin * args.epoch_steps kt_s = 0 try: t_cost = (t_now - trainer.my_time_ns) / 1e9 kt_s = token_per_step / t_cost / 1000 self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) self.log("Kt/s", kt_s, prog_bar=True, on_step=True) except: pass trainer.my_time_ns = t_now trainer.my_loss = trainer.my_loss_all.float().mean().item() trainer.my_loss_sum += trainer.my_loss trainer.my_loss_count += 1 trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) # self.log("s", real_step, prog_bar=True, on_step=True) if len(args.wandb) > 0: + lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} - lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 <s> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "Gtokens": real_step * token_per_step / 1e9} if kt_s > 0: lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): args = self.args # if args.cuda_cleanup > 0: # torch.cuda.empty_cache() real_step = trainer.global_step + args.epoch_begin * args.epoch_steps # LR schedule w_step = args.warmup_steps if args.lr_final == args.lr_init or args.epoch_count == 0: lr = args.lr_init if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) else: decay_step = real_step - args.my_pile_edecay * args.epoch_steps decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps progress = (decay_step - w_step + 1) / (decay_total - w_step) progress = min(1, max(0, progress)) if args.lr_final == 0 or args.lr_init == 0: # linear decay lr = args.lr_init + (args.lr_final - args.lr_init) * progress else: # exp decay lr = args.lr_init * math.exp(math.log(args.lr_final / 
args.lr_init) * pow(progress, 1)) if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) # if trainer.is_global_zero: # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) + if args.my_exit_tokens != 0: # cosine decay - if args.my_exit_tokens > 0: # cosine decay if trainer.global_step < w</s>
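===========illustrative example===========
Not part of the commit: the 1-D linear-interpolation resize performed by the except branch of generate_init_weight above, extracted into a standalone, runnable function for clarity. The function name and the sample tensor are assumptions.
import math
import torch

def resize_1d(src: torch.Tensor, dd: int) -> torch.Tensor:
    # Stretch a 1-D tensor of length ss onto length dd by linear interpolation,
    # exactly as the except branch does for loaded weights that cannot simply be reshaped.
    ss = src.shape[0]
    out = torch.empty(dd, dtype=src.dtype)
    for i in range(dd):
        pos = i / dd * ss
        if pos >= ss - 1:
            out[i] = src[ss - 1]
        else:
            p0 = int(math.floor(pos))
            ii = pos - p0
            out[i] = src[p0] * (1 - ii) + src[p0 + 1] * ii
    return out

print(resize_1d(torch.tensor([0.0, 1.0, 2.0, 3.0]), 8))
# tensor([0.0000, 0.5000, 1.0000, 1.5000, 2.0000, 2.5000, 3.0000, 3.0000])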
RWKV-v4neo.src.model/Block.forward
Modified
BlinkDL~RWKV-LM
b42fc101f8b8ffdee4128ac5ba6ee938d6273258
+ dropout
<8>:<add> if self.args.dropout == 0: <add> if self.layer_id == 0 and args.pre_ffn > 0: <del> if self.layer_id == 0 and args.pre_ffn > 0: <9>:<add> x = x + self.ffnPre(self.ln1(x)) <del> x = x + self.ffnPre(self.ln1(x)) <10>:<add> else: <add> x = x + self.att(self.ln1(x)) <add> x = x + self.ffn(self.ln2(x)) <11>:<add> if self.layer_id == 0 and args.pre_ffn > 0: <add> x = self.drop0(x + self.ffnPre(self.ln1(x))) <add> else: <add> x = self.drop0(x + self.att(self.ln1(x))) <del> x = x + self.att(self.ln1(x)) <12>:<add> x = self.drop1(x + self.ffn(self.ln2(x))) <del> x = x + self.ffn(self.ln2(x))
# module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): + def forward(self, x, x_emb=None): <0> args = self.args <1> B, T, C = x.size() <2> if self.layer_id == 0: <3> x = self.ln0(x) <4> if args.my_pos_emb > 0: <5> pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T+1, -1)[:-1,:] <6> x = x + pos_emb <7> <8> if self.layer_id == 0 and args.pre_ffn > 0: <9> x = x + self.ffnPre(self.ln1(x)) <10> else: <11> x = x + self.att(self.ln1(x)) <12> x = x + self.ffn(self.ln2(x)) <13> <14> if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: <15> xx = self.tiny_ln(x) <16> q = self.tiny_q(xx)[:, :T, :] <17> k = self.tiny_k(xx)[:, :T, :] <18> c = (q @ k.transpose(-2, -1)) * (args.tiny_att_dim ** (-0.5)) <19> c = c.masked_fill(self.tiny_mask[:T, :T] == 0, 0) <20> x = x + c @ self.tiny_v(x_emb) <21> return x <22>
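===========illustrative example===========
Not part of the commit: a toy, runnable sketch of the dropout placement this "+ dropout" change introduces, where dropout wraps the whole residual sum rather than the sublayer output alone. The nn.Linear layers stand in for the real time-mix/channel-mix modules and the sizes are arbitrary assumptions.
import torch
import torch.nn as nn

class ToyBlock(nn.Module):
    def __init__(self, n_embd=8, dropout=0.1):
        super().__init__()
        self.dropout = dropout
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)
        self.att = nn.Linear(n_embd, n_embd)   # stand-in for the time-mix module
        self.ffn = nn.Linear(n_embd, n_embd)   # stand-in for the channel-mix module
        if dropout > 0:
            self.drop0 = nn.Dropout(p=dropout)
            self.drop1 = nn.Dropout(p=dropout)

    def forward(self, x):
        if self.dropout == 0:                       # old path: plain residual adds
            x = x + self.att(self.ln1(x))
            x = x + self.ffn(self.ln2(x))
        else:                                       # new path: dropout wraps each residual sum
            x = self.drop0(x + self.att(self.ln1(x)))
            x = self.drop1(x + self.ffn(self.ln2(x)))
        return x

print(ToyBlock()(torch.randn(1, 4, 8)).shape)  # torch.Size([1, 4, 8])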
RWKV-v4neo.src.model/RWKV.__init__
Modified
BlinkDL~RWKV-LM
b42fc101f8b8ffdee4128ac5ba6ee938d6273258
+ dropout
<22>:<add> if args.dropout > 0: <add> self.drop0 = nn.Dropout(p = args.dropout)
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def __init__(self, args): <0> super().__init__() <1> self.args = args <2> if not hasattr(args, 'dim_att'): <3> args.dim_att = args.n_embd <4> if not hasattr(args, 'dim_ffn'): <5> args.dim_ffn = args.n_embd * 4 <6> if not hasattr(args, 'tiny_att_layer'): <7> args.tiny_att_layer = -1 <8> if not hasattr(args, 'tiny_att_dim'): <9> args.tiny_att_dim = -1 <10> <11> self.emb = nn.Embedding(args.vocab_size, args.n_embd) <12> <13> self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) <14> <15> self.ln_out = nn.LayerNorm(args.n_embd) <16> self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) <17> <18> if args.head_qk > 0: <19> self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) <20> self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) <21> self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) <22>
===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): + def forward(self, x, x_emb=None): args = self.args B, T, C = x.size() if self.layer_id == 0: x = self.ln0(x) if args.my_pos_emb > 0: pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T+1, -1)[:-1,:] x = x + pos_emb + if self.args.dropout == 0: + if self.layer_id == 0 and args.pre_ffn > 0: - if self.layer_id == 0 and args.pre_ffn > 0: + x = x + self.ffnPre(self.ln1(x)) - x = x + self.ffnPre(self.ln1(x)) + else: + x = x + self.att(self.ln1(x)) + x = x + self.ffn(self.ln2(x)) else: + if self.layer_id == 0 and args.pre_ffn > 0: + x = self.drop0(x + self.ffnPre(self.ln1(x))) + else: + x = self.drop0(x + self.att(self.ln1(x))) - x = x + self.att(self.ln1(x)) + x = self.drop1(x + self.ffn(self.ln2(x))) - x = x + self.ffn(self.ln2(x)) if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: xx = self.tiny_ln(x) q = self.tiny_q(xx)[:, :T, :] k =</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): + def forward(self, x, x_emb=None): # offset: 1 <s>iny_ln(x) q = self.tiny_q(xx)[:, :T, :] k = self.tiny_k(xx)[:, :T, :] c = (q @ k.transpose(-2, -1)) * (args.tiny_att_dim ** (-0.5)) c = c.masked_fill(self.tiny_mask[:T, :T] == 0, 0) x = x + c @ self.tiny_v(x_emb) return x
RWKV-v4neo.src.model/RWKV.forward
Modified
BlinkDL~RWKV-LM
b42fc101f8b8ffdee4128ac5ba6ee938d6273258
+ dropout
<7>:<add> if args.dropout > 0: <add> x = self.drop0(x)
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def forward(self, idx): <0> args = self.args <1> B, T = idx.size() <2> assert T <= args.ctx_len, "Cannot forward, model ctx_len is exhausted." <3> <4> x = self.emb(idx) <5> x_emb = x <6> <7> if args.tiny_att_dim > 0: <8> for block in self.blocks: <9> if args.grad_cp == 1: <10> x = deepspeed.checkpointing.checkpoint(block, x, x_emb) <11> else: <12> x = block(x, x_emb) <13> else: <14> for block in self.blocks: <15> if args.grad_cp == 1: <16> x = deepspeed.checkpointing.checkpoint(block, x) <17> else: <18> x = block(x) <19> <20> x = self.ln_out(x) <21> <22> if args.head_qk > 0: <23> q = self.head_q(x)[:, :T, :] <24> k = self.head_k(x)[:, :T, :] <25> c = (q @ k.transpose(-2, -1)) * (1.0 / args.head_qk) <26> c = c.masked_fill(self.copy_mask[:T, :T] == 0, 0) <27> <28> if "32" in os.environ["RWKV_FLOAT_MODE"]: <29> c = c @ F.one_hot(idx, num_classes=args.vocab_size) <30> elif os.environ["RWKV_FLOAT_MODE"] == "fp16": <31> c = c @ F.one_hot(idx, num_classes=args.vocab_size).half() <32> elif os.environ["RWKV_FLOAT_MODE"] == "bf16": <33> c = c @ F.one_hot(idx, num_classes=args.vocab_size).bfloat16() <34> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def forward(self, idx): # offset: 1 else: x = self.head(x) return x ===========changed ref 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def __init__(self, args): super().__init__() self.args = args if not hasattr(args, 'dim_att'): args.dim_att = args.n_embd if not hasattr(args, 'dim_ffn'): args.dim_ffn = args.n_embd * 4 if not hasattr(args, 'tiny_att_layer'): args.tiny_att_layer = -1 if not hasattr(args, 'tiny_att_dim'): args.tiny_att_dim = -1 self.emb = nn.Embedding(args.vocab_size, args.n_embd) self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) self.ln_out = nn.LayerNorm(args.n_embd) self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) if args.head_qk > 0: self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) + if args.dropout > 0: + self.drop0 = nn.Dropout(p = args.dropout) ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): + def forward(self, x, x_emb=None): args = self.args B, T, C = x.size() if self.layer_id == 0: x = self.ln0(x) if args.my_pos_emb > 0: pos_emb = (self.pos_emb_x + self.pos_emb_y).reshape(T+1, -1)[:-1,:] x = x + pos_emb + if self.args.dropout == 0: + if self.layer_id == 0 and args.pre_ffn > 0: - if self.layer_id == 0 and args.pre_ffn > 0: + x = x + self.ffnPre(self.ln1(x)) - x = x + self.ffnPre(self.ln1(x)) + else: + x = x + self.att(self.ln1(x)) + x = x + self.ffn(self.ln2(x)) else: + if self.layer_id == 0 and args.pre_ffn > 0: + x = self.drop0(x + self.ffnPre(self.ln1(x))) + else: + x = self.drop0(x + self.att(self.ln1(x))) - x = x + self.att(self.ln1(x)) + x = self.drop1(x + self.ffn(self.ln2(x))) - x = x + self.ffn(self.ln2(x)) if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: xx = self.tiny_ln(x) q = self.tiny_q(xx)[:, :T, :] k =</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): + def forward(self, x, x_emb=None): # offset: 1 <s>iny_ln(x) q = self.tiny_q(xx)[:, :T, :] k = self.tiny_k(xx)[:, :T, :] c = (q @ k.transpose(-2, -1)) * (args.tiny_att_dim ** (-0.5)) c = c.masked_fill(self.tiny_mask[:T, :T] == 0, 0) x = x + c @ self.tiny_v(x_emb) return x
RWKV-v4neo.src.model/RWKV.training_step_end
Modified
BlinkDL~RWKV-LM
087a66accf9bd698d651b5609bfad42cfca0cd69
Add PyTorch Lightning 2.0 support
<0>:<add> if pl.__version__[0]!='2': <add> all = self.all_gather(batch_parts) <del> all = self.all_gather(batch_parts) <1>:<add> if self.trainer.is_global_zero: <del> if self.trainer.is_global_zero: <2>:<add> self.trainer.my_loss_all = all <del> self.trainer.my_loss_all = all
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def training_step_end(self, batch_parts): <0> all = self.all_gather(batch_parts) <1> if self.trainer.is_global_zero: <2> self.trainer.my_loss_all = all <3>
===========changed ref 0=========== # module: RWKV-v4neo.train logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only + import pytorch_lightning as pl rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1</s> ===========changed ref 1=========== # module: RWKV-v4neo.train # offset: 1 <s>_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: fine-tune RWKV 1.5B using 8xA100 40G = 1.76it/s = 115k token/s, VRAM 37477M # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 1000 --epoch_count 1000 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 8 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 8 --precision bf16 --strategy deepspeed_stage_2 --grad_cp 0 # example: fine-tune RWKV 1.5B using 1 GPU fp16 (VRAM 16G) NOTE: fp16 might overflow # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 200</s> ===========changed ref 2=========== # module: RWKV-v4neo.train # offset: 2 <s>_count 1000 --epoch_begin 0 --epoch_save 1 \ # --micro_bsz 11 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision fp16 --strategy deepspeed_stage_2_offload --grad_cp 1 parser = ArgumentParser() parser.add_argument("--load_model", default="", type=str) # full path, with .pth parser.add_argument("--wandb", default="", type=str) # wandb project name. 
if "" then don't use wandb parser.add_argument("--proj_dir", default="out", type=str) parser.add_argument("--random_seed", default="-1", type=int) parser.add_argument("--data_file", default="", type=str) parser.add_argument("--data_type", default="utf-8", type=str) parser.add_argument("--vocab_size", default=0, type=int) # vocab_size = 0 means auto (for char-level LM and .txt data) parser.add_argument("--ctx_len", default=1024, type=int) parser.add_argument("--epoch_steps", default=1000, type=int) # a mini "epoch" has [epoch_steps] steps parser.add_argument("--epoch_count", default=500, type=int) # train for this many "epochs". will continue afterwards with lr = lr_final parser.add_argument("--epoch_begin", default=0, type=int) # if you load a model trained for x "epochs", set epoch_</s> ===========changed ref 3=========== # module: RWKV-v4neo.train # offset: 3 <s> x parser.add_argument("--epoch_save", default=5, type=int) # save the model every [epoch_save] "epochs" parser.add_argument("--micro_bsz", default=12, type=int) # micro batch size (batch size per GPU) parser.add_argument("--n_layer", default=6, type=int) parser.add_argument("--n_embd", default=512, type=int) parser.add_argument("--dim_att", default=0, type=int) parser.add_argument("--dim_ffn", default=0, type=int) parser.add_argument("--pre_ffn", default=0, type=int) # replace first att layer by ffn (sometimes better) parser.add_argument("--head_qk", default=0, type=int) # my headQK trick parser.add_argument("--tiny_att_dim", default=0, type=int) # tiny attention dim parser.add_argument("--tiny_att_layer", default=-999, type=int) # tiny attention @ which layer parser.add_argument("--lr_init", default=6e-4, type=float) # 6e-4 for L12-D768, 4e-4 for L24-D1024, 3e-4 for L24-D2048 parser.add_argument("--lr_final", default=1e-5, type=float) parser.add_argument("--warmup_steps", default=-1, type=int) # try 50 if you load a model parser.add_argument("--beta1", default=0.9, type=float) parser.add_argument("--beta2", default=0.99, type=float) # use 0.999 when your model is close to convergence parser.add</s>
RWKV-v4neo.src.trainer/train_callback.on_train_batch_end
Modified
BlinkDL~RWKV-LM
087a66accf9bd698d651b5609bfad42cfca0cd69
增加PyTorch Lightning 2.0支持
<14>:<add> if pl.__version__[0]=='2': <add> trainer.my_loss = outputs["loss"] <add> else: <add> trainer.my_loss = trainer.my_loss_all.float().mean().item() <del> trainer.my_loss = trainer.my_loss_all.float().mean().item()
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): <0> args = self.args <1> if trainer.is_global_zero: # logging <2> t_now = time.time_ns() <3> token_per_step = args.ctx_len * args.real_bsz <4> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <5> kt_s = 0 <6> try: <7> t_cost = (t_now - trainer.my_time_ns) / 1e9 <8> kt_s = token_per_step / t_cost / 1000 <9> self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) <10> self.log("Kt/s", kt_s, prog_bar=True, on_step=True) <11> except: <12> pass <13> trainer.my_time_ns = t_now <14> trainer.my_loss = trainer.my_loss_all.float().mean().item() <15> trainer.my_loss_sum += trainer.my_loss <16> trainer.my_loss_count += 1 <17> trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count <18> self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) <19> self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) <20> # self.log("s", real_step, prog_bar=True, on_step=True) <21> <22> if len(args.wandb) > 0: <23> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} <24> if kt_s > 0</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: time time_ns() -> int ===========changed ref 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def training_step_end(self, batch_parts): + if pl.__version__[0]!='2': + all = self.all_gather(batch_parts) - all = self.all_gather(batch_parts) + if self.trainer.is_global_zero: - if self.trainer.is_global_zero: + self.trainer.my_loss_all = all - self.trainer.my_loss_all = all ===========changed ref 1=========== # module: RWKV-v4neo.train logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only + import pytorch_lightning as pl rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1</s> ===========changed ref 2=========== # module: RWKV-v4neo.train # offset: 1 <s>_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: fine-tune RWKV 1.5B using 8xA100 40G = 1.76it/s = 115k token/s, VRAM 37477M # # python train.py --load_model "/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 1000 --epoch_count 1000 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 8 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 8 --precision bf16 --strategy deepspeed_stage_2 --grad_cp 0 # example: fine-tune RWKV 1.5B using 1 GPU fp16 (VRAM 16G) NOTE: fp16 might overflow # # python train.py --load_model 
"/fsx/BlinkDL/CODE/FP16/out_1b2/all-8040.pth" --wandb "" --proj_dir "out" \ # --data_file "../data/train.npy" --data_type "numpy" --vocab_size 50277 \ # --ctx_len 1024 --epoch_steps 200</s> ===========changed ref 3=========== # module: RWKV-v4neo.train # offset: 2 <s>_count 1000 --epoch_begin 0 --epoch_save 1 \ # --micro_bsz 11 --n_layer 24 --n_embd 2048 --pre_ffn 0 --head_qk 0 \ # --lr_init 1e-5 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.999 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision fp16 --strategy deepspeed_stage_2_offload --grad_cp 1 parser = ArgumentParser() parser.add_argument("--load_model", default="", type=str) # full path, with .pth parser.add_argument("--wandb", default="", type=str) # wandb project name. if "" then don't use wandb parser.add_argument("--proj_dir", default="out", type=str) parser.add_argument("--random_seed", default="-1", type=int) parser.add_argument("--data_file", default="", type=str) parser.add_argument("--data_type", default="utf-8", type=str) parser.add_argument("--vocab_size", default=0, type=int) # vocab_size = 0 means auto (for char-level LM and .txt data) parser.add_argument("--ctx_len", default=1024, type=int) parser.add_argument("--epoch_steps", default=1000, type=int) # a mini "epoch" has [epoch_steps] steps parser.add_argument("--epoch_count", default=500, type=int) # train for this many "epochs". will continue afterwards with lr = lr_final parser.add_argument("--epoch_begin", default=0, type=int) # if you load a model trained for x "epochs", set epoch_</s>
RWKV-v4neo.src.trainer/train_callback.on_train_epoch_start
Modified
BlinkDL~RWKV-LM
087a66accf9bd698d651b5609bfad42cfca0cd69
Add PyTorch Lightning 2.0 support
<1>:<add> if pl.__version__[0]=='2': <add> dataset = trainer.train_dataloader.dataset <add> else: <add> dataset = trainer.train_dataloader.dataset.datasets <del> dataset = trainer.train_dataloader.dataset.datasets
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_epoch_start(self, trainer, pl_module): <0> args = self.args <1> dataset = trainer.train_dataloader.dataset.datasets <2> assert "MyDataset" in str(dataset) <3> dataset.global_rank = trainer.global_rank <4> dataset.real_epoch = int(args.epoch_begin + trainer.current_epoch) <5> dataset.world_size = trainer.world_size <6>
===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: RWKV-v4neo.src.trainer.train_callback.on_train_batch_end args = self.args to_save_dict = pl_module.state_dict() ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): args = self.args if trainer.is_global_zero: # logging t_now = time.time_ns() token_per_step = args.ctx_len * args.real_bsz real_step = trainer.global_step + args.epoch_begin * args.epoch_steps kt_s = 0 try: t_cost = (t_now - trainer.my_time_ns) / 1e9 kt_s = token_per_step / t_cost / 1000 self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) self.log("Kt/s", kt_s, prog_bar=True, on_step=True) except: pass trainer.my_time_ns = t_now + if pl.__version__[0]=='2': + trainer.my_loss = outputs["loss"] + else: + trainer.my_loss = trainer.my_loss_all.float().mean().item() - trainer.my_loss = trainer.my_loss_all.float().mean().item() trainer.my_loss_sum += trainer.my_loss trainer.my_loss_count += 1 trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) # self.log("s", real_step, prog_bar=True, on_step=True) if len(args.wandb) > 0: lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 <s> 0: lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} if kt_s > 0: lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========changed ref 2=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def training_step_end(self, batch_parts): + if pl.__version__[0]!='2': + all = self.all_gather(batch_parts) - all = self.all_gather(batch_parts) + if self.trainer.is_global_zero: - if self.trainer.is_global_zero: + self.trainer.my_loss_all = all - self.trainer.my_loss_all = all ===========changed ref 3=========== # module: RWKV-v4neo.train logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only + import pytorch_lightning as pl rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV 
from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1</s>
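===========illustrative example===========
Not part of the commit: a small sketch of a version-agnostic way to reach the underlying dataset, equivalent to the branch above (in Lightning 1.x the dataloader's dataset is a wrapper exposing .datasets; in 2.x it is the dataset itself). SimpleNamespace objects stand in for real dataloaders; the function name is an assumption.
from types import SimpleNamespace

def unwrap_dataset(train_dataloader):
    ds = train_dataloader.dataset
    return getattr(ds, "datasets", ds)   # 1.x wrapper exposes .datasets; 2.x does not

print(unwrap_dataset(SimpleNamespace(dataset="MyDataset")))                               # 2.x-style
print(unwrap_dataset(SimpleNamespace(dataset=SimpleNamespace(datasets="MyDataset"))))     # 1.x-style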
RWKV-v4neo.src.dataset/MyDataset.__init__
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
# module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> elif args.my_pile_version == 2: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25> self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <26> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for ===========unchanged ref 0=========== at: RWKV-v4neo.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v4neo.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v4neo.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v4neo.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v4neo.src.utils MaybeIsPrime(number) at: io.BufferedRandom read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v4neo.train logging.basicConfig(level=logging.INFO) if __name__ == "__main__": from argparse import ArgumentParser from pytorch_lightning import Trainer from pytorch_lightning.utilities import rank_zero_info, rank_zero_only import pytorch_lightning as pl rank_zero_info("########## work in progress ##########") ######################################################################################################## # # example: train a simple L12-D768 RWKV on dummy data # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "" --data_type "dummy" --vocab_size 0 \ # --ctx_len 128 --epoch_steps 1000 --epoch_count 20 --epoch_begin 0 --epoch_save 10 \ # --micro_bsz 16 --n_layer 12 --n_embd 768 --pre_ffn 0 --head_qk 0 \ # --lr_init 6e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e-8 \ # --accelerator gpu --devices 1 --precision bf16 --strategy ddp_find_unused_parameters_false --grad_cp 0 # example: train a simple L6-D512 RWKV from scratch on enwik8 # # python train.py --load_model "" --wandb "" --proj_dir "out" \ # --data_file "../data/enwik8" --data_type "utf-8" --vocab_size 0 \ # --ctx_len 512 --epoch_steps 5000 --epoch_count 500 --epoch_begin 0 --epoch_save 5 \ # --micro_bsz 12 --n_layer 6 --n_embd 512 --pre_ffn 0 --head_qk 0 \ # --lr_init 8e-4 --lr_final 1e-5 --warmup_steps 0 --beta1 0.9 --beta2 0.99 --adam_eps 1e</s>
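A side note on the magic_prime asserts in the record above: for pile-stage runs the loader requires magic_prime to be prime, congruent to 2 mod 3, and within 1% of data_size // ctx_len. A minimal sketch of picking such a value (the helper name and the trial-division primality test are illustrative; the repo itself only checks the value with MaybeIsPrime):

def find_magic_prime(data_size, ctx_len):
    # dataset_slot mirrors the computation in MyDataset.__init__
    dataset_slot = data_size // ctx_len

    def is_prime(n):  # simple trial division; fine for a one-off offline check
        if n < 2:
            return False
        i = 2
        while i * i <= n:
            if n % i == 0:
                return False
            i += 1
        return True

    # walk downward until all three asserts in MyDataset.__init__ would pass
    for p in range(dataset_slot, int(dataset_slot * 0.99), -1):
        if p % 3 == 2 and is_prime(p):
            return p
    raise ValueError("no 3n+2 prime within 1% of dataset_slot")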
RWKV-v4neo.src.trainer/my_save
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<12>:<add> if 'deepspeed_stage_3' in args.strategy: <add> trainer.save_checkpoint(ff, weights_only=True) <add> else: <add> torch.save(dd, ff) <del> torch.save(dd, ff)
# module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): <0> if '14b-run1' in ff: <1> fn = ff.split('/')[-1] <2> fff = '/dev/shm/' + fn <3> torch.save(dd, fff) <4> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) <5> elif ('world/14b' in ff) or ('world/7b' in ff): <6> aa = ff.split('/')[1] <7> fn = ff.split('/')[-1] <8> fff = f'/dev/shm/{aa}-{fn}' <9> torch.save(dd, fff) <10> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) <11> else: <12> torch.save(dd, ff) <13>
===========unchanged ref 0=========== at: subprocess Popen() at: torch.serialization save(obj: object, f: FILE_LIKE, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool=True, _disable_byteorder_record: bool=False) -> None ===========changed ref 0=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): self.args = args if args.data_type == "binidx": self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") if args.my_pile_version == 1: self.data = MMapIndexedDataset(args.data_file) self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size rank_zero_info(f"Data has {self.data_size} tokens.") elif args.my_pile_version == 2: data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') data_list = [i.strip().split(' ') for i in data_list] self.data = [] self.data_size = int(data_list[-1][-1]) rank_zero_info(f"Data has {self.data_size} chunks.") for d in data_list: data = MMapIndexedDataset(d[0]) data_size = len(data._bin_buffer) // data._index._dtype_size assert (data_size - args.ctx_len) == int(d[1]) self.data += [[int(d[-1]), int(d[1]), data]] # rank_zero_info(self.data) if args.my_qa_mask > 0: # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 <s>') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._index._dtype_size else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s>(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") elif args.data_type == "wds_img": self.vocab_size = -1 self.data_size = -1 self.data = None self.error_count = 0 else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." 
else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 + with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-8") as vocab_file: - with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-16le") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size</s> ===========changed ref 3=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 3 <s>(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)}
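For context on the my_save change documented above: under ZeRO stage 3 each rank only holds a shard of the parameters, so a single rank's pl_module.state_dict() is not a complete model; the new branch therefore hands the path to Lightning's trainer.save_checkpoint (and ignores dd on that path) instead of calling torch.save. The call sites in the callbacks pass the extra handles, e.g.:

# updated call convention (dd is unused when 'deepspeed_stage_3' is in args.strategy)
to_save_dict = pl_module.state_dict()
my_save(args, trainer, to_save_dict, f"{args.proj_dir}/rwkv-final.pth")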
RWKV-v4neo.src.trainer/train_callback.on_train_batch_start
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> if trainer.global_step < w_step: <10> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <11> else: <12> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <13> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <14> progress = (decay_step - w_step + 1) / (decay_total - w_step) <15> progress = min(1, max(0, progress)) <16> <17> if args.lr_final == 0 or args.lr_init == 0: # linear decay <18> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <19> else: # exp decay <20> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <21> <22> if trainer.global_step < w_step: <23> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <24> # if trainer.is_global_zero: <25> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <26> <27> if args.my_exit_tokens != 0: # cosine decay</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) else: real_tokens = real_step * args.ctx_len * args.real_bsz warmup_tokens = w_step * args.ctx_len * args.real_bsz progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens) progress = max(0, min(1, progress)) lr_final_factor = args.lr_final / args.lr_init lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) if args.my_exit_tokens > 0: lr = args.lr_init * lr_mult else: lr = (lr + args.lr_init * lr_mult) / 2 if progress >= 1: my_save( pl_module.state_dict(), f"{args.proj_dir}/rwkv-final.pth", ) exit(0) if args.weight_decay_final > 0: wd_now = args.weight_decay * math.exp(math.log(args.weight_decay_final / args.weight_decay) * progress) else: wd_now = args.weight_decay for param_group in trainer.optimizers[0].param_groups: if param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_now if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"])</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 2 <s>["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr trainer.my_wd = wd_now # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(args, trainer, dd, ff) at: math pi: float cos(x: SupportsFloat, /) -> float exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) 
-> float ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff) ===========changed ref 1=========== # module: RWKV-v4neo.src.dataset class MyDataset(Dataset): def __init__(self, args): self.args = args if args.data_type == "binidx": self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") if args.my_pile_version == 1: self.data = MMapIndexedDataset(args.data_file) self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size rank_zero_info(f"Data has {self.data_size} tokens.") elif args.my_pile_version == 2: data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') data_list = [i.strip().split(' ') for i in data_list] self.data = [] self.data_size = int(data_list[-1][-1]) rank_zero_info(f"Data has {self.data_size} chunks.") for d in data_list: data = MMapIndexedDataset(d[0]) data_size = len(data._bin_buffer) // data._index._dtype_size assert (data_size - args.ctx_len) == int(d[1]) self.data += [[int(d[-1]), int(d[1]), data]] # rank_zero_info(self.data) if args.my_qa_mask > 0: # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') self.data_pile_size = len(self.data_pile._bin_buffer) // self.data._</s>
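The token-based cosine decay used in on_train_batch_start above can be read as a small standalone function; a sketch (the function name is mine, the arithmetic is copied from the callback; the real code gates warmup on trainer.global_step and has an extra averaging branch for negative my_exit_tokens):

import math

def cosine_lr(real_step, args, w_step):
    if real_step < w_step:                      # warmup: 20% -> 100% of lr_init
        return args.lr_init * (0.2 + 0.8 * real_step / w_step)
    real_tokens = real_step * args.ctx_len * args.real_bsz
    warmup_tokens = w_step * args.ctx_len * args.real_bsz
    progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens)
    progress = max(0, min(1, progress))
    lr_final_factor = args.lr_final / args.lr_init
    # cosine from 1.0 at progress=0 down to lr_final_factor at progress=1
    lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress)
    return args.lr_init * lr_mult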
RWKV-v4neo.src.trainer/train_callback.on_train_batch_end
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<1>:<add> token_per_step = args.ctx_len * args.real_bsz <add> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <3>:<del> token_per_step = args.ctx_len * args.real_bsz <4>:<del> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): <0> args = self.args <1> if trainer.is_global_zero: # logging <2> t_now = time.time_ns() <3> token_per_step = args.ctx_len * args.real_bsz <4> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <5> kt_s = 0 <6> try: <7> t_cost = (t_now - trainer.my_time_ns) / 1e9 <8> kt_s = token_per_step / t_cost / 1000 <9> self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) <10> self.log("Kt/s", kt_s, prog_bar=True, on_step=True) <11> except: <12> pass <13> trainer.my_time_ns = t_now <14> if pl.__version__[0]=='2': <15> trainer.my_loss = outputs["loss"] <16> else: <17> trainer.my_loss = trainer.my_loss_all.float().mean().item() <18> trainer.my_loss_sum += trainer.my_loss <19> trainer.my_loss_count += 1 <20> trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count <21> self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) <22> self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) <23> # self.log("s", real_step, prog_bar=True, on_step=True) <24> <25> if len(args.wandb) > 0: <26> lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 if kt_s > 0: lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: RWKV-v4neo.src.trainer.train_callback.on_train_batch_start args = self.args at: time time_ns() -> int ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff) ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): args = self.args # if args.cuda_cleanup > 0: # torch.cuda.empty_cache() real_step = trainer.global_step + args.epoch_begin * args.epoch_steps # LR schedule w_step = args.warmup_steps if args.lr_final == args.lr_init or args.epoch_count == 0: lr = args.lr_init if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) else: decay_step = real_step - args.my_pile_edecay * args.epoch_steps decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps progress = (decay_step - w_step + 1) / (decay_total - w_step) progress = min(1, max(0, progress)) if args.lr_final == 0 or args.lr_init == 0: # linear decay lr = args.lr_init + (args.lr_final - args.lr_init) * progress else: # exp decay lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) # if trainer.is_global_zero: # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) if args.my_exit_tokens != 0: # cosine decay if trainer.global_step < w_step: lr = args.lr_init * (0.2 +</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 <s> cosine decay if trainer.global_step < w_step: lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) else: real_tokens = real_step * args.ctx_len * args.real_bsz warmup_tokens = w_step * args.ctx_len * args.real_bsz progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens) progress = max(0, min(1, progress)) lr_final_factor = args.lr_final / args.lr_init lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) if args.my_exit_tokens > 0: lr = args.lr_init * lr_mult else: lr = (lr + args.lr_init * lr_mult) / 2 if progress >= 1: + if (trainer.is_global_zero) 
or ('deepspeed_stage_3' in args.strategy): + my_save( - my_save( + args, trainer, + pl_module.state_dict(), - pl_module.state_dict(), + f"{args.proj_dir}/rwkv-final.pth", - f"{args.proj_dir}/rwkv-final.pth", + ) - ) + exit(0) - exit(0) if args.weight_decay_final > 0: wd_now = args.weight_decay * math.exp(math</s>
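The logging block above reports throughput as thousands of tokens per second; the arithmetic, pulled out into a helper for clarity (the name is mine):

def kt_per_second(ctx_len, real_bsz, t_now_ns, t_prev_ns):
    token_per_step = ctx_len * real_bsz        # tokens consumed by one optimizer step
    t_cost = (t_now_ns - t_prev_ns) / 1e9      # step duration in seconds (time.time_ns deltas)
    return token_per_step / t_cost / 1000      # the "Kt/s" value logged to the progress bar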
RWKV-v4neo.src.trainer/train_callback.on_train_epoch_end
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<1>:<add> to_save_dict = {} <add> if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): # save pth <del> if trainer.is_global_zero: # logging & save state_dict <5>:<del> to_save_dict = {} <13>:<add> args, trainer, <18>:<add> <add> if trainer.is_global_zero: # logging
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): <0> args = self.args <1> if trainer.is_global_zero: # logging & save state_dict <2> if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or (trainer.current_epoch == args.epoch_count - 1): <3> if args.data_type == 'wds_img': <4> raw_dict = pl_module.state_dict() <5> to_save_dict = {} <6> for k in raw_dict: <7> if k.startswith('encoder.') or k.startswith('decoder.'): <8> to_save_dict[k] = raw_dict[k] <9> else: <10> to_save_dict = pl_module.state_dict() <11> try: <12> my_save( <13> to_save_dict, <14> f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth", <15> ) <16> except Exception as e: <17> print('Error\n\n', e, '\n\n') <18> trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n") <19> trainer.my_log.flush() <20> <21> trainer.my_loss_sum = 0 <22> trainer.my_loss_count = 0 <23> if (args.epoch_begin + trainer.current_epoch) >= args.my_exit: <24> exit(0) <25>
===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(args, trainer, dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: RWKV-v4neo.src.trainer.train_callback.on_train_epoch_start args = self.args ===========changed ref 0=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff) ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): args = self.args + token_per_step = args.ctx_len * args.real_bsz + real_step = trainer.global_step + args.epoch_begin * args.epoch_steps if trainer.is_global_zero: # logging t_now = time.time_ns() - token_per_step = args.ctx_len * args.real_bsz - real_step = trainer.global_step + args.epoch_begin * args.epoch_steps kt_s = 0 try: t_cost = (t_now - trainer.my_time_ns) / 1e9 kt_s = token_per_step / t_cost / 1000 self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) self.log("Kt/s", kt_s, prog_bar=True, on_step=True) except: pass trainer.my_time_ns = t_now if pl.__version__[0]=='2': trainer.my_loss = outputs["loss"] else: trainer.my_loss = trainer.my_loss_all.float().mean().item() trainer.my_loss_sum += trainer.my_loss trainer.my_loss_count += 1 trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) # self.log("s", real_step, prog_bar=True, on_step=True) if len(args.wandb) > 0: lll = {"loss":</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 <s>, on_step=True) if len(args.wandb) > 0: lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} if kt_s > 0: lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) + if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): # save pth if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( + args, trainer, to_save_dict, f"{args.proj_dir}/rwkv-final.pth", ) ===========changed ref 3=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): args = self.args # if args.cuda_cleanup > 0: # torch.cuda.empty_cache() real_step = trainer.global_step + args.epoch_begin * args.epoch_steps # LR schedule w_step = args.warmup_steps if args.lr_final == args.lr_init or args.epoch_count == 0: lr = args.lr_init if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) else: decay_step = real_step - args.my_pile_edecay * 
args.epoch_steps decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps progress = (decay_step - w_step + 1) / (decay_total - w_step) progress = min(1, max(0, progress)) if args.lr_final == 0 or args.lr_init == 0: # linear decay lr = args.lr_init + (args.lr_final - args.lr_init) * progress else: # exp decay lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) # if trainer.is_global_zero: # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) if args.my_exit_tokens != 0: # cosine decay if trainer.global_step < w_step: lr = args.lr_init * (0.2 +</s>
RWKV-v4neo.src.model/RUN_CUDA
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<0>:<add> return WKV_5.apply(B, T, C, H, r, k, v, w, u) <del> return WKV.apply(B, T, C, w, u, k, v)
# module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): <0> return WKV.apply(B, T, C, w, u, k, v) <1>
===========changed ref 0=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff) ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): args = self.args + to_save_dict = {} + if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): # save pth - if trainer.is_global_zero: # logging & save state_dict if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or (trainer.current_epoch == args.epoch_count - 1): if args.data_type == 'wds_img': raw_dict = pl_module.state_dict() - to_save_dict = {} for k in raw_dict: if k.startswith('encoder.') or k.startswith('decoder.'): to_save_dict[k] = raw_dict[k] else: to_save_dict = pl_module.state_dict() try: my_save( + args, trainer, to_save_dict, f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth", ) except Exception as e: print('Error\n\n', e, '\n\n') + + if trainer.is_global_zero: # logging trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n") trainer.my_log.flush() trainer.my_loss_sum = 0 trainer</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): # offset: 1 <s>_epoch}\n") trainer.my_log.flush() trainer.my_loss_sum = 0 trainer.my_loss_count = 0 if (args.epoch_begin + trainer.current_epoch) >= args.my_exit: exit(0) ===========changed ref 3=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): args = self.args + token_per_step = args.ctx_len * args.real_bsz + real_step = trainer.global_step + args.epoch_begin * args.epoch_steps if trainer.is_global_zero: # logging t_now = time.time_ns() - token_per_step = args.ctx_len * args.real_bsz - real_step = trainer.global_step + args.epoch_begin * args.epoch_steps kt_s = 0 try: t_cost = (t_now - trainer.my_time_ns) / 1e9 kt_s = token_per_step / t_cost / 1000 self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) self.log("Kt/s", kt_s, prog_bar=True, on_step=True) except: pass trainer.my_time_ns = t_now if pl.__version__[0]=='2': trainer.my_loss = outputs["loss"] else: trainer.my_loss = trainer.my_loss_all.float().mean().item() trainer.my_loss_sum += trainer.my_loss trainer.my_loss_count += 1 trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) self.log("loss", trainer.my_epoch_loss, 
prog_bar=True, on_step=True) # self.log("s", real_step, prog_bar=True, on_step=True) if len(args.wandb) > 0: lll = {"loss":</s> ===========changed ref 4=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): # offset: 1 <s>, on_step=True) if len(args.wandb) > 0: lll = {"loss": trainer.my_loss, "lr": trainer.my_lr, "wd": trainer.my_wd, "Gtokens": real_step * token_per_step / 1e9} if kt_s > 0: lll["kt/s"] = kt_s trainer.my_wandb.log(lll, step=int(real_step)) + if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): # save pth if args.magic_prime > 0: expand_factor = 2 if args.my_qa_mask > 0 else 1 if int(real_step) == int(args.magic_prime * expand_factor // args.real_bsz) - 1 + int(args.my_random_steps): to_save_dict = pl_module.state_dict() my_save( + args, trainer, to_save_dict, f"{args.proj_dir}/rwkv-final.pth", )
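The RUN_CUDA signature change above switches the dispatcher to the new WKV_5 kernel: the extra H argument is the head count, and the kernel wrapper (shown further down in this log) asserts HEAD_SIZE == C // H and bf16 inputs. The call site in the new time-mix module looks like:

# bf16 r, k, v of shape (B, T, C); per-channel decay/bonus parameters
x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa)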
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.__init__
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<3>:<del> self.ctx_len = args.ctx_len <4>:<del> self.n_embd = args.n_embd <7>:<add> self.n_head = args.dim_att // self.head_size <del> self.n_head = self.n_embd // self.head_size <8>:<add> assert args.dim_att % self.n_head == 0 <del> assert self.n_embd % self.n_head == 0 <9>:<add> self.head_size_divisor = 8 <11>:<add> assert args.ctx_len % self.chunk_len == 0 <del> assert self.ctx_len % self.chunk_len == 0 <25>:<add> if 'r3' in os.environ["RWKV_MY_TESTING"]: <add> self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <add> self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) <add> <28>:<add> decay_speed[h] = -6 + 5 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) <del> decay_speed[h] = -8 + 7 * (h / (self.
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.ctx_len = args.ctx_len <4> self.n_embd = args.n_embd <5> <6> self.head_size = 64 <7> self.n_head = self.n_embd // self.head_size <8> assert self.n_embd % self.n_head == 0 <9> <10> self.chunk_len = 512 <11> assert self.ctx_len % self.chunk_len == 0 <12> <13> with torch.no_grad(): <14> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <15> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <16> ddd = torch.ones(1, 1, args.n_embd) <17> for i in range(args.n_embd): <18> ddd[0, 0, i] = i / args.n_embd <19> <20> # fancy time_mix <21> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <22> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <23> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <24> <25> # fancy time_decay <26> decay_speed = torch.ones(self.n_head) <27> for h in range(self.n_head): <28> decay_speed[h] = -8 + 7 * (h / (self.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) if 'r2' in os.environ["RWKV_MY_TESTING"]: self.time_faaaa = nn.Parameter(torch.ones(self.n_head) * 0.05) else: self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========changed ref 0=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 1=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff) ===========changed ref 2=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): args = self.args + to_save_dict = {} + if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): # save pth - if trainer.is_global_zero: # logging & save state_dict if (args.epoch_save > 0 and trainer.current_epoch % args.epoch_save == 0) or (trainer.current_epoch == args.epoch_count - 1): if args.data_type == 'wds_img': raw_dict = pl_module.state_dict() - to_save_dict = {} for k in raw_dict: if k.startswith('encoder.') or k.startswith('decoder.'): to_save_dict[k] = raw_dict[k] else: to_save_dict = pl_module.state_dict() try: my_save( + args, trainer, to_save_dict, f"{args.proj_dir}/rwkv-{args.epoch_begin + trainer.current_epoch}.pth", ) except Exception as e: print('Error\n\n', e, '\n\n') + + if trainer.is_global_zero: # logging trainer.my_log.write(f"{args.epoch_begin + trainer.current_epoch} {trainer.my_epoch_loss:.6f} {math.exp(trainer.my_epoch_loss):.4f} {trainer.my_lr:.8f} {datetime.datetime.now()} {trainer.current_epoch}\n") trainer.my_log.flush() trainer.my_loss_sum = 0 trainer</s> ===========changed ref 3=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): # print(f'########## world_size {dataset.world_size} global_rank {dataset.global_rank} real_epoch {dataset.real_epoch} ##########') def on_train_epoch_end(self, trainer, pl_module): # offset: 1 <s>_epoch}\n") trainer.my_log.flush() trainer.my_loss_sum = 0 trainer.my_loss_count = 0 if 
(args.epoch_begin + trainer.current_epoch) >= args.my_exit: exit(0) ===========changed ref 4=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): args = self.args + token_per_step = args.ctx_len * args.real_bsz + real_step = trainer.global_step + args.epoch_begin * args.epoch_steps if trainer.is_global_zero: # logging t_now = time.time_ns() - token_per_step = args.ctx_len * args.real_bsz - real_step = trainer.global_step + args.epoch_begin * args.epoch_steps kt_s = 0 try: t_cost = (t_now - trainer.my_time_ns) / 1e9 kt_s = token_per_step / t_cost / 1000 self.log("REAL it/s", 1.0 / t_cost, prog_bar=True, on_step=True) self.log("Kt/s", kt_s, prog_bar=True, on_step=True) except: pass trainer.my_time_ns = t_now if pl.__version__[0]=='2': trainer.my_loss = outputs["loss"] else: trainer.my_loss = trainer.my_loss_all.float().mean().item() trainer.my_loss_sum += trainer.my_loss trainer.my_loss_count += 1 trainer.my_epoch_loss = trainer.my_loss_sum / trainer.my_loss_count self.log("lr", trainer.my_lr, prog_bar=True, on_step=True) self.log("loss", trainer.my_epoch_loss, prog_bar=True, on_step=True) # self.log("s", real_step, prog_bar=True, on_step=True) if len(args.wandb) > 0: lll = {"loss":</s>
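About the re-tuned decay_speed init in the record above: the stored parameter is a double-log decay, and forward() turns it into a per-head factor exp(-exp(decay_speed)), so the new -6..-1 range maps to roughly 0.9975 (long memory) down to about 0.69 (short memory). A small standalone check (example n_head and layer ratio; both vary per model and per layer in the repo):

import torch

n_head, ratio_0_to_1 = 12, 0.5
decay_speed = torch.ones(n_head)
for h in range(n_head):
    decay_speed[h] = -6 + 5 * (h / (n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
per_step_decay = torch.exp(-torch.exp(decay_speed))   # what forward() applies at every timestep
print(per_step_decay)   # ~0.9975 for head 0 down to ~0.69 for the last head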
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.jit_func_2
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<0>:<add> B, H, TT, S = r.size() <del> B, H, TT, S = r.size() <1>:<add> T = self.chunk_len <del> T = self.chunk_len <3>:<add> s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state <del> s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state <4>:<add> x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output <del> x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output <6>:<del> ################################################################################ <7>:<del> ######## <8>:<add> for i in range(TT // T): <del> for i in range(TT // T): <9>:<add> rr = r[:, :, i*T:i*T+T, :] <del> rr = r[:, :, i*T:i*T+T, :] <10>:<add> kk = k[:, :, :, i*T:i*T+T] <del> kk = k[:, :, :, i*T:i*T+T] <11>:<add> vv = v[:, :, i*T:i*T+T, :] <del> vv = v[:, :, i*T:i*T+T, :] <13>:<add> x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb <del> x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb <15>:<add> s = ws * s + (kk * wk) @ vv <del> s = ws * s + (kk * wk) @ vv <16>:<del> ######## <17>:<del> ################################################################################
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): + @MyFunction - @MyFunction + def jit_func_2(self, r, k, v, w, wk, wb, ws): - def jit_func_2(self, r, k, v, w, wk, wb, ws): <0> B, H, TT, S = r.size() <1> T = self.chunk_len <2> <3> s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state <4> x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output <5> <6> ################################################################################ <7> ######## <8> for i in range(TT // T): <9> rr = r[:, :, i*T:i*T+T, :] <10> kk = k[:, :, :, i*T:i*T+T] <11> vv = v[:, :, i*T:i*T+T, :] <12> <13> x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb <14> <15> s = ws * s + (kk * wk) @ vv <16> ######## <17> ################################################################################ <18> <19> x = x.transpose(1, 2).contiguous().view(B * TT, H*S) # BHTS -> BTHS -> BTC <20> x = self.ln_x(x).view(B, TT, H*S) <21> return self.output(x) <22>
===========changed ref 0=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id - self.ctx_len = args.ctx_len - self.n_embd = args.n_embd self.head_size = 64 + self.n_head = args.dim_att // self.head_size - self.n_head = self.n_embd // self.head_size + assert args.dim_att % self.n_head == 0 - assert self.n_embd % self.n_head == 0 + self.head_size_divisor = 8 self.chunk_len = 512 + assert args.ctx_len % self.chunk_len == 0 - assert self.ctx_len % self.chunk_len == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) + if 'r3' in os.environ["</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 1 <s>d, 0.5 * ratio_1_to_almost0)) + if 'r3' in os.environ["RWKV_MY_TESTING"]: + self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) + self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) + # fancy time_decay decay_speed = torch.ones(self.n_head) for h in range(self.n_head): + decay_speed[h] = -6 + 5 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) - decay_speed[h] = -8 + 7 * (h / (self.n_head - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) if 'r2' in os.environ["RWKV_MY_TESTING"]: + tmp = torch.zeros(self.n_head) + for h in range(self.n_head): + tmp[h] = ratio_0_to_1 * (1 - (h / (self.n_head - 1))) + self.time_faaaa = nn.Parameter(tmp) - self.time_faaaa = nn.Parameter(torch.ones(self.n_head) * 0.05) </s> ===========changed ref 3=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): # offset: 2 <s> self.time_first = nn.Parameter(torch.ones(self.n_head) * (-3.0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) + self.ln_x = nn.GroupNorm(self.n_head, args.dim_att) - self.ln_x = nn.GroupNorm(self.n_head, self.n_embd) ===========changed ref 4=========== # module: RWKV-v4neo.src.trainer + def my_save(args, trainer, dd, ff): - def my_save(dd, ff): if '14b-run1' in ff: fn = ff.split('/')[-1] fff = '/dev/shm/' + fn 
torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) elif ('world/14b' in ff) or ('world/7b' in ff): aa = ff.split('/')[1] fn = ff.split('/')[-1] fff = f'/dev/shm/{aa}-{fn}' torch.save(dd, fff) subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) else: + if 'deepspeed_stage_3' in args.strategy: + trainer.save_checkpoint(ff, weights_only=True) + else: + torch.save(dd, ff) - torch.save(dd, ff)
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5_Preview.forward
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<3>:<add> if 'r3' in os.environ["RWKV_MY_TESTING"]: <add> r, k, v, g = self.jit_func(x) <add> else: <add> r, k, v = self.jit_func(x) <del> r, k, v = self.jit_func(x) <34>:<add> if 'r3' in os.environ["RWKV_MY_TESTING"]: <add> return self.jit_func_2(r, k, v, g, w, wk, wb, ws) <del> return self.jit_func_2(r, k, v, w
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): <0> H = self.n_head <1> T = self.chunk_len <2> <3> r, k, v = self.jit_func(x) <4> <5> w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) <6> <7> if 'r2' in os.environ["RWKV_MY_TESTING"]: <8> u = self.time_faaaa.float().unsqueeze(-1) <9> else: <10> u = torch.exp(self.time_first.float()).unsqueeze(-1) <11> <12> ################################################################################ <13> ######## <14> ws = w.pow(T).reshape(1, H, 1, 1) <15> <16> ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) <17> w = w.repeat(1, T).pow(ind) <18> <19> wk = w.reshape(1, H, 1, T) <20> wb = wk.transpose(-2, -1).flip(2) <21> <22> w = torch.cat([w[:, 1:], u], dim=1) <23> w = F.pad(w, (0, T)) <24> w = torch.tile(w, [T]) <25> w = w[:, :-T].reshape(-1, T, 2 * T - 1) <26> w = w[:, :, T-1:].reshape(1, H, T, T) <27> ######## <28> ################################################################################ <29> <30> w = w.to(dtype=r.dtype) <31> wk = wk.to(dtype=r.dtype) <32> wb = wb.to(dtype=r.dtype) <33> ws = ws.to(dtype=r.dtype) <34> return self.jit_func_2(r, k, v, w</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): # offset: 1 ===========changed ref 0=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 1=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): + @MyFunction - @MyFunction + def jit_func_2(self, r, k, v, w, wk, wb, ws): - def jit_func_2(self, r, k, v, w, wk, wb, ws): + B, H, TT, S = r.size() - B, H, TT, S = r.size() + T = self.chunk_len - T = self.chunk_len + s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state - s = torch.zeros(B, H, S, S, device=r.device, dtype=r.dtype) # state + x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output - x = torch.zeros(B, H, TT, S, device=r.device, dtype=r.dtype) # output - ################################################################################ - ######## + for i in range(TT // T): - for i in range(TT // T): + rr = r[:, :, i*T:i*T+T, :] - rr = r[:, :, i*T:i*T+T, :] + kk = k[:, :, :, i*T:i*T+T] - kk = k[:, :, :, i*T:i*T+T] + vv = v[:, :, i*T:i*T+T, :] - vv = v[:, :, i*T:i*T+T, :] + x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb - x[:, :, i*T:i*T+T, :] = ((rr @</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): + @MyFunction - @MyFunction + def jit_func_2(self, r, k, v, w, wk, wb, ws): - def jit_func_2(self, r, k, v, w, wk, wb, ws): # offset: 1 <s>rr @ s) * wb - x[:, :, i*T:i*T+T, :] = ((rr @ kk) * w) @ vv + (rr @ s) * wb + s = ws * s + (kk * wk) @ vv - s = ws * s + (kk * wk) @ vv - ######## - ################################################################################ + - + x = x.transpose(1, 2).contiguous().view(B * TT, H*S) # BHTS -> BTHS -> BTC - x = x.transpose(1, 2).contiguous().view(B * TT, H*S) # BHTS -> BTHS -> BTC + x = self.ln_x(x / self.head_size_divisor).view(B, TT, H*S) - x = self.ln_x(x).view(B, TT, H*S) + return self.output(x) - return self.output(x) ===========changed ref 3=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id - self.ctx_len = args.ctx_len - self.n_embd = args.n_embd self.head_size = 64 + self.n_head = args.dim_att // self.head_size - self.n_head = self.n_embd // self.head_size + assert args.dim_att % self.n_head == 0 - assert self.n_embd % self.n_head == 0 + self.head_size_divisor = 8 self.chunk_len = 512 + assert args.ctx_len % self.chunk_len == 0 - assert self.ctx_len % self.chunk_len == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = 
nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) + if 'r3' in os.environ["</s>
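The tensor reshaping in forward() above packs powers of the per-head decay into four tensors: ws = w**T decays the carried state across a whole chunk, wk holds w**(T-1-j) for folding key position j into the state, wb holds w**t for applying the carried state at position t, and the final (T, T) matrix is lower-triangular with w**(t-1-j) below the diagonal and the bonus u on it. A tiny standalone check with toy values (H=1, T=4; variable names are mine):

import torch
import torch.nn.functional as F

T, w_val, u_val = 4, 0.5, 9.0
w = torch.tensor([[w_val]])        # per-head decay, already exp(-exp(time_decay)) in the model
u = torch.tensor([[u_val]])

ind = torch.arange(T - 1, -1, -1).unsqueeze(0)     # [3, 2, 1, 0]
wfull = w.repeat(1, T).pow(ind)                    # [w^3, w^2, w^1, w^0]
ws = w.pow(T)                                      # chunk-to-chunk state decay
wk = wfull                                         # weight of key j when added to the state
wb = wfull.flip(1)                                 # weight of the carried state at position t

m = torch.cat([wfull[:, 1:], u], dim=1)
m = F.pad(m, (0, T))
m = torch.tile(m, [T])[:, :-T].reshape(-1, T, 2 * T - 1)[:, :, T - 1:]
print(m[0])   # rows: [u,0,0,0], [1,u,0,0], [w,1,u,0], [w^2,w,1,u]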
RWKV-v4neo.src.model/RWKV_ChannelMix.forward
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<4>:<add> k = torch.relu(k) ** 2 <del> k = torch.square(torch.relu(k))
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_ChannelMix(MyModule): @MyFunction def forward(self, x): <0> xx = self.time_shift(x) <1> xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) <2> xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) <3> k = self.key(xk) <4> k = torch.square(torch.relu(k)) <5> kv = self.value(k) <6> return torch.sigmoid(self.receptance(xr)) * kv <7>
===========changed ref 0=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 1=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert HEAD_SIZE == C // H + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + r = r.contiguous() + k = k.contiguous() + v = v.contiguous() + w = w.float().contiguous() + u = u.contiguous() + ew = -torch.exp(w) + eew = torch.exp(ew) + ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y + ===========changed ref 2=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + gy = gy.contiguous() + assert gy.dtype == torch.bfloat16 + r, k, v, eew, ew, u = ctx.saved_tensors + gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + + wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) + + gw = torch.sum(gw.view(B*T, H, C//H), 0) + gu = torch.sum(gu.view(B*T, H, C//H), 0) + return (None, None, None, None, gr, gk, gv, gw.bfloat16(), gu.bfloat16()) + ===========changed ref 3=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): H = self.n_head T = self.chunk_len + if 'r3' in os.environ["RWKV_MY_TESTING"]: + r, k, v, g = self.jit_func(x) + else: + r, k, v = self.jit_func(x) - r, k, v = self.jit_func(x) w = torch.exp(-torch.exp(self.time_decay.float())).unsqueeze(-1) if 'r2' in os.environ["RWKV_MY_TESTING"]: u = self.time_faaaa.float().unsqueeze(-1) else: u = torch.exp(self.time_first.float()).unsqueeze(-1) ################################################################################ ######## ws = w.pow(T).reshape(1, H, 1, 1) ind = torch.arange(T-1, -1, -1, device=r.device).unsqueeze(0).repeat(H, 1) w = w.repeat(1, T).pow(ind) wk = w.reshape(1, H, 1, T) wb = wk.transpose(-2, -1).flip(2) w = torch.cat([w[:, 1:], u], dim=1) w = F.pad(w, (0, T)) w = torch.tile(w, [T]) w = w[:, :-T].reshape(-1, T, 2 * T - 1) w = w[:, :, T-1:].reshape(1, H, T, T) ######## ################################################################################ w = w.to(dtype=r.dtype) wk = wk.to(dtype=r.dtype) wb = wb.to(dtype=r.dtype) </s> ===========changed ref 4=========== # module: 
RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5_Preview(MyModule): def forward(self, x): # offset: 1 <s> wk = wk.to(dtype=r.dtype) wb = wb.to(dtype=r.dtype) ws = ws.to(dtype=r.dtype) + if 'r3' in os.environ["RWKV_MY_TESTING"]: + return self.jit_func_2(r, k, v, g, w, wk, wb, ws) - return self.jit_func_2(r, k, v, w, wk, wb, ws) + else: + return self.jit_func_2(r, k, v, w, wk, wb, ws)
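Since the new WKV_5 autograd function appears in the context above, a naive per-timestep reference of the recurrence that kernel is understood to compute may help readers of this log; this is my own fp32 sketch (no chunking, no bf16), meant only for sanity checks on tiny shapes:

import torch

def wkv5_reference(r, k, v, w, u):
    # r, k, v: (B, T, H, S); w, u: (H, S), matching time_decay / time_faaaa per channel
    B, T, H, S = r.shape
    decay = torch.exp(-torch.exp(w))              # same transform as eew in WKV_5.forward
    y = torch.zeros(B, T, H, S)
    state = torch.zeros(B, H, S, S)               # per-head key/value state
    for t in range(T):
        kt = k[:, t].unsqueeze(-1)                # (B, H, S, 1)
        vt = v[:, t].unsqueeze(-2)                # (B, H, 1, S)
        at = kt @ vt                              # outer product k_t v_t^T
        rt = r[:, t].unsqueeze(-2)                # (B, H, 1, S)
        y[:, t] = (rt @ (u.unsqueeze(-1) * at + state)).squeeze(-2)
        state = decay.unsqueeze(-1) * state + at  # decay indexed by the key channel
    return y.reshape(B, T, H * S)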
RWKV-v4neo.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<16>:<add> if 'r4' in os.environ["RWKV_MY_TESTING"]: <del> if 'r' in os.environ["RWKV_MY_TESTING"]: <17>:<add> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <add> elif 'r' in os.environ["RWKV_MY_TESTING"]:
# module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'r' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_TimeMix_RWKV5_Preview(args, layer_id) <18> else: <19> self.att = RWKV_TimeMix(args, layer_id) <20> <21> if 'g' in os.environ["RWKV_MY_TESTING"]: <22> self.ffn = MishGLU(args, layer_id) <23> else: <24> self.ffn = RWKV_ChannelMix(args, layer_id) <25> <26> if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: <27> self.tiny_ln = nn.LayerNorm(args.n_embd) <28> self.tiny_q = nn.Linear(</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_ChannelMix(MyModule): @MyFunction def forward(self, x): xx = self.time_shift(x) xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = self.key(xk) + k = torch.relu(k) ** 2 - k = torch.square(torch.relu(k)) kv = self.value(k) return torch.sigmoid(self.receptance(xr)) * kv ===========changed ref 1=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + return r, k, v, g + ===========changed ref 2=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 3=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert HEAD_SIZE == C // H + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + r = r.contiguous() + k = k.contiguous() + v = v.contiguous() + w = w.float().contiguous() + u = u.contiguous() + ew = -torch.exp(w) + eew = torch.exp(ew) + ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y + ===========changed ref 4=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + gy = gy.contiguous() + assert gy.dtype == torch.bfloat16 + r, k, v, eew, ew, u = ctx.saved_tensors + gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, 
memory_format=torch.contiguous_format) + gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + + wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) + + gw = torch.sum(gw.view(B*T, H, C//H), 0) + gu = torch.sum(gu.view(B*T, H, C//H), 0) + return (None, None, None, None, gr, gk, gv, gw.bfloat16(), gu.bfloat16()) +
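The __init__ above only wires up the submodules. As orientation, a minimal sketch of how such a block is typically composed, assuming the usual pre-LayerNorm residual pattern (the actual Block.forward is not shown in this entry, and it additionally handles pre_ffn, tiny attention, pos_emb and the drop0/drop1 dropouts):

def block_forward_sketch(block, x):
    # Illustrative only; not the repo's Block.forward.
    if block.layer_id == 0:
        x = block.ln0(x)                  # extra LayerNorm applied once, in front of layer 0
    x = x + block.att(block.ln1(x))       # time-mix branch
    x = x + block.ffn(block.ln2(x))       # channel-mix branch
    return x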
RWKV-v4neo.src.model/RWKV.__init__
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<10>:<add> assert args.n_embd % 32 == 0 <add> assert args.dim_att % 32 == 0 <add> assert args.dim_ffn % 32 == 0
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def __init__(self, args): <0> super().__init__() <1> self.args = args <2> if not hasattr(args, 'dim_att'): <3> args.dim_att = args.n_embd <4> if not hasattr(args, 'dim_ffn'): <5> args.dim_ffn = args.n_embd * 4 <6> if not hasattr(args, 'tiny_att_layer'): <7> args.tiny_att_layer = -1 <8> if not hasattr(args, 'tiny_att_dim'): <9> args.tiny_att_dim = -1 <10> <11> self.emb = nn.Embedding(args.vocab_size, args.n_embd) <12> <13> self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) <14> <15> self.ln_out = nn.LayerNorm(args.n_embd) <16> self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) <17> <18> if args.head_qk > 0: <19> self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) <20> self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) <21> self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) <22> if args.dropout > 0: <23> self.drop0 = nn.Dropout(p = args.dropout) <24>
===========changed ref 0=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x / self.head_size_divisor).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g = self.jit_func(x) + + x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_ChannelMix(MyModule): @MyFunction def forward(self, x): xx = self.time_shift(x) xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = self.key(xk) + k = torch.relu(k) ** 2 - k = torch.square(torch.relu(k)) kv = self.value(k) return torch.sigmoid(self.receptance(xr)) * kv ===========changed ref 3=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + return r, k, v, g + ===========changed ref 4=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v) ===========changed ref 5=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert HEAD_SIZE == C // H + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + r = r.contiguous() + k = k.contiguous() + v = v.contiguous() + w = w.float().contiguous() + u = u.contiguous() + ew = -torch.exp(w) + eew = torch.exp(ew) + ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y + ===========changed ref 6=========== # module: RWKV-v4neo.src.model + + class WKV_5(torch.autograd.Function): + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + gy = gy.contiguous() + assert gy.dtype == torch.bfloat16 + r, k, v, eew, ew, u = ctx.saved_tensors + gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gk = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) + + wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) + + gw = torch.sum(gw.view(B*T, H, C//H), 0) + gu = torch.sum(gu.view(B*T, H, C//H), 0) + return (None, None, None, None, gr, gk, gv, gw.bfloat16(), gu.bfloat16()) +
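The added asserts require every width to be a multiple of 32. With the defaults derived just above (dim_att = n_embd, dim_ffn = 4 * n_embd), checking n_embd is enough; explicitly set dim_att / dim_ffn values must satisfy it on their own. An illustrative check (2048 is an example width, not taken from this entry):

n_embd = 2048                 # example value
dim_att = n_embd              # default when args.dim_att is unset
dim_ffn = n_embd * 4          # default when args.dim_ffn is unset
assert n_embd % 32 == 0 and dim_att % 32 == 0 and dim_ffn % 32 == 0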
RWKV-v4neo.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<19>:<add> lr_2x.add(n) <del> lr_3x.add(n) <21>:<add> lr_1x.add(n) <del> lr_2x.add(n)
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> if ("time_mix" in n) and (args.layerwise_lr > 0): <8> if args.my_pile_stage == 2: <9> lr_2x.add(n) <10> else: <11> lr_1x.add(n) <12> elif ("time_decay" in n) and (args.layerwise_lr > 0): <13> if args.my_pile_stage == 2: <14> lr_3x.add(n) <15> else: <16> lr_2x.add(n) <17> elif ("time_faaaa" in n) and (args.layerwise_lr > 0): <18> if args.my_pile_stage == 2: <19> lr_3x.add(n) <20> else: <21> lr_2x.add(n) <22> elif ("time_first" in n) and (args.layerwise_lr > 0): <23> lr_3x.add(n) <24> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <25> lr_decay.add(n) <26> else: <27> lr_1x.add(n) <28> <29> lr_decay = sorted(list(lr_decay)) <30> lr_1x = sorted(list(lr_1x)) <31> lr_2x = sorted(list(lr_2x)) <32> lr_3x = sorted(list(lr_3x)) <33> # print('decay', lr_decay) <34> # print('1x', lr_1x) <35> # print('2x', lr_2x) <36> # print('3x', lr_3</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 param_dict = {n: p for n, p in self.named_parameters()} if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s>_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========changed ref 0=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x / self.head_size_divisor).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g = self.jit_func(x) + + x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_ChannelMix(MyModule): @MyFunction def forward(self, x): xx = self.time_shift(x) xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = self.key(xk) + k = torch.relu(k) ** 2 - k = torch.square(torch.relu(k)) kv = self.value(k) return torch.sigmoid(self.receptance(xr)) * kv ===========changed ref 3=========== # module: RWKV-v4neo.src.model + 
######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + return r, k, v, g +
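The optimizer groups above only tag parameters with a "my_lr_scale" factor; the scheduled learning rate is multiplied by it later in the trainer callback (see the on_train_batch_start entry further below). A stripped-down sketch of that interaction, using the stage-2 scales shown above:

lr = 1e-3                                                     # example value from the schedule
param_groups = [{"my_lr_scale": 1.0}, {"my_lr_scale": 5.0}]   # a 1x group and a stage-2 5x group
for g in param_groups:
    g["lr"] = lr * g["my_lr_scale"]                           # mirrors the trainer callback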
RWKV-v4neo.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
673d241cdaaf54cb927f49e2a2ca4542b3d98fab
Merge pull request #178 from fluxlinkage/main
<19>:<add> m[n] = (p * 0.0) + (layer_scale ** 0.7) <del> m[n] = (p * 0.0) + (layer_scale ** 0.5)
# module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> for n in self.state_dict(): <11> p = self.state_dict()[n] <12> shape = p.shape <13> <14> gain = 1.0 <15> scale = 1.0 <16> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <17> if 'ln_x.weight' in n: <18> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <19> m[n] = (p * 0.0) + (layer_scale ** 0.5) <20> else: <21> m[n] = p <22> else: <23> if n == "emb.weight": <24> scale = -1 * self.args.lr_init <25> else: <26> if shape[0] > shape[1]: <27> gain = math.sqrt(shape[0] / shape[1]) <28> if 'r' in os.environ["RWKV_MY_TESTING"]: <29> zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <30> else: <31> zero = [".att.key.", ".att.receptance.", ".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <32> for kk in zero: <33> if kk in n: <34> </s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 if n == "head.weight": scale = 0.5 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 print(f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(scale).ljust(4)} {n}") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() # if n == "emb.weight": # print(m[n]) gc.collect() torch.cuda.empty_cache() return m ===========changed ref 0=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x / self.head_size_divisor).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g = self.jit_func(x) + + x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_ChannelMix(MyModule): @MyFunction def forward(self, x): xx = self.time_shift(x) xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = self.key(xk) + k = torch.relu(k) ** 2 - k = torch.square(torch.relu(k)) kv = self.value(k) return torch.sigmoid(self.receptance(xr)) * kv ===========changed ref 3=========== # module: RWKV-v4neo.src.model + ######################################################################################################## + + class RWKV_TimeMix_RWKV5(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + return r, k, v, g + ===========changed ref 4=========== # module: RWKV-v4neo.src.model class RWKV(pl.LightningModule): def __init__(self, args): super().__init__() self.args = args if not hasattr(args, 'dim_att'): args.dim_att = args.n_embd if not hasattr(args, 'dim_ffn'): args.dim_ffn = args.n_embd * 4 if not hasattr(args, 'tiny_att_layer'): args.tiny_att_layer = -1 if not hasattr(args, 'tiny_att_dim'): args.tiny_att_dim = -1 + assert args.n_embd % 32 == 0 + assert args.dim_att % 32 == 0 + assert args.dim_ffn % 32 == 0 self.emb = nn.Embedding(args.vocab_size, args.n_embd) self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) self.ln_out = 
nn.LayerNorm(args.n_embd) self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) if args.head_qk > 0: self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) ===========changed ref 5=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, H, r, k, v, w, u): - def RUN_CUDA(B, T, C, w, u, k, v): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) - return WKV.apply(B, T, C, w, u, k, v)
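The only functional change in this entry is the exponent on layer_scale for ln_x.weight (0.5 -> 0.7). Since layer_scale = (1 + layer_id) / n_layer is at most 1, the new exponent gives a smaller initial value for every layer except the last. Worked numbers with an example depth:

layer_scale = (1 + 11) / 24          # layer_id = 11, n_layer = 24 (example depth) -> 0.5
old_init = layer_scale ** 0.5        # ~= 0.707
new_init = layer_scale ** 0.7        # ~= 0.616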
RWKV-v4neo.src.model/WKV_5.forward
Modified
BlinkDL~RWKV-LM
653ae70a66f0a9905e232f23ff7936dbbf3b26f7
misc
<1>:<del> assert HEAD_SIZE == C // H <7>:<add> assert HEAD_SIZE == C // H <11>:<add> assert r.is_contiguous() <del> r = r.contiguous() <12>:<add> assert k.is_contiguous() <del> k = k.contiguous() <13>:<add> assert v.is_contiguous() <del> v = v.contiguous() <14>:<add> assert w.is_contiguous() <add> assert u.is_contiguous() <add> ew = (-torch.exp(w.float())).contiguous() <del> w = w.float().contiguous() <15>:<del> u = u.contiguous() <16>:<del> ew = -torch.exp(w) <17>:<add> eew = (torch.exp(ew)).contiguous() <del> eew = torch.exp(ew) <19>:<add> y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <20>:<add> wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) <del> wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) <21>:<add> return y <del> return y
# module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): <0> with torch.no_grad(): <1> assert HEAD_SIZE == C // H <2> assert r.dtype == torch.bfloat16 <3> assert k.dtype == torch.bfloat16 <4> assert v.dtype == torch.bfloat16 <5> assert w.dtype == torch.bfloat16 <6> assert u.dtype == torch.bfloat16 <7> ctx.B = B <8> ctx.T = T <9> ctx.C = C <10> ctx.H = H <11> r = r.contiguous() <12> k = k.contiguous() <13> v = v.contiguous() <14> w = w.float().contiguous() <15> u = u.contiguous() <16> ew = -torch.exp(w) <17> eew = torch.exp(ew) <18> ctx.save_for_backward(r, k, v, eew, ew, u) <19> y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <20> wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) <21> return y <22>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model HEAD_SIZE = 64 wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor exp(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.function._SingleLevelFunction forward(ctx: Any, *args: Any, **kwargs: Any) -> Any vjp = backward at: torch.autograd.grad_mode no_grad()
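The forward passes eew = exp(-exp(w)) to the kernel rather than w itself: time_decay is stored in double-log space, which keeps the effective per-channel decay factor strictly inside (0, 1) for any real w. A quick numeric illustration with example values:

import torch
w = torch.tensor([-6.0, -1.0, 0.0])   # example time_decay entries, not from the repo
ew = -torch.exp(w)                    # ~= [-0.0025, -0.3679, -1.0000]
eew = torch.exp(ew)                   # ~= [ 0.9975,  0.6922,  0.3679]  -- always in (0, 1)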
RWKV-v4neo.src.model/WKV_5.backward
Modified
BlinkDL~RWKV-LM
653ae70a66f0a9905e232f23ff7936dbbf3b26f7
misc
<1>:<add> assert gy.dtype == torch.bfloat16 <5>:<add> assert gy.is_contiguous() <del> gy = gy.contiguous() <6>:<del> assert gy.dtype == torch.bfloat16 <8>:<add> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <9>:<add> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <10>:<add> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <11>:<add> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) #
# module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def backward(ctx, gy): <0> with torch.no_grad(): <1> B = ctx.B <2> T = ctx.T <3> C = ctx.C <4> H = ctx.H <5> gy = gy.contiguous() <6> assert gy.dtype == torch.bfloat16 <7> r, k, v, eew, ew, u = ctx.saved_tensors <8> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <9> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <10> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) <11> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) <12> gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.float, memory_format=torch.contiguous_format) <13> <14> wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) <15> <16> gw = torch.sum(gw.view(B*T, H, C//H), 0) <17> gu = torch.sum(gu.view(B*T, H, C//H), 0) <18> return (None, None, None, None, gr, gk, gv, gw.bfloat16(), gu.bfloat16()) <19>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 2=========== sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor sum(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.function._SingleLevelFunction backward(ctx: Any, *grad_outputs: Any) -> Any at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): - assert HEAD_SIZE == C // H assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 + assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H + assert r.is_contiguous() - r = r.contiguous() + assert k.is_contiguous() - k = k.contiguous() + assert v.is_contiguous() - v = v.contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + ew = (-torch.exp(w.float())).contiguous() - w = w.float().contiguous() - u = u.contiguous() - ew = -torch.exp(w) + eew = (torch.exp(ew)).contiguous() - eew = torch.exp(ew) ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) - y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) - wkv5_cuda.forward(B, T, C, H, r, k, v,</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, 
u): # offset: 1 <s> u, y) - wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y - return y
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5.__init__
Modified
BlinkDL~RWKV-LM
653ae70a66f0a9905e232f23ff7936dbbf3b26f7
misc
<5>:<add> assert HEAD_SIZE == self.head_size # change HEAD_SIZE to match args.head_size_a
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> self.head_size_divisor = args.head_size_divisor <8> <9> with torch.no_grad(): <10> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <11> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <12> ddd = torch.ones(1, 1, args.n_embd) <13> for i in range(args.n_embd): <14> ddd[0, 0, i] = i / args.n_embd <15> <16> # fancy time_mix <17> self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) <18> self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) <19> self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <20> self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <21> <22> # fancy time_decay <23> decay_speed = torch.ones(args.dim_att) <24> for n in range(args.dim_att): <25> decay_speed[n] = -6 + 5 * (n / (args.</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_decay = nn.Parameter(decay_speed.reshape(self.n_head, self.head_size)) # print(layer_id, self.time_decay.flatten()[:3].cpu().numpy(), '...', self.time_decay.flatten()[-3:].cpu().numpy()) tmp = torch.zeros(args.dim_att) for n in range(args.dim_att): zigzag = ((n + 1) % 3 - 1) * 0.1 tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att) ===========unchanged ref 0=========== at: RWKV-v4neo.src.model HEAD_SIZE = 64 at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: 
Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.normalization GroupNorm(num_groups: int, num_channels: int, eps: float=1e-5, affine: bool=True, device=None, dtype=None) at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) ===========unchanged ref 2=========== at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): - assert HEAD_SIZE == C // H assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 + assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H + assert r.is_contiguous() - r = r.contiguous() + assert k.is_contiguous() - k = k.contiguous() + assert v.is_contiguous() - v = v.contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + ew = (-torch.exp(w.float())).contiguous() - w = w.float().contiguous() - u = u.contiguous() - ew = -torch.exp(w) + eew = (torch.exp(ew)).contiguous() - eew = torch.exp(ew) ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) - y = torch.empty((B, T, C), device=w.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) - wkv5_cuda.forward(B, T, C, H, r, k, v,</s>
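The time_mix_* tensors start as pow(ddd, exponent) with ddd[i] = i / n_embd and an exponent proportional to ratio_1_to_almost0 = 1 - layer_id / n_layer, so deeper layers begin closer to 1 and therefore lean more on the current token in xk = x * time_mix_k + xx * (1 - time_mix_k). Worked values for one channel (example depth n_layer = 24):

mix_first_layer = 0.5 ** 1.0          # layer_id = 0  -> 0.50
mix_last_layer  = 0.5 ** (1.0 / 24)   # layer_id = 23 -> ~0.97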
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5.jit_func
Modified
BlinkDL~RWKV-LM
ad848131362c68ac651b16c0bea25a7ba3f6a6ea
RWKV5 - extra silu for better performance (breaking change)
<10>:<add> v = F.silu(self.value(xv)) <del> v = self.value(xv)
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5(MyModule): @MyFunction def jit_func(self, x): <0> B, T, C = x.size() <1> <2> xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr <3> xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) <4> xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) <5> xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) <6> xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) <7> <8> r = self.receptance(xr) <9> k = self.key(xk) <10> v = self.value(xv) <11> g = F.silu(self.gate(xg)) <12> <13> return r, k, v, g <14>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model MyFunction = torch.jit.script_method MyFunction = __nop at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5.__init__ self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) at: torch.nn.functional silu(input: Tensor, inplace: bool=False) -> Tensor
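For reference on the "extra silu" in this commit: F.silu(x) equals x * sigmoid(x), so small and negative pre-activations are pushed toward zero while large positive ones pass through nearly unchanged.

import torch
import torch.nn.functional as F
x = torch.tensor([-2.0, 0.0, 2.0])
assert torch.allclose(F.silu(x), x * torch.sigmoid(x))   # silu(x) = x * sigmoid(x)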
RWKV-v4neo.src.model/RUN_CUDA
Modified
BlinkDL~RWKV-LM
e7a6e5a5362e82eb3a75998ce2f986cbc4902bcb
fix
<0>:<add> return WKV.apply(B, T, C, w, u, k, v) <del> return WKV_5.apply(B, T, C, H, r, k, v, w, u)
# module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, w, u, k, v): - def RUN_CUDA(B, T, C, H, r, k, v, w, u): <0> return WKV_5.apply(B, T, C, H, r, k, v, w, u) <1>
===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # CUDA RWKV5 Kernel ######################################################################################################## + if 'r4' in os.environ["RWKV_MY_TESTING"]: + HEAD_SIZE = str(os.environ["RWKV_HEAD_SIZE_A"]) - HEAD_SIZE = os.environ["RWKV_HEAD_SIZE_A"] + wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], - wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], + verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) - verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) + + class WKV_5(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + assert HEAD_SIZE == C // H + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model # offset: 1 <s> ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v.is_contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + ew = (-torch.exp(w.float())).contiguous() + eew = (torch.exp(ew)).contiguous() + ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + assert gy.dtype == torch.bfloat16 + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + assert gy.is_contiguous() + r, k, v, eew, ew, u = ctx.saved_tensors + gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format</s> ===========changed ref 2=========== # module: RWKV-v4neo.src.model # offset: 2 <s>ch.contiguous_format) # .uniform_(-1, 1) + gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) + gw = torch.sum(gw.view(B*T, H, C//H), 0) + gu = torch.sum(gu.view(B*T, H, C//H), 0) + return (None, None, None, None, gr, gk, gv, gw, gu) + + def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) +
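After this fix the two dispatchers coexist: RUN_CUDA keeps the original v4 WKV signature, while RUN_CUDA_RWKV5 takes per-head tensors. Expected call shapes, with example sizes (the kernel asserts bfloat16, contiguity and HEAD_SIZE == C // H; 64 is assumed here):

B, T, H = 2, 16, 8             # example sizes only
C = H * 64                     # dim_att, so that C // H == HEAD_SIZE
# r, k, v : (B, T, C), bfloat16, contiguous
# w, u    : (H, C // H), i.e. time_decay / time_faaaa reshaped to (n_head, head_size)
# x = RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w=time_decay, u=time_faaaa)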
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5.forward
Modified
BlinkDL~RWKV-LM
e7a6e5a5362e82eb3a75998ce2f986cbc4902bcb
fix
<5>:<add> x = RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa) <del> x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa)
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5(MyModule): def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> <3> r, k, v, g = self.jit_func(x) <4> <5> x = RUN_CUDA(B, T, C, H, r, k, v, w=self.time_decay, u=self.time_faaaa) <6> <7> return self.jit_func_2(x, g) <8>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u) at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5 jit_func(self, x) jit_func(x) at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5.__init__ self.n_head = args.dim_att // self.head_size self.time_decay = nn.Parameter(decay_speed.reshape(self.n_head, self.head_size)) self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] ===========unchanged ref 1=========== call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) ===========changed ref 0=========== # module: RWKV-v4neo.src.model + def RUN_CUDA_RWKV5(B, T, C, H, r, k, v, w, u): + return WKV_5.apply(B, T, C, H, r, k, v, w, u) + ===========changed ref 1=========== # module: RWKV-v4neo.src.model + def RUN_CUDA(B, T, C, w, u, k, v): - def RUN_CUDA(B, T, C, H, r, k, v, w, u): + return WKV.apply(B, T, C, w, u, k, v) - return WKV_5.apply(B, T, C, H, r, k, v, w, u) ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## # CUDA RWKV5 Kernel ######################################################################################################## + if 'r4' in os.environ["RWKV_MY_TESTING"]: + HEAD_SIZE = str(os.environ["RWKV_HEAD_SIZE_A"]) - HEAD_SIZE = os.environ["RWKV_HEAD_SIZE_A"] + wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], - wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], + verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) - verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) + + class WKV_5(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u): + with torch.no_grad(): + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + assert HEAD_SIZE == C // H + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v</s> ===========changed ref 3=========== # module: RWKV-v4neo.src.model # offset: 1 <s> ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v.is_contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + ew = (-torch.exp(w.float())).contiguous() + eew = (torch.exp(ew)).contiguous() + 
ctx.save_for_backward(r, k, v, eew, ew, u) + y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) + return y + @staticmethod + def backward(ctx, gy): + with torch.no_grad(): + assert gy.dtype == torch.bfloat16 + B = ctx.B + T = ctx.T + C = ctx.C + H = ctx.H + assert gy.is_contiguous() + r, k, v, eew, ew, u = ctx.saved_tensors + gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format</s>
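jit_func_2 (shown in the changed ref above) flattens to (B*T, C), scales by 1 / head_size_divisor and applies ln_x, which is a GroupNorm with n_head groups, so each head's head_size channels are normalized together before the output projection. A small standalone sketch (sizes and the divisor are example values, not taken from this entry):

import torch
import torch.nn as nn
H, head_size, divisor = 8, 64, 8.0     # example values; the divisor comes from args.head_size_divisor
C = H * head_size
ln_x = nn.GroupNorm(H, C)              # mirrors self.ln_x: one group per head
x = torch.randn(4 * 16, C)             # a (B*T, C) activation
y = ln_x(x / divisor)                  # then .view(B, T, C) and the output projection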
RWKV-v4neo.src.model/WKV_5.backward
Modified
BlinkDL~RWKV-LM
3db37a72356b736966ddd377268f02b80963af3f
Merge pull request #186 from PicoCreator/patch-1
<11>:<add> gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <12>:<add> gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <del> gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <14>:<add> gw = torch.sum(gw, 0).view(H, C//H) <del> gw = torch.sum(gw.view(B*T, H, C//H), 0) <15>:<add> gu = torch.sum(gu, 0).view(H, C//H) <del> gu = torch.sum(gu.view(B*T, H, C//H), 0)
# module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def backward(ctx, gy): <0> with torch.no_grad(): <1> assert gy.dtype == torch.bfloat16 <2> B = ctx.B <3> T = ctx.T <4> C = ctx.C <5> H = ctx.H <6> assert gy.is_contiguous() <7> r, k, v, eew, ew, u = ctx.saved_tensors <8> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <9> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <10> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <11> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <12> gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) <13> wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) <14> gw = torch.sum(gw.view(B*T, H, C//H), 0) <15> gu = torch.sum(gu.view(B*T, H, C//H), 0)</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def backward(ctx, gy): # offset: 1 ===========unchanged ref 0=========== at: RWKV-v4neo.src.model wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 2=========== sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor sum(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.function._SingleLevelFunction backward(ctx: Any, *grad_outputs: Any) -> Any vjp = backward at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v4neo.src.model ######################################################################################################## # CUDA RWKV5 Kernel ######################################################################################################## if 'r4' in os.environ["RWKV_MY_TESTING"]: HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() 
ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model # offset: 1 <s> .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k, v, eew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) - gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format</s>
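This revision shrinks the gw/gu scratch buffers from (B, T, C) to (B, C), which suggests the kernel now folds the sum over T inside; Python then only reduces over the batch before reshaping to the shared (H, C//H) parameter shape. The two reductions agree for a parameter shared across batch and time; a tiny numerical check:

import torch
B, T, H, N = 2, 4, 3, 64
per_pos = torch.randn(B, T, H * N)                    # per-position grads of a shared (H, N) param
old = torch.sum(per_pos.view(B * T, H, N), 0)         # reduction used before this commit
new = torch.sum(per_pos.sum(dim=1), 0).view(H, N)     # per-sample rows pre-summed over T
assert torch.allclose(old, new, atol=1e-4)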
RWKV-v4neo.src.model/RWKV_TimeMix_RWKV5.jit_func
Modified
BlinkDL~RWKV-LM
3db37a72356b736966ddd377268f02b80963af3f
Merge pull request #186 from PicoCreator/patch-1
<10>:<add> v = self.value(xv) <del> v = F.silu(self.value(xv))
# module: RWKV-v4neo.src.model ######################################################################################################## class RWKV_TimeMix_RWKV5(MyModule): @MyFunction def jit_func(self, x): <0> B, T, C = x.size() <1> <2> xx = self.time_shift(x) # Mix x with the previous timestep to produce xk, xv, xr <3> xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) <4> xv = x * self.time_mix_v + xx * (1 - self.time_mix_v) <5> xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) <6> xg = x * self.time_mix_g + xx * (1 - self.time_mix_g) <7> <8> r = self.receptance(xr) <9> k = self.key(xk) <10> v = F.silu(self.value(xv)) <11> g = F.silu(self.gate(xg)) <12> <13> return r, k, v, g <14>
===========unchanged ref 0=========== at: RWKV-v4neo.src.model MyFunction = torch.jit.script_method MyFunction = __nop at: RWKV-v4neo.src.model.RWKV_TimeMix_RWKV5.__init__ self.time_mix_k = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0)) self.time_mix_v = nn.Parameter(torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1) self.time_mix_r = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_mix_g = nn.Parameter(torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) at: torch.nn.functional silu(input: Tensor, inplace: bool=False) -> Tensor ===========changed ref 0=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, eew, ew, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gw = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) - gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) + gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) - gu = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1</s> ===========changed ref 1=========== # module: RWKV-v4neo.src.model class WKV_5(torch.autograd.Function): @staticmethod def backward(ctx, gy): # offset: 1 <s>False, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.backward(B, T, C, H, r, k, v, eew, ew, u, gy, gr, gk, gv, gw, gu) + gw = torch.sum(gw, 0).view(H, C//H) - gw = torch.sum(gw.view(B*T, H, C//H), 0) + gu = torch.sum(gu, 0).view(H, C//H) - gu = torch.sum(gu.view(B*T, H, C//H), 0) return (None, None, None, None, gr, gk, gv, gw, gu) ===========changed ref 2=========== # module: RWKV-v4neo.src.model ######################################################################################################## # CUDA RWKV5 Kernel ######################################################################################################## if 'r4' in os.environ["RWKV_MY_TESTING"]: HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) wkv5_cuda = load(name="wkv5", sources=["cuda/wkv5_op.cpp", f"cuda/wkv5_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}"]) class WKV_5(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert 
w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ew = (-torch.exp(w.float())).contiguous() eew = (torch.exp(ew)).contiguous() ctx.save_for_backward(r, k, v, eew, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format) # .uniform_(-1, 1) wkv5_cuda.forward(B, T, C, H, r, k</s>
RWKV-v4neo.src.trainer/train_callback.on_train_batch_start
Modified
BlinkDL~RWKV-LM
06ef9a2199df8e47f356ed8b5195b4fc8de41b41
bugfix & new cuda
<9>:<del> if trainer.global_step < w_step: <10>:<del> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <21>:<del> <22>:<del> if trainer.global_step < w_step: <23>:<del> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step)
# module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> if trainer.global_step < w_step: <10> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <11> else: <12> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <13> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <14> progress = (decay_step - w_step + 1) / (decay_total - w_step) <15> progress = min(1, max(0, progress)) <16> <17> if args.lr_final == 0 or args.lr_init == 0: # linear decay <18> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <19> else: # exp decay <20> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <21> <22> if trainer.global_step < w_step: <23> lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) <24> # if trainer.is_global_zero: <25> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <26> <27> if args.my_exit_tokens != 0: # cosine decay</s>
===========below chunk 0=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 lr = args.lr_init * (0.2 + 0.8 * trainer.global_step / w_step) else: real_tokens = real_step * args.ctx_len * args.real_bsz warmup_tokens = w_step * args.ctx_len * args.real_bsz progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens) progress = max(0, min(1, progress)) lr_final_factor = args.lr_final / args.lr_init lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) if args.my_exit_tokens > 0: lr = args.lr_init * lr_mult else: lr = (lr + args.lr_init * lr_mult) / 2 if progress >= 1: if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): my_save( args, trainer, pl_module.state_dict(), f"{args.proj_dir}/rwkv-final.pth", ) exit(0) if args.weight_decay_final > 0: wd_now = args.weight_decay * math.exp(math.log(args.weight_decay_final / args.weight_decay) * progress) else: wd_now = args.weight_decay for param_group in trainer.optimizers[0].param_groups: if param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_now if args.layerwise_lr > 0: param_group["lr"] = lr</s> ===========below chunk 1=========== # module: RWKV-v4neo.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 2 <s>weight_decay"] = wd_now if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr trainer.my_wd = wd_now # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v4neo.src.trainer my_save(args, trainer, dd, ff) at: RWKV-v4neo.src.trainer.train_callback.__init__ self.args = args at: math pi: float cos(x: SupportsFloat, /) -> float exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) -> float
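===========sketch: token-based cosine LR decay===========
The my_exit_tokens branch above replaces the exponential decay with a cosine schedule over the remaining token budget. A self-contained sketch of that multiplier follows; the function name and flat argument list are placeholders rather than the trainer's real interface, but the formula mirrors the lines shown above.

import math

def cosine_lr(real_tokens, warmup_tokens, exit_tokens, lr_init, lr_final):
    # progress runs 0 -> 1 over the token budget after warmup
    progress = (real_tokens - warmup_tokens) / (abs(exit_tokens) - warmup_tokens)
    progress = max(0.0, min(1.0, progress))
    lr_final_factor = lr_final / lr_init
    # cosine from 1.0 (start of decay) down to lr_final_factor (end)
    lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress)
    return lr_init * lr_mult

print(cosine_lr(0, 0, 100, 6e-4, 6e-5))    # 6e-4 at the start
print(cosine_lr(100, 0, 100, 6e-4, 6e-5))  # ~6e-5 at the end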
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
8fea4ebeabc61c03d79763786a7766db65ce45e7
v6 training
<16>:<add> if 'x060' in os.environ["RWKV_MY_TESTING"]: <add> self.att = RWKV_Tmix_x060(args, layer_id) <add> else: <add> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <del> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <21>:<add> if 'x060' in os.environ["RWKV_MY_TESTING"]: <add> self.ffn = RWKV_CMix_x060(args, layer_id) <add> else: <add> self.ffn = RWKV_ChannelMix(args, layer_id) <del> self.ffn = RWKV_ChannelMix(args, layer_id)
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <17> <18> if 'g' in os.environ["RWKV_MY_TESTING"]: <19> self.ffn = MishGLU(args, layer_id) <20> else: <21> self.ffn = RWKV_ChannelMix(args, layer_id) <22> <23> if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: <24> self.tiny_ln = nn.LayerNorm(args.n_embd) <25> self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) <26> self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV_Tmix_x060.__init__ self.head_size = args.head_size_a self.n_head = args.dim_att // self.head_size ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 2=========== ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: 
Optional[Tensor]=None) -> Tensor ===========unchanged ref 3=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): + return WKV_6.apply(B, T, C, H, r, k, v, w, u) +
RWKV-v5.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
8fea4ebeabc61c03d79763786a7766db65ce45e7
v6 training
<7>:<add> if (("_w1" in n) or ("_w2" in n)) and (args.layerwise_lr > 0): <add> lr_1x.add(n) <add> elif (("time_mix" in n) or ("time_maa" in n)) and (args.layerwise_lr > 0): <del> if ("time_mix" in n) and (args.layerwise_lr > 0): <12>:<add> elif (("time_decay" in n) or ("time_daaaa" in n)) and (args.layerwise_lr > 0): <del> elif ("time_decay" in n) and (args.layerwise_lr > 0):
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> if ("time_mix" in n) and (args.layerwise_lr > 0): <8> if args.my_pile_stage == 2: <9> lr_2x.add(n) <10> else: <11> lr_1x.add(n) <12> elif ("time_decay" in n) and (args.layerwise_lr > 0): <13> if args.my_pile_stage == 2: <14> lr_3x.add(n) <15> else: <16> lr_2x.add(n) <17> elif ("time_faaaa" in n) and (args.layerwise_lr > 0): <18> if args.my_pile_stage == 2: <19> lr_2x.add(n) <20> else: <21> lr_1x.add(n) <22> elif ("time_first" in n) and (args.layerwise_lr > 0): <23> lr_3x.add(n) <24> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <25> lr_decay.add(n) <26> else: <27> lr_1x.add(n) <28> <29> lr_decay = sorted(list(lr_decay)) <30> lr_1x = sorted(list(lr_1x)) <31> lr_2x = sorted(list(lr_2x)) <32> lr_3x = sorted(list(lr_3x)) <33> # print('decay', lr_decay) <34> # print('1x', lr_1x) <35> # print('2x', lr_2x) <36> # print('3x', lr_3x)</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s>SpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: RWKV-v5.src.model MyModule = nn.Module MyModule = torch.jit.ScriptModule MyFunction = torch.jit.script_method MyFunction = __nop at: RWKV-v5.src.model.RWKV_ChannelMix.__init__ self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) at: RWKV-v5.src.model.RWKV_ChannelMix.forward xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) k = torch.relu(k) ** 2 ===========unchanged ref 1=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, 
requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor relu(input: Tensor) -> Tensor sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() ===========unchanged ref 2=========== at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.functional mish(input: Tensor, inplace: bool=False) -> Tensor at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v5.src.model + class RWKV_Tmix_x060(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x +
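===========sketch: name-based LR groups===========
configure_optimizers buckets parameters purely by substrings of their names and gives each bucket its own LR scale. A minimal sketch of the same idea on a toy module follows; the module and the plain torch.optim.Adam call are stand-ins (the real code hands the groups to FusedAdam or DeepSpeedCPUAdam and applies the scales in the trainer callback).

import torch
import torch.nn as nn

class TinyMixer(nn.Module):
    def __init__(self):
        super().__init__()
        self.key = nn.Linear(8, 8, bias=False)
        self.time_decay = nn.Parameter(torch.zeros(8))   # name contains "time_decay"

model = TinyMixer()
lr_1x, lr_2x = [], []
for n, p in model.named_parameters():
    (lr_2x if "time_decay" in n else lr_1x).append(p)

optim_groups = [
    {"params": lr_1x, "lr": 1e-4},   # base learning rate
    {"params": lr_2x, "lr": 2e-4},   # 2x for the decay parameters
]
opt = torch.optim.Adam(optim_groups)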
RWKV-v5.src.model/RWKV_Tmix_x060.__init__
Modified
BlinkDL~RWKV-LM
62572c917d8b4d27456085cb43e43883fa9a9e52
better
# module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> <8> with torch.no_grad(): <9> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <10> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <11> ddd = torch.ones(1, 1, args.n_embd) <12> for i in range(args.n_embd): <13> ddd[0, 0, i] = i / args.n_embd <14> <15> # fancy time_mix <16> self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <17> self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <18> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <19> self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) <20> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <21> self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 1 TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g self.time_maa_w1 = nn.Parameter(torch.empty(args.n_embd, TIME_MIX_EXTRA_DIM*5).uniform_(-0.01, 0.01)) self.time_maa_w2 = nn.Parameter(torch.zeros(5, TIME_MIX_EXTRA_DIM, args.n_embd)) # fancy time_decay decay_speed = torch.ones(args.dim_att) for n in range(args.dim_att): decay_speed[n] = -6 + 5 * (n / (args.dim_att - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) TIME_DECAY_EXTRA_DIM = 64 self.time_decay_w1 = nn.Parameter(torch.empty(args.n_embd, TIME_DECAY_EXTRA_DIM).uniform_(-0.01, 0.01)) self.time_decay_w2 = nn.Parameter(torch.zeros(TIME_DECAY_EXTRA_DIM, args.dim_att)) tmp = torch.zeros(args.dim_att) for n in range(args.dim_att): zigzag = ((n + 1) % 3 - 1) * 0.1 tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 2 <s>(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2)) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: 
Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) ===========unchanged ref 2=========== at: torch.nn.modules.normalization GroupNorm(num_groups: int, num_channels: int, eps: float=1e-5, affine: bool=True, device=None, dtype=None) at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
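===========sketch: low-rank data-dependent token mixing===========
time_maa_w1 and time_maa_w2 form a low-rank pair that turns every token into five extra mixing offsets (for w, k, v, r, g). The forward pass is not part of this record, so the snippet below is only a shape-level sketch under that assumption; the sizes and the small random init are placeholders.

import torch

B, T, C, K = 2, 4, 64, 32                                   # K stands in for TIME_MIX_EXTRA_DIM
x = torch.randn(B, T, C)
w1 = torch.randn(C, K * 5) * 0.01                           # stand-in for time_maa_w1
w2 = torch.randn(5, K, C) * 0.01                            # stand-in for time_maa_w2

h = torch.tanh(x @ w1).view(B * T, 5, K).transpose(0, 1)    # (5, B*T, K)
offsets = torch.bmm(h, w2).view(5, B, T, C)                 # one offset per mixing target
mw, mk, mv, mr, mg = offsets.unbind(0)
print(mw.shape)                                             # torch.Size([2, 4, 64])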
RWKV-v5.src.model/RWKV_Tmix_x060.__init__
Modified
BlinkDL~RWKV-LM
cf854f356103a7848fc1f69352bebb5ec153de47
misc
# module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> <8> with torch.no_grad(): <9> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <10> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <11> ddd = torch.ones(1, 1, args.n_embd) <12> for i in range(args.n_embd): <13> ddd[0, 0, i] = i / args.n_embd <14> <15> # fancy time_mix <16> self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <17> self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <18> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <19> self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) <20> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <21> self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 1 TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g self.time_maa_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_MIX_EXTRA_DIM*5).uniform_(-1e-4, 1e-4)) self.time_maa_w2 = nn.Parameter(torch.zeros(5, TIME_MIX_EXTRA_DIM, args.n_embd).uniform_(-1e-4, 1e-4)) # fancy time_decay decay_speed = torch.ones(args.dim_att) for n in range(args.dim_att): decay_speed[n] = -6 + 5 * (n / (args.dim_att - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) TIME_DECAY_EXTRA_DIM = 64 self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_DECAY_EXTRA_DIM).uniform_(-1e-4, 1e-4)) self.time_decay_w2 = nn.Parameter(torch.zeros(TIME_DECAY_EXTRA_DIM, args.dim_att).uniform_(-1e-4, 1e-4)) tmp = torch.zeros(args.dim_att) for n in range(args.dim_att): zigzag = ((n + 1) % 3 - 1) * 0.1 tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 2 <s>.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2)) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, 
layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.jit._script.ScriptModule __init__(arg=None) __init__(self, arg=None) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
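===========sketch: why the zero init of time_maa_w1 is safe===========
With time_maa_w1 initialized to all zeros and time_maa_w2 to a small uniform range (the combination shown in the changed refs below), the data-dependent offsets are exactly zero at step 0, because tanh(x @ 0) = 0 and 0 @ w2 = 0; the layer therefore starts out as plain static token mixing. A two-line check of that claim, assuming the offsets are produced as in the shape sketch above:

import torch

x, w1, w2 = torch.randn(2, 4, 64), torch.zeros(64, 160), torch.empty(5, 32, 64).uniform_(-0.01, 0.01)
h = torch.tanh(x @ w1).view(8, 5, 32).transpose(0, 1)
print(torch.bmm(h, w2).abs().max())   # tensor(0.) -- no data-dependent shift at init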
RWKV-v5.src.model/RWKV.__init__
Modified
BlinkDL~RWKV-LM
cf854f356103a7848fc1f69352bebb5ec153de47
misc
<5>:<add> args.dim_ffn = int((args.n_embd * 3.5) // 32 * 32) # default = 3.5x emb size <del> args.dim_ffn = args.n_embd * 4
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def __init__(self, args): <0> super().__init__() <1> self.args = args <2> if not hasattr(args, 'dim_att'): <3> args.dim_att = args.n_embd <4> if not hasattr(args, 'dim_ffn'): <5> args.dim_ffn = args.n_embd * 4 <6> if not hasattr(args, 'tiny_att_layer'): <7> args.tiny_att_layer = -1 <8> if not hasattr(args, 'tiny_att_dim'): <9> args.tiny_att_dim = -1 <10> assert args.n_embd % 32 == 0 <11> assert args.dim_att % 32 == 0 <12> assert args.dim_ffn % 32 == 0 <13> <14> self.emb = nn.Embedding(args.vocab_size, args.n_embd) <15> <16> self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) <17> <18> self.ln_out = nn.LayerNorm(args.n_embd) <19> self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) <20> <21> if args.head_qk > 0: <22> self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) <23> self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) <24> self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) <25> if args.dropout > 0: <26> self.drop0 = nn.Dropout(p = args.dropout) <27>
===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========changed ref 0=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.head_size = args.head_size_a self.n_head = args.dim_att // self.head_size assert args.dim_att % self.n_head == 0 with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd # fancy time_mix self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX</s> ===========changed ref 1=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 1 <s> ratio_1_to_almost0)) TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g + self.time_maa_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_MIX_EXTRA_DIM*5)) - self.time_maa_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_MIX_EXTRA_DIM*5).uniform_(-1e-4, 1e-4)) + self.time_maa_w2 = nn.Parameter(torch.zeros(5, TIME_MIX_EXTRA_DIM, args.n_embd).uniform_(-0.01, 0.01)) - self.time_maa_w2 = nn.Parameter(torch.zeros(5, TIME_MIX_EXTRA_DIM, args.n_embd).uniform_(-1e-4, 1e-4)) # fancy time_decay decay_speed = torch.ones(args.dim_att) for n in range(args.dim_att): decay_speed[n] = -6 + 5 * (n / (args.dim_att - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) TIME_DECAY_EXTRA_DIM = 64 + self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_DECAY_EXTRA_DIM)) - self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_</s> ===========changed ref 2=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 2 <s>, TIME_DECAY_EXTRA_DIM).uniform_(-1e-4, 1e-4)) + self.time_decay_w2 = nn.Parameter(torch.zeros(TIME_DECAY_EXTRA_DIM, args.dim_att).uniform_(-0.01, 0.01)) - 
self.time_decay_w2 = nn.Parameter(torch.zeros(TIME_DECAY_EXTRA_DIM, args.dim_att).uniform_(-1e-4, 1e-4)) tmp = torch.zeros(args.dim_att) for n in range(args.dim_att): zigzag = ((n + 1) % 3 - 1) * 0.1 tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1
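===========sketch: the new dim_ffn default===========
The new default sizes the FFN at roughly 3.5x the embedding width, floored to a multiple of 32 (the previous default was a flat 4x). A quick worked check with a few common embedding sizes (the values are just examples):

for n_embd in (768, 1024, 2048, 4096):
    dim_ffn = int((n_embd * 3.5) // 32 * 32)
    print(n_embd, "->", dim_ffn)   # 2688, 3584, 7168, 14336 (vs 3072, 4096, 8192, 16384 at 4x)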
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
461c87c7dce7e149cf61331bd7a4aba671ab924e
better guide
<18>:<add> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <del> else: <19>:<add> self.att = RWKV_Tmix_x052(args, layer_id) <del> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <26>:<add> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <del> else: <27>:<add> self.ffn = RWKV_CMix_x052(args, layer_id) <del> self.ffn = RWKV_ChannelMix(args, layer_id)
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060(args, layer_id) <18> else: <19> self.att = RWKV_TimeMix_RWKV5(args, layer_id) <20> <21> if 'g' in os.environ["RWKV_MY_TESTING"]: <22> self.ffn = MishGLU(args, layer_id) <23> else: <24> if 'x060' in os.environ["RWKV_MY_TESTING"]: <25> self.ffn = RWKV_CMix_x060(args, layer_id) <26> else: <27> self.ffn = RWKV_ChannelMix(args, layer_id) <28> <29> if args.tiny_att_dim</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: RWKV-v5.src.model RWKV_Tmix_x052(args, layer_id) RWKV_Tmix_x060(args, layer_id) RWKV_CMix_x052(args, layer_id) RWKV_CMix_x060(args, layer_id) MishGLU(args, layer_id) at: os environ = _createenviron() at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor tril(input: Tensor, diagonal: _int=0, *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.modules.dropout Dropout(p: float=0.5, inplace: bool=False) at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.module Module() at: 
torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] ===========unchanged ref 2=========== _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented register_buffer(name: str, tensor: Optional[Tensor], persistent: bool=True) -> None __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) at: torch.nn.modules.normalization LayerNorm(normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_CMix_x052(MyModule): + @MyFunction + def forward(self, x): + xx = self.time_shift(x) + xk = x * self.time_mix_k + xx * (1 - self.time_mix_k) + xr = x * self.time_mix_r + xx * (1 - self.time_mix_r) + k = self.key(xk) + k = torch.relu(k) ** 2 + kv = self.value(k) + return torch.sigmoid(self.receptance(xr)) * kv +
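===========sketch: selecting the block variant===========
Block now picks its attention and FFN implementations from the RWKV_MY_TESTING environment variable, so the variable has to be set before the model is constructed (the training script typically sets it from a command-line flag). A minimal usage sketch, with the value only as an example:

import os

os.environ["RWKV_MY_TESTING"] = "x060"   # or "x052" for the RWKV-5.2 blocks
# ... build the model afterwards; Block.__init__ then instantiates
# RWKV_Tmix_x060 / RWKV_CMix_x060 (or the x052 pair) for every layer.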
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
e7c27f00dde647b72636ae72f27a167a53987217
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM
<20>:<add> elif 'mamba' in os.environ["RWKV_MY_TESTING"]: <add> self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params <23>:<del> else: <24>:<add> elif 'x060' in os.environ["RWKV_MY_TESTING"]: <del> if 'x060' in os.environ["RWKV_MY_TESTING"]: <25>:<add> self.ffn = RWKV_CMix_x060(args, layer_id) <del> self.ffn = RWKV_CMix_x060(args, layer_id) <26>:<add> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <del> elif 'x052' in os.environ["RWKV_MY_TESTING"]
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060(args, layer_id) <18> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <19> self.att = RWKV_Tmix_x052(args, layer_id) <20> <21> if 'g' in os.environ["RWKV_MY_TESTING"]: <22> self.ffn = MishGLU(args, layer_id) <23> else: <24> if 'x060' in os.environ["RWKV_MY_TESTING"]: <25> self.ffn = RWKV_CMix_x060(args, layer_id) <26> elif 'x052' in os.environ["RWKV_MY_TESTING"]</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.ffn = RWKV_CMix_x052(args, layer_id) if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: RWKV-v5.src.model RWKV_Tmix_x052(args, layer_id) RWKV_Tmix_x060(args, layer_id) RWKV_CMix_x052(args, layer_id) RWKV_CMix_x060(args, layer_id) MishGLU(args, layer_id) at: os environ = _createenviron() at: torch._C._VariableFunctions zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) ===========unchanged ref 1=========== at: torch.nn.modules.module Module() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) at: torch.nn.modules.normalization LayerNorm(normalized_shape: 
_shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
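===========sketch: checking the parameter-count match===========
The Mamba branch uses expand=2.125 so the drop-in mixer lands near the RWKV-6 block's parameter count. That claim is easy to verify with a small helper like the one below; the helper is generic, and the comparison itself is left as a comment because it needs the external Mamba package and a populated args object.

import torch.nn as nn

def count_params(module: nn.Module) -> int:
    return sum(p.numel() for p in module.parameters() if p.requires_grad)

# e.g. compare count_params(RWKV_Tmix_x060(args, 0)) with
#      count_params(Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125))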
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
e7c27f00dde647b72636ae72f27a167a53987217
Merge branch 'main' of https://github.com/BlinkDL/RWKV-LM
<10>:<add> n_params = 0 <13>:<add> <add> s0 = str(shape[0]) if len(shape) > 0 else "" <add> s1 = str(shape[1]) if len(shape) > 1 else "" <add> s2 = str(shape[2]) if len(shape) > 2 else "" <add> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <22>:<add> print() <23>:<add> if 'mamba' in os.environ["RWKV_MY_TESTING"]: <add> m[n] = p <add> if n == "emb.weight": <del> if n == "emb.weight": <24>:<add> scale = -1e-4 <add> nn.init.uniform_(m[n], a=scale, b=-scale) <add> print(f" [scale {scale}]") <add> elif n == "head.weight": <add> if shape[0] > shape[1]:
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> for n in self.state_dict(): <11> p = self.state_dict()[n] <12> shape = p.shape <13> <14> gain = 1.0 <15> scale = 1.0 <16> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <17> if 'ln_x.weight' in n: <18> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <19> m[n] = (p * 0.0) + (layer_scale ** 0.7) <20> else: <21> m[n] = p <22> else: <23> if n == "emb.weight": <24> scale = -1 * self.args.lr_init <25> else: <26> if shape[0] > shape[1]: <27> gain = math.sqrt(shape[0] / shape[1]) <28> <29> zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] <30> <31> for kk in zero: <32> if kk in n: <33> scale = 0 <34> if n == "head.weight": <35> scale = 0.5 <36> if "head_k." in n: <37> scale = 0.1 <38> if "head_q." in n: <39> scale = 0 <40> <41> print(f"{str(shape[0]).ljust(5)} {str(shape[1]).ljust(5)} {str(</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() # if n == "emb.weight": # print(m[n]) gc.collect() torch.cuda.empty_cache() return m ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args at: math sqrt(x: SupportsFloat, /) -> float at: os environ = _createenviron() at: torch.nn.init uniform_(tensor: Tensor, a: float=0., b: float=1.) -> Tensor zeros_(tensor: Tensor) -> Tensor orthogonal_(tensor, gain=1) ===========changed ref 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) if self.layer_id == 0: self.ln0 = nn.LayerNorm(args.n_embd) if args.my_pos_emb > 0: self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) if self.layer_id == 0 and self.args.pre_ffn > 0: self.ffnPre = RWKV_ChannelMix(args, 0) else: if 'x060' in os.environ["RWKV_MY_TESTING"]: self.att = RWKV_Tmix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.att = RWKV_Tmix_x052(args, layer_id) + elif 'mamba' in os.environ["RWKV_MY_TESTING"]: + self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if 'g' in os.environ["RWKV_MY_TESTING"]: self.ffn = MishGLU(args, layer_id) - else: + elif 'x060' in os.environ["RWKV_MY_TESTING"]</s> ===========changed ref 1=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 <s>_id) - else: + elif 'x060' in os.environ["RWKV_MY_TESTING"]: - if 'x060' in os.environ["RWKV_MY_TESTING"]: + self.ffn = RWKV_CMix_x060(args, layer_id) - self.ffn = RWKV_CMix_x060(args, layer_id) + elif 'x052' in os.environ["RWKV_MY_TESTING"]: - elif 'x052' in os.environ["RWKV_MY_TESTING"]: + self.ffn = RWKV_CMix_x052(args, layer_id) - self.ffn = RWKV_CMix_x052(args, layer_id) + elif 'mamba' in os.environ["RWKV_MY_TESTING"]: + self.ffn = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False)</s> ===========changed ref 2=========== # module: RWKV-v5.src.model 
######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 2 <s> self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout)
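===========sketch: the three initializers in play===========
generate_init_weight only ever reaches for three initializers, chosen by parameter name: zeros for projections that should start silent, a small uniform for the embedding, and a scaled orthogonal for the rest. A dummy-tensor illustration follows; the shape, the ±1e-4 range and the 0.5 gain are example values taken from the branches above, and the exact embedding range differs between revisions.

import torch
import torch.nn as nn

w = torch.empty(1024, 768)
nn.init.zeros_(w)                      # e.g. .att.output., .ffn.value., out_proj, biases
nn.init.uniform_(w, a=-1e-4, b=1e-4)   # e.g. emb.weight: small uniform around zero
nn.init.orthogonal_(w, gain=0.5)       # e.g. head.weight: orthogonal with an explicit scale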
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
4e2e255b9ad9ec4dd075b20d70e177c3aeb135f2
better
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> n_params = 0 <11> for n in self.state_dict(): <12> p = self.state_dict()[n] <13> shape = p.shape <14> <15> s0 = str(shape[0]) if len(shape) > 0 else "" <16> s1 = str(shape[1]) if len(shape) > 1 else "" <17> s2 = str(shape[2]) if len(shape) > 2 else "" <18> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <19> <20> gain = 1.0 <21> scale = 1.0 <22> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <23> if 'ln_x.weight' in n: <24> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <25> m[n] = (p * 0.0) + (layer_scale ** 0.7) <26> else: <27> m[n] = p <28> print() <29> else: <30> if 'mamba' in os.environ["RWKV_MY_TESTING"]: <31> m[n] = p <32> if n == "emb.weight": <33> scale = -1e-4 <34> nn.init.uniform_(m[n], a=scale, b=-scale) <35> print(f" [scale {scale}]") <36> elif n == "head.weight": <37> if shape[0] > shape[1]: <38> gain = math.sqrt(shape[0</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 scale = 0.5 nn.init.orthogonal_(m[n], gain=gain * scale) print(f" [scale {scale}]") elif '.out_proj.weight' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") elif '.bias' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") else: print() else: if n == "emb.weight": scale = -1e-4 else: if shape[0] > shape[1]: gain = math.sqrt(shape[0] / shape[1]) zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] for kk in zero: if kk in n: scale = 0 if n == "head.weight": scale = 0.5 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 print(f" [scale {scale}]") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) </s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 2 <s>=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() n_params += m[n].numel() # if n == "emb.weight": # print(m[n]) print('model params', n_params) gc.collect() torch.cuda.empty_cache() return m ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args at: math sqrt(x: SupportsFloat, /) -> float at: os environ = _createenviron() at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.init uniform_(tensor: Tensor, a: float=0., b: float=1.) -> Tensor zeros_(tensor: Tensor) -> Tensor ===========unchanged ref 1=========== orthogonal_(tensor, gain=1)
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
402dab6bd3bb22acd4b149ff04f1a8ab149317b1
fix init
<37>:<add> if shape[1] > shape[0]: # !!! only for pytorch where linear layer weight is transposed !!! <del> if shape[0] > shape[1]: <38>:<add> gain = math.sqrt(shape[1] / shape[0]) <del> gain = math.sqrt(shape[0
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> n_params = 0 <11> for n in self.state_dict(): <12> p = self.state_dict()[n] <13> shape = p.shape <14> <15> s0 = str(shape[0]) if len(shape) > 0 else "" <16> s1 = str(shape[1]) if len(shape) > 1 else "" <17> s2 = str(shape[2]) if len(shape) > 2 else "" <18> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <19> <20> gain = 1.0 <21> scale = 1.0 <22> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <23> if 'ln_x.weight' in n: <24> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <25> m[n] = (p * 0.0) + (layer_scale ** 0.7) <26> else: <27> m[n] = p <28> print() <29> else: <30> if 'mamba' in os.environ["RWKV_MY_TESTING"]: <31> m[n] = p <32> if n == "emb.weight": <33> scale = -1e-4 <34> nn.init.uniform_(m[n], a=scale, b=-scale) <35> print(f" [scale {scale}]") <36> elif n == "head.weight": <37> if shape[0] > shape[1]: <38> gain = math.sqrt(shape[0</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 scale = 0.5 nn.init.orthogonal_(m[n], gain=gain * scale) print(f" [scale {scale}]") elif '.out_proj.weight' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") elif '.bias' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") else: print() else: if n == "emb.weight": scale = -1e-4 else: if shape[0] > shape[1]: gain = math.sqrt(shape[0] / shape[1]) zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] for kk in zero: if kk in n: scale = 0 if n == "head.weight": scale = 0.5 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 for kk in [".att.key."]: if kk in n: scale = 0.1 for kk in [".ffn.key."]: if kk in n: scale = 0.5 for kk in [".att.gate."]: if kk in n: scale = 0.1 print(f" [scale {scale}]") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0],</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 2 <s>], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() n_params += m[n].numel() # if n == "emb.weight": # print(m[n]) print('model params', n_params) gc.collect() torch.cuda.empty_cache() return m ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args at: math sqrt(x: SupportsFloat, /) -> float at: os environ = _createenviron() at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.init uniform_(tensor: Tensor, a: float=0., b: float=1.) -> Tensor zeros_(tensor: Tensor) -> Tensor ===========unchanged ref 1=========== orthogonal_(tensor, gain=1)
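The "fix init" diff above hinges on PyTorch's nn.Linear storing its weight transposed, i.e. with shape (out_features, in_features); a minimal check with hypothetical sizes (not from the repo):

import torch.nn as nn

head = nn.Linear(768, 50304, bias=False)   # hypothetical n_embd=768, vocab_size=50304
print(head.weight.shape)                   # torch.Size([50304, 768])
# shape[0] is out_features and shape[1] is in_features, so the corrected test
# shape[1] > shape[0] asks whether fan-in exceeds fan-out before boosting the gain.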
RWKV-v5.src.model/RWKV_Tmix_x060.__init__
Modified
BlinkDL~RWKV-LM
2142cb78e9bca400991008400b257176788badb4
MODEL_TYPE "x060a-f4" (sometimes better)
# module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> <8> with torch.no_grad(): <9> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <10> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <11> ddd = torch.ones(1, 1, args.n_embd) <12> for i in range(args.n_embd): <13> ddd[0, 0, i] = i / args.n_embd <14> <15> # fancy time_mix <16> self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <17> self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <18> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <19> self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) <20> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) <21> self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 1 TIME_MIX_EXTRA_DIM = 32 # generate TIME_MIX for w,k,v,r,g self.time_maa_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_MIX_EXTRA_DIM*5)) self.time_maa_w2 = nn.Parameter(torch.zeros(5, TIME_MIX_EXTRA_DIM, args.n_embd).uniform_(-0.01, 0.01)) # fancy time_decay decay_speed = torch.ones(args.dim_att) for n in range(args.dim_att): decay_speed[n] = -6 + 5 * (n / (args.dim_att - 1)) ** (0.7 + 1.3 * ratio_0_to_1) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) TIME_DECAY_EXTRA_DIM = 64 self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, TIME_DECAY_EXTRA_DIM)) self.time_decay_w2 = nn.Parameter(torch.zeros(TIME_DECAY_EXTRA_DIM, args.dim_att).uniform_(-0.01, 0.01)) tmp = torch.zeros(args.dim_att) for n in range(args.dim_att): zigzag = ((n + 1) % 3 - 1) * 0.1 tmp[n] = ratio_0_to_1 * (1 - (n / (args.dim_att - 1))) + zigzag self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): def __init__(self, args, layer_id): # offset: 2 <s>(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.gate = nn.Linear(args.n_embd, args.dim_att, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2)) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: 
Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.jit._script.ScriptModule __init__() __init__(self, arg=None) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
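The time_maa_w1 / time_maa_w2 pair introduced above is a low-rank (LoRA-style) projection that turns the token-shift signal into five data-dependent mixing offsets, one each for w, k, v, r, g; a rough shape-only sketch, assuming rank 32 as in the code and hypothetical B, T, n_embd:

import torch

B, T, C, R = 2, 8, 512, 32                       # hypothetical batch, length, n_embd, rank
x  = torch.randn(B, T, C)
xx = torch.randn(B, T, C)                        # token-shifted difference, as in jit_func
maa_x = torch.zeros(1, 1, C)                     # time_maa_x
w1 = torch.zeros(C, R * 5)                       # time_maa_w1
w2 = torch.zeros(5, R, C).uniform_(-0.01, 0.01)  # time_maa_w2

xxx = x + xx * maa_x
xxx = torch.tanh(xxx @ w1).view(B * T, 5, -1).transpose(0, 1)  # (5, B*T, R)
xxx = torch.bmm(xxx, w2).view(5, B, T, -1)                     # (5, B, T, C)
mw, mk, mv, mr, mg = xxx.unbind(dim=0)                         # one offset per mixed input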
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
2142cb78e9bca400991008400b257176788badb4
MODEL_TYPE "x060a-f4" (sometimes better)
<16>:<add> if 'x060a' in os.environ["RWKV_MY_TESTING"]: <del> if 'x060' in os.environ["RWKV_MY_TESTING"]: <17>:<add> self.att = RWKV_Tmix_x060a(args, layer_id) <add> elif 'x060' in os.environ["RWKV_MY_TESTING"]:
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060(args, layer_id) <18> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <19> self.att = RWKV_Tmix_x052(args, layer_id) <20> elif 'mamba' in os.environ["RWKV_MY_TESTING"]: <21> self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params <22> <23> if 'g' in os.environ["RWKV_MY_TESTING"]: <24> self.ffn = MishGLU(args, layer_id) <25> elif</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.ffn = RWKV_CMix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.ffn = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.jit._script.ScriptModule __init__() __init__(self, arg=None) ===========unchanged ref 1=========== at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) 
===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w +
RWKV-v5.src.model/RWKV.__init__
Modified
BlinkDL~RWKV-LM
2142cb78e9bca400991008400b257176788badb4
MODEL_TYPE "x060a-f4" (sometimes better)
<5>:<add> if '-f4' in os.environ["RWKV_MY_TESTING"]: <add> args.dim_ffn = int((args.n_embd * 4) // 32 * 32) <add> else: <add> args.dim_ffn = int((args.n_embd * 3.5) // 32 * 32) # default = 3.5x emb size <del> args.dim_ffn = int((args.n_embd * 3.5) // 32 * 32) # default = 3.5x emb size
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def __init__(self, args): <0> super().__init__() <1> self.args = args <2> if not hasattr(args, 'dim_att'): <3> args.dim_att = args.n_embd <4> if not hasattr(args, 'dim_ffn'): <5> args.dim_ffn = int((args.n_embd * 3.5) // 32 * 32) # default = 3.5x emb size <6> if not hasattr(args, 'tiny_att_layer'): <7> args.tiny_att_layer = -1 <8> if not hasattr(args, 'tiny_att_dim'): <9> args.tiny_att_dim = -1 <10> assert args.n_embd % 32 == 0 <11> assert args.dim_att % 32 == 0 <12> assert args.dim_ffn % 32 == 0 <13> <14> self.emb = nn.Embedding(args.vocab_size, args.n_embd) <15> <16> self.blocks = nn.ModuleList([Block(args, i) for i in range(args.n_layer)]) <17> <18> self.ln_out = nn.LayerNorm(args.n_embd) <19> self.head = nn.Linear(args.n_embd, args.vocab_size, bias=False) <20> <21> if args.head_qk > 0: <22> self.head_q = nn.Linear(args.n_embd, args.head_qk, bias=False) <23> self.head_k = nn.Linear(args.n_embd, args.head_qk, bias=False) <24> self.register_buffer("copy_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) <25> if args.dropout > 0: <26> self.drop0 = nn.Dropout(p = args.dropout) <27>
===========unchanged ref 0=========== at: torch._C._VariableFunctions zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w + ===========changed ref 3=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) if self.layer_id == 0: self.ln0 = nn.LayerNorm(args.n_embd) if args.my_pos_emb > 0: self.pos_emb_x = 
nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) if self.layer_id == 0 and self.args.pre_ffn > 0: self.ffnPre = RWKV_ChannelMix(args, 0) else: + if 'x060a' in os.environ["RWKV_MY_TESTING"]: - if 'x060' in os.environ["RWKV_MY_TESTING"]: + self.att = RWKV_Tmix_x060a(args, layer_id) + elif 'x060' in os.environ["RWKV_MY_TESTING"]: self.att = RWKV_Tmix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.att = RWKV_Tmix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rw</s>
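The dim_ffn defaults in the diff round the FFN width down to a multiple of 32 (3.5x n_embd normally, 4x with the '-f4' flag); a quick worked example with a hypothetical n_embd:

n_embd = 1056                                      # hypothetical, itself a multiple of 32
dim_ffn_default = int((n_embd * 3.5) // 32 * 32)   # 3696 -> 3680
dim_ffn_f4      = int((n_embd * 4)   // 32 * 32)   # 4224 -> 4224 (already a multiple of 32)
print(dim_ffn_default, dim_ffn_f4)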
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
2142cb78e9bca400991008400b257176788badb4
MODEL_TYPE "x060a-f4" (sometimes better)
<22>:<add> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n or n.endswith('_w') or n.endswith('_w1') or n.endswith('_w2') or n.endswith('_bias'): <del> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n:
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> n_params = 0 <11> for n in self.state_dict(): <12> p = self.state_dict()[n] <13> shape = p.shape <14> <15> s0 = str(shape[0]) if len(shape) > 0 else "" <16> s1 = str(shape[1]) if len(shape) > 1 else "" <17> s2 = str(shape[2]) if len(shape) > 2 else "" <18> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <19> <20> gain = 1.0 <21> scale = 1.0 <22> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n: <23> if 'ln_x.weight' in n: <24> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <25> m[n] = (p * 0.0) + (layer_scale ** 0.7) <26> else: <27> m[n] = p <28> print() <29> else: <30> if 'mamba' in os.environ["RWKV_MY_TESTING"]: <31> m[n] = p <32> if n == "emb.weight": <33> scale = -1e-4 <34> nn.init.uniform_(m[n], a=scale, b=-scale) <35> print(f" [scale {scale}]") <36> elif n == "head.weight": <37> if shape[1] > shape[0]: # !!! only for pytorch where linear layer weight</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 gain = math.sqrt(shape[1] / shape[0]) nn.init.orthogonal_(m[n], gain=gain * scale) print(f" [scale {scale}]") elif '.out_proj.weight' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") elif '.bias' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") else: print() else: if n == "emb.weight": scale = -1e-4 else: assert n.endswith('.weight') if shape[1] > shape[0]: # !!! only for pytorch where linear layer weight is transposed !!! gain = math.sqrt(shape[1] / shape[0]) zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] for kk in zero: if kk in n: scale = 0 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 for kk in [".att.key."]: if kk in n: scale = 0.1 for kk in [".att.gate."]: if kk in n: scale = 0.1 print(f" [scale {scale}]") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) </s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 2 <s> device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() n_params += m[n].numel() # if n == "emb.weight": # print(m[n]) print('model params', n_params) gc.collect() torch.cuda.empty_cache() return m ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060a(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w +
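One detail of the init code above: ln_x.weight (the gain of the GroupNorm applied to the attention output) is not left at 1.0 but set to layer_scale ** 0.7 with layer_scale = (1 + layer_id) / n_layer, so early layers start with a smaller gain than late ones; a quick numeric check for a hypothetical 24-layer model:

n_layer = 24                                   # hypothetical depth
for layer_id in (0, 11, 23):
    layer_scale = (1 + layer_id) / n_layer
    print(layer_id, round(layer_scale ** 0.7, 3))
# 0 -> ~0.108, 11 -> ~0.616, 23 -> 1.0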
RWKV-v5.src.model/WKV_6.forward
Modified
BlinkDL~RWKV-LM
ffb089b9b21063bb4c247f900b71a917556ad522
faster v6 cuda
<16>:<del> ew = (-torch.exp(w.float())).contiguous() <17>:<add> ctx.save_for_backward(r, k, v, w, u) <del> ctx.save_for_backward(r, k, v, ew, u) <19>:<add> wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) <del> wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y)
# module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): <0> with torch.no_grad(): <1> assert r.dtype == torch.bfloat16 <2> assert k.dtype == torch.bfloat16 <3> assert v.dtype == torch.bfloat16 <4> assert w.dtype == torch.bfloat16 <5> assert u.dtype == torch.bfloat16 <6> assert HEAD_SIZE == C // H <7> ctx.B = B <8> ctx.T = T <9> ctx.C = C <10> ctx.H = H <11> assert r.is_contiguous() <12> assert k.is_contiguous() <13> assert v.is_contiguous() <14> assert w.is_contiguous() <15> assert u.is_contiguous() <16> ew = (-torch.exp(w.float())).contiguous() <17> ctx.save_for_backward(r, k, v, ew, u) <18> y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <19> wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) <20> return y <21>
===========unchanged ref 0=========== at: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.function._SingleLevelFunction forward(ctx: Any, *args: Any, **kwargs: Any) -> Any vjp = backward at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x060' in os.environ["RWKV_MY_TESTING"]: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() - ew = (-torch.exp(w.float())).contiguous() + ctx.save_for_backward(r, k, v, w, u) - ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + wkv6_cuda.forward(B, T</s> ===========changed ref 1=========== # module: RWKV-v5.src.model # offset: 1 <s>ch.contiguous_format)#.uniform_(-100, 100) + wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) - wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() + r, k, v, w, u = ctx.saved_tensors - r, k, v, ew, u = ctx.saved_tensors gr = 
torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#</s>
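The "faster v6 cuda" change above stops precomputing ew = -exp(w) on the Python side: the raw bf16 w is saved for backward and passed straight to wkv6_cuda, so the exponentials are presumably folded into the kernel itself. The decay each channel applies per timestep is unchanged; a heavily simplified reference of that recurrence (pure PyTorch, ignoring the u bonus, the receptance readout and the per-head outer-product structure):

import torch

w = torch.randn(8)                  # hypothetical 8 channels of the log-log decay parameter
decay = torch.exp(-torch.exp(w))    # effective per-step decay, always in (0, 1)
state = torch.zeros(8)
for kv in torch.randn(16, 8):       # 16 fake timesteps of key*value contributions
    state = state * decay + kv      # the state update the wkv6 kernel carries out per head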
RWKV-v5.src.model/WKV_6.backward
Modified
BlinkDL~RWKV-LM
ffb089b9b21063bb4c247f900b71a917556ad522
faster v6 cuda
<7>:<add> r, k, v, w, u = ctx.saved_tensors <del> r, k, v, ew, u = ctx.saved_tensors <13>:<add> wkv6_cuda.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) <del> wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu)
# module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): <0> with torch.no_grad(): <1> assert gy.dtype == torch.bfloat16 <2> B = ctx.B <3> T = ctx.T <4> C = ctx.C <5> H = ctx.H <6> assert gy.is_contiguous() <7> r, k, v, ew, u = ctx.saved_tensors <8> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <9> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <10> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <11> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <12> gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <13> wkv6_cuda.backward(B, T, C, H, r, k, v, ew, u, gy, gr, gk, gv, gw, gu) <14> gu = torch.sum(gu, 0).view(H, C//H) <15> return (None, None, None, None, gr, gk, gv, gw, gu) <16>
===========unchanged ref 0=========== at: RWKV-v5.src.model wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... ===========unchanged ref 1=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 2=========== sum(input: Tensor, dim: Sequence[Union[str, ellipsis, None]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor sum(input: Tensor, *, dtype: Optional[_dtype]=None) -> Tensor sum(input: Tensor, dim: Optional[Union[_int, _size]], keepdim: _bool=False, *, dtype: Optional[_dtype]=None, out: Optional[Tensor]=None) -> Tensor at: torch.autograd.function._SingleLevelFunction backward(ctx: Any, *grad_outputs: Any) -> Any at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() - ew = (-torch.exp(w.float())).contiguous() + ctx.save_for_backward(r, k, v, w, u) - ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) - wkv6_cuda.forward(B, T, C, H, r, k, v, ew, u, y) return y ===========changed ref 1=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x060' in os.environ["RWKV_MY_TESTING"]: wkv6_cuda = load(name="wkv6", sources=["cuda/wkv6_op.cpp", f"cuda/wkv6_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", 
f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() - ew = (-torch.exp(w.float())).contiguous() + ctx.save_for_backward(r, k, v, w, u) - ctx.save_for_backward(r, k, v, ew, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + wkv6_cuda.forward(B, T</s>
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
43045b702a509eca80237d2516887a0f79dc7984
fix
<20>:<del> gain = 1.0 <29>:<add> elif n == "emb.weight": <add> m[n] = p <add> scale = -1e-4 <add> nn.init.uniform_(m[n], a=scale, b=-scale) <add> print(f" [scale {scale}]") <add> elif n == "head.weight": <add> m[n] = p <add> if self.args.vocab_size > self.args.n_embd: <add> scale = 0.5 * math.sqrt(self.args.vocab_size / self.args.n_embd) <add> else: <add> scale = 0.5 <add> nn.init.orthogonal_(m[n], gain=scale) <add> print(f" [scale {scale}]") <32>:<del> if n == "emb.weight": <33>:<del> scale = -1e-4 <34>:<del> nn.init.uniform_(m[n], a=scale, b=-scale) <35>:<del> print(f" [scale {scale
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> n_params = 0 <11> for n in self.state_dict(): <12> p = self.state_dict()[n] <13> shape = p.shape <14> <15> s0 = str(shape[0]) if len(shape) > 0 else "" <16> s1 = str(shape[1]) if len(shape) > 1 else "" <17> s2 = str(shape[2]) if len(shape) > 2 else "" <18> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <19> <20> gain = 1.0 <21> scale = 1.0 <22> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n or n.endswith('_w') or n.endswith('_w1') or n.endswith('_w2') or n.endswith('_bias'): <23> if 'ln_x.weight' in n: <24> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <25> m[n] = (p * 0.0) + (layer_scale ** 0.7) <26> else: <27> m[n] = p <28> print() <29> else: <30> if 'mamba' in os.environ["RWKV_MY_TESTING"]: <31> m[n] = p <32> if n == "emb.weight": <33> scale = -1e-4 <34> nn.init.uniform_(m[n], a=scale, b=-scale) <35> print(f" [scale {scale</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 elif n == "head.weight": if shape[1] > shape[0]: # !!! only for pytorch where linear layer weight is transposed !!! gain = math.sqrt(shape[1] / shape[0]) nn.init.orthogonal_(m[n], gain=gain * scale) print(f" [scale {scale}]") elif '.out_proj.weight' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") elif '.bias' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") else: print() else: if n == "emb.weight": scale = -1e-4 else: assert n.endswith('.weight') if shape[1] > shape[0]: # !!! only for pytorch where linear layer weight is transposed !!! gain = math.sqrt(shape[1] / shape[0]) zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] for kk in zero: if kk in n: scale = 0 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 for kk in [".att.key."]: if kk in n: scale = 0.1 for kk in [".att.gate."]: if kk in n: scale = 0.1 print(f" [scale {scale}]") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 2 <s>) if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=gain * scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() n_params += m[n].numel() # if n == "emb.weight": # print(m[n]) print('model params', n_params) gc.collect() torch.cuda.empty_cache() return m ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args at: math sqrt(x: SupportsFloat, /) -> float at: os environ = _createenviron() at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.init uniform_(tensor: Tensor, a: float=0., b: float=1.) -> Tensor zeros_(tensor: Tensor) -> Tensor ===========unchanged ref 1=========== orthogonal_(tensor, gain=1)
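The "fix" above gives head.weight its own branch: when the vocabulary is wider than the embedding, the orthogonal init is boosted by 0.5 * sqrt(vocab_size / n_embd); a worked example with hypothetical sizes:

import math
import torch
import torch.nn as nn

vocab_size, n_embd = 65536, 2048       # hypothetical sizes
scale = 0.5 * math.sqrt(vocab_size / n_embd) if vocab_size > n_embd else 0.5
print(round(scale, 3))                 # 0.5 * sqrt(32) ~= 2.828

w = torch.empty(vocab_size, n_embd)    # head.weight layout: (vocab_size, n_embd)
nn.init.orthogonal_(w, gain=scale)     # as in the diff (slow at sizes this large)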
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
033ce840de872dd67c42ed9f9bba08fa2d683eb7
v6.0b & prepare for state-tuning
<18>:<add> elif 'x060b' in os.environ["RWKV_MY_TESTING"]: <add> self.att = RWKV_Tmix_x060b(args, layer_id)
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060a' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060a(args, layer_id) <18> elif 'x060' in os.environ["RWKV_MY_TESTING"]: <19> self.att = RWKV_Tmix_x060(args, layer_id) <20> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <21> self.att = RWKV_Tmix_x052(args, layer_id) <22> elif 'mamba' in os.environ["RWKV_MY_TESTING"]: <23> self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 if 'g' in os.environ["RWKV_MY_TESTING"]: self.ffn = MishGLU(args, layer_id) elif 'x060' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.ffn = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: RWKV-v5.src.model MyFunction = torch.jit.script_method MyFunction = __nop RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u) at: RWKV-v5.src.model.RWKV_Tmix_x060b.__init__ self.n_head = args.dim_att // self.head_size self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_rkvw_w1 = nn.Parameter(torch.zeros(args.n_embd, D_MIX_LORA*4)) self.time_maa_rkvw_w2 = nn.Parameter(torch.zeros(4, D_MIX_LORA, args.n_embd).uniform_(-0.01, 0.01)) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, D_DECAY_LORA)) ===========unchanged ref 1=========== self.time_decay_w2 = nn.Parameter(torch.zeros(D_DECAY_LORA, args.dim_att).uniform_(-0.01, 0.01)) self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) at: torch._C._VariableFunctions bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.normalization LayerNorm(normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) ===========changed ref 0=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u, s): + return WKV_6STATE.apply(B, T, C, H, r, k, v, w, u, s) + ===========changed ref 1=========== # module: RWKV-v5.src.model + + class 
WKV_6STATE(torch.autograd.Function): + @staticmethod + def forward(ctx, B, T, C, H, r, k, v, w, u, s): + with torch.no_grad(): + assert r.dtype == torch.bfloat16 + assert k.dtype == torch.bfloat16 + assert v.dtype == torch.bfloat16 + assert w.dtype == torch.bfloat16 + assert u.dtype == torch.bfloat16 + assert s.dtype == torch.bfloat16 + assert HEAD_SIZE == C // H + ctx.B = B + ctx.T = T + ctx.C = C + ctx.H = H + assert r.is_contiguous() + assert k.is_contiguous() + assert v.is_contiguous() + assert w.is_contiguous() + assert u.is_contiguous() + assert s.is_contiguous() + ctx.save_for_backward(r, k, v, w, u, s) + y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) + return y +
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
1a44eede5f6bd9610944d7c4787a8f2f867a12d6
state-tuning (not working yet)
<21>:<add> if os.environ["RWKV_TRAIN_TYPE"] == 'states': <add> self.att = RWKV_Tmix_x060_state(args, layer_id) <add> else: <add> self.att = RWKV_Tmix_x060(args, layer_id) <del> self.att = RWKV_Tmix_x060(args, layer_id)
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060a' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060a(args, layer_id) <18> elif 'x060b' in os.environ["RWKV_MY_TESTING"]: <19> self.att = RWKV_Tmix_x060b(args, layer_id) <20> elif 'x060' in os.environ["RWKV_MY_TESTING"]: <21> self.att = RWKV_Tmix_x060(args, layer_id) <22> elif 'x052' in os.environ["RWKV_MY_TESTING"]: <23> self.att = RWKV_Tmix_x052(args, layer_id) <24> elif 'mamba' in os.environ["RWKV_MY_TEST</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if 'g' in os.environ["RWKV_MY_TESTING"]: self.ffn = MishGLU(args, layer_id) elif 'x060' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.ffn = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(</s> ===========below chunk 1=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 2 <s>len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========unchanged ref 0=========== at: RWKV-v5.src.model MyModule = nn.Module MyModule = torch.jit.ScriptModule RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u) at: RWKV-v5.src.model.RWKV_Tmix_x060b jit_func_2(x) jit_func_2(self, x) at: RWKV-v5.src.model.RWKV_Tmix_x060b.__init__ self.time_faaaa = nn.Parameter(tmp.reshape(self.n_head, self.head_size)) at: RWKV-v5.src.model.RWKV_Tmix_x060b.forward B, T, C = x.size() B, T, C = x.size() B, T, C = x.size() H = self.n_head r, k, v, w = self.jit_func(x) r, k, v, w = self.jit_func(x) r, k, v, w = self.jit_func(x) r, k, v, w = self.jit_func(x) at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u=self.time_faaaa, s=self.time_state) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + 
######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w +
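The state-tuning variant above adds a learnable per-layer initial state (time_state, handed to the kernel as s). Combined with the requires_grad guard added to configure_optimizers in the next record, the intended recipe is presumably to freeze every weight and train only those states; a hedged sketch of that freezing step (the helper is hypothetical and assumes time_state is registered as an nn.Parameter):

def freeze_all_but_state(model):
    # hypothetical helper: keep gradients only for the per-layer time_state tensors
    for name, param in model.named_parameters():
        param.requires_grad = name.endswith("time_state")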
RWKV-v5.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
1a44eede5f6bd9610944d7c4787a8f2f867a12d6
state-tuning (not working yet)
<7>:<add> if not p.requires_grad: <add> continue
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> if (("_w1" in n) or ("_w2" in n)) and (args.layerwise_lr > 0): <8> lr_1x.add(n) <9> elif (("time_mix" in n) or ("time_maa" in n)) and (args.layerwise_lr > 0): <10> if args.my_pile_stage == 2: <11> lr_2x.add(n) <12> else: <13> lr_1x.add(n) <14> elif (("time_decay" in n) or ("time_daaaa" in n)) and (args.layerwise_lr > 0): <15> if args.my_pile_stage == 2: <16> lr_3x.add(n) <17> else: <18> lr_2x.add(n) <19> elif ("time_faaaa" in n) and (args.layerwise_lr > 0): <20> if args.my_pile_stage == 2: <21> lr_2x.add(n) <22> else: <23> lr_1x.add(n) <24> elif ("time_first" in n) and (args.layerwise_lr > 0): <25> lr_3x.add(n) <26> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <27> lr_decay.add(n) <28> else: <29> lr_1x.add(n) <30> <31> lr_decay = sorted(list(lr_decay)) <32> lr_1x = sorted(list(lr_1x)) <33> lr_2x = sorted(list(lr_2x)) <34> lr</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 # print('decay', lr_decay) # print('1x', lr_1x) # print('2x', lr_2x) # print('3x', lr_3x) param_dict = {n: p for n, p in self.named_parameters()} if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s> if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: RWKV-v5.src.model RWKV_CMix_x052(args, layer_id) ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u=self.time_faaaa, s=self.time_state) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, 
mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w +
RWKV-v5.src.dataset/MyDataset.__init__
Modified
BlinkDL~RWKV-LM
1a44eede5f6bd9610944d7c4787a8f2f867a12d6
state-tuning (not working yet)
# module: RWKV-v5.src.dataset class MyDataset(Dataset): def __init__(self, args): <0> self.args = args <1> <2> if args.data_type == "binidx": <3> self.vocab_size = args.vocab_size <4> rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") <5> <6> if args.my_pile_version == 1: <7> self.data = MMapIndexedDataset(args.data_file) <8> self.data_size = len(self.data._bin_buffer) // self.data._index._dtype_size <9> rank_zero_info(f"Data has {self.data_size} tokens.") <10> elif args.my_pile_version == 2: <11> data_list = open(args.data_file, "r", encoding='utf-8').read().strip().split('\n') <12> data_list = [i.strip().split(' ') for i in data_list] <13> self.data = [] <14> self.data_size = int(data_list[-1][-1]) <15> rank_zero_info(f"Data has {self.data_size} chunks.") <16> for d in data_list: <17> data = MMapIndexedDataset(d[0]) <18> data_size = len(data._bin_buffer) // data._index._dtype_size <19> assert (data_size - args.ctx_len) == int(d[1]) <20> self.data += [[int(d[-1]), int(d[1]), data]] <21> # rank_zero_info(self.data) <22> <23> if args.my_qa_mask > 0: <24> # self.data_pile = MMapIndexedDataset('/fsx/pile/pile_20B_tokenizer_text_document') <25> self.data_pile = MMapIndexedDataset('/fsx/pile_deduped/pile_0.87_deduped_text_document') <26> self.</s>
===========below chunk 0=========== # module: RWKV-v5.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 1 else: self.data_pile = None self.data_pile_size = 0 if args.my_pile_stage > 0: # assert self.data_size == 332115325534 and self.vocab_size == 50277 self.samples_per_epoch = args.epoch_steps * args.real_bsz assert self.samples_per_epoch == 40320 rank_zero_info(f"########## Pile 20b-tokenized stage {args.my_pile_stage} ##########") dataset_slot = self.data_size // args.ctx_len if args.my_pile_stage != 4: assert MaybeIsPrime(args.magic_prime) assert args.magic_prime % 3 == 2 assert args.magic_prime / dataset_slot > 0.99 and args.magic_prime / dataset_slot <= 1 elif args.data_type == "numpy": self.data = np.load(args.data_file).astype("int") self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens.") elif args.data_type == "uint16": self.data = np.fromfile(args.data_file, dtype=np.uint16).astype("int32").reshape(-1, args.my_sample_len) self.vocab_size = args.vocab_size rank_zero_info(f"Current vocab size = {self.vocab_size} (make sure it's correct)") self.data_size = self.data.shape[0] rank_zero_info(f"Data has {self.data_size} samples.") else: if args.</s> ===========below chunk 1=========== # module: RWKV-v5.src.dataset class MyDataset(Dataset): def __init__(self, args): # offset: 2 <s> rank_zero_info(f"Data has {self.data_size} samples.") else: if args.data_type == "dummy": rank_zero_info("Building dummy data...") self.data = "" for i in range(100000): aa = (i) % 10000 bb = (i * i) % 10000 cc = aa + bb self.data += f".{aa}+{bb}={cc}." else: self.data = open(args.data_file, "r", encoding=args.data_type).read() rank_zero_info("Building token list...") unique = sorted(list(set(self.data))) self.vocab_size = len(unique) # rank_zero_info() # for u in unique: # print(u, end=' ') # rank_zero_info('\n\n') xx = 0 xxObj = {} for u in unique: xxObj[xx] = u xx += 1 with open(f"{args.proj_dir}/vocab.json", "w", encoding="utf-8") as vocab_file: vocab_file.write(json.dumps(xxObj, ensure_ascii=False)) self.data_size = len(self.data) rank_zero_info(f"Data has {self.data_size} tokens, {self.vocab_size} vocab size.") self.stoi = {ch: i for i, ch in enumerate(unique)} self.itos = {i: ch for i, ch in enumerate(unique)} ===========unchanged ref 0=========== at: RWKV-v5.src.binidx MMapIndexedDataset(path, skip_warmup=False) at: RWKV-v5.src.binidx.MMapIndexedDataset.Index.__init__ self._dtype_size = self._dtype().itemsize at: RWKV-v5.src.binidx.MMapIndexedDataset.__init__ self._index = None self._bin_buffer = None at: RWKV-v5.src.binidx.MMapIndexedDataset._do_init self._index = self.Index(index_file_path(self._path), skip_warmup) self._bin_buffer = memoryview(self._bin_buffer_mmap) at: RWKV-v5.src.utils MaybeIsPrime(number) at: io.BufferedWriter read(self, size: Optional[int]=..., /) -> bytes at: typing.IO __slots__ = () read(n: int=...) 
-> AnyStr ===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func_2(self, x, g): + B, T, C = x.size() + x = x.view(B * T, C) + + x = self.ln_x(x).view(B, T, C) + x = self.output(x * g) + return x + ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + def forward(self, x): + B, T, C = x.size() + H = self.n_head + + r, k, v, g, w = self.jit_func(x) + x = RUN_CUDA_RWKV6_STATE(B, T, C, H, r, k, v, w, u=self.time_faaaa, s=self.time_state) + + return self.jit_func_2(x, g) + ===========changed ref 2=========== # module: RWKV-v5.src.model + ######################################################################################################## + + class RWKV_Tmix_x060_state(MyModule): + @MyFunction + def jit_func(self, x): + B, T, C = x.size() + + xx = self.time_shift(x) - x + + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) + + xw = x + xx * (self.time_maa_w + mw) + xk = x + xx * (self.time_maa_k + mk) + xv = x + xx * (self.time_maa_v + mv) + xr = x + xx * (self.time_maa_r + mr) + xg = x + xx * (self.time_maa_g + mg) + + r = self.receptance(xr) + k = self.key(xk) + v = self.value(xv) + g = F.silu(self.gate(xg)) + + ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 + w = self.time_decay + ww + + return r, k, v, g, w +
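In the `my_pile_version == 2` branch above, `args.data_file` is a plain-text manifest rather than a single binidx prefix. From the asserts, each line appears to carry the binidx prefix in the first field, the chunk's usable length (`data_size - ctx_len`) in the second, and a cumulative offset in the last field; the exact column layout is an assumption. A small stand-alone reading of that parsing logic:

```python
# Hedged helper mirroring the parsing shown above; column semantics are inferred
# from the asserts and may not match the real manifest spec exactly.
def parse_pile_v2_manifest(path):
    rows = []
    with open(path, "r", encoding="utf-8") as f:
        for line in f.read().strip().split("\n"):
            parts = line.strip().split(" ")
            rows.append((parts[0], int(parts[1]), int(parts[-1])))  # (binidx prefix, usable length, cumulative offset)
    total = rows[-1][2]   # matches data_size = int(data_list[-1][-1]) in the code above
    return rows, total
```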
RWKV-v5.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
5d856c2ce27709d1ef715608f4701f9605956376
still testing state-tuning...
<7>:<add> <add> # if not p.requires_grad: <del> if not p.requires_grad: <8>:<add> # continue <del> continue <9>:<add> if args.train_type == 'states': <add> if 'time_state' not in n: <add> continue <add>
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> if not p.requires_grad: <8> continue <9> if (("_w1" in n) or ("_w2" in n)) and (args.layerwise_lr > 0): <10> lr_1x.add(n) <11> elif (("time_mix" in n) or ("time_maa" in n)) and (args.layerwise_lr > 0): <12> if args.my_pile_stage == 2: <13> lr_2x.add(n) <14> else: <15> lr_1x.add(n) <16> elif (("time_decay" in n) or ("time_daaaa" in n)) and (args.layerwise_lr > 0): <17> if args.my_pile_stage == 2: <18> lr_3x.add(n) <19> else: <20> lr_2x.add(n) <21> elif ("time_faaaa" in n) and (args.layerwise_lr > 0): <22> if args.my_pile_stage == 2: <23> lr_2x.add(n) <24> else: <25> lr_1x.add(n) <26> elif ("time_first" in n) and (args.layerwise_lr > 0): <27> lr_3x.add(n) <28> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <29> lr_decay.add(n) <30> else: <31> lr_1x.add(n) <32> <33> lr_decay = sorted(list(lr_decay)) <34> lr_1x = sorted(list(lr_1x)) <35> lr_2</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 lr_3x = sorted(list(lr_3x)) if self.trainer.is_global_zero: print('decay', lr_decay, '\n') print('1x', lr_1x, '\n') print('2x', lr_2x, '\n') print('3x', lr_3x, '\n') param_dict = {n: p for n, p in self.named_parameters()} if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s>params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args
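The branch above keys off `args.train_type`, which is not defined anywhere in this excerpt; presumably it is an ordinary launcher flag along these lines (the flag name comes from the code, but the default value shown is an assumption):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--train_type", default="", type=str)  # "" = normal finetune, "states" = tune initial states only

args = parser.parse_args(["--train_type", "states"])
assert args.train_type == "states"
```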
RWKV-v5.src.model/RWKV.configure_optimizers
Modified
BlinkDL~RWKV-LM
64b7fe4c66fcf7da37019630268075b0558f6dc5
better state training
<11>:<add> if 'time_sta' not in n: <del> if 'time_state' not in n: <16>:<add> elif (("time_sta" in n) and (args.weight_decay > 0)): <add> lr_decay.add(n)
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): <0> args = self.args <1> <2> lr_decay = set() <3> lr_1x = set() <4> lr_2x = set() <5> lr_3x = set() <6> for n, p in self.named_parameters(): <7> <8> # if not p.requires_grad: <9> # continue <10> if args.train_type == 'states': <11> if 'time_state' not in n: <12> continue <13> <14> if (("_w1" in n) or ("_w2" in n)) and (args.layerwise_lr > 0): <15> lr_1x.add(n) <16> elif (("time_mix" in n) or ("time_maa" in n)) and (args.layerwise_lr > 0): <17> if args.my_pile_stage == 2: <18> lr_2x.add(n) <19> else: <20> lr_1x.add(n) <21> elif (("time_decay" in n) or ("time_daaaa" in n)) and (args.layerwise_lr > 0): <22> if args.my_pile_stage == 2: <23> lr_3x.add(n) <24> else: <25> lr_2x.add(n) <26> elif ("time_faaaa" in n) and (args.layerwise_lr > 0): <27> if args.my_pile_stage == 2: <28> lr_2x.add(n) <29> else: <30> lr_1x.add(n) <31> elif ("time_first" in n) and (args.layerwise_lr > 0): <32> lr_3x.add(n) <33> elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): <34> lr_decay.add(n) <35> else: <36> lr_1x.add(n) <37> </s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 lr_1x = sorted(list(lr_1x)) lr_2x = sorted(list(lr_2x)) lr_3x = sorted(list(lr_3x)) if self.trainer.is_global_zero: print('decay', lr_decay, '\n') print('1x', lr_1x, '\n') print('2x', lr_2x, '\n') print('3x', lr_3x, '\n') param_dict = {n: p for n, p in self.named_parameters()} if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups =</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s>decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False) ===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV.__init__ self.args = args
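Net effect of the rename: during state tuning only parameters whose names contain `time_sta` are optimized at all, and those go into the weight-decay group whenever `weight_decay > 0`. A compact, hypothetical helper expressing the same grouping rule outside the LightningModule:

```python
# Hypothetical helper reproducing the state-tuning grouping rule shown above.
def select_state_tuning_params(named_params, weight_decay=0.0):
    decay, no_decay = [], []
    for name, p in named_params:
        if "time_sta" not in name:      # only the per-layer initial states are trained
            continue
        (decay if weight_decay > 0 else no_decay).append(p)
    groups = []
    if no_decay:
        groups.append({"params": no_decay, "weight_decay": 0.0, "my_lr_scale": 1.0})
    if decay:
        groups.append({"params": decay, "weight_decay": weight_decay, "my_lr_scale": 1.0})
    return groups
```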
RWKV-v5.src.trainer/my_save
Modified
BlinkDL~RWKV-LM
64b7fe4c66fcf7da37019630268075b0558f6dc5
better state training
<15>:<add> if args.train_type == 'states': <add> ddd = {} <add> for k, v in dd.items(): <add> if 'time_sta' in k: <add> ddd[k] = v.clone() <add> torch.save(ddd, ff) <add> else: <add> torch.save(dd, ff) <del> torch.save(dd, ff)
# module: RWKV-v5.src.trainer def my_save(args, trainer, dd, ff): <0> if '14b-run1' in ff: <1> fn = ff.split('/')[-1] <2> fff = '/dev/shm/' + fn <3> torch.save(dd, fff) <4> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-14b-4k/{fn} --quiet", shell=True) <5> elif ('world/14b' in ff) or ('world/7b' in ff): <6> aa = ff.split('/')[1] <7> fn = ff.split('/')[-1] <8> fff = f'/dev/shm/{aa}-{fn}' <9> torch.save(dd, fff) <10> subprocess.Popen(f" aws s3 mv {fff} s3://rwkv-world/{aa}-{fn} --quiet", shell=True) <11> else: <12> if 'deepspeed_stage_3' in args.strategy: <13> trainer.save_checkpoint(ff, weights_only=True) <14> else: <15> torch.save(dd, ff) <16>
===========unchanged ref 0=========== at: subprocess Popen() at: torch.serialization save(obj: object, f: FILE_LIKE, pickle_module: Any=pickle, pickle_protocol: int=DEFAULT_PROTOCOL, _use_new_zipfile_serialization: bool=True, _disable_byteorder_record: bool=False) -> None ===========changed ref 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): args = self.args lr_decay = set() lr_1x = set() lr_2x = set() lr_3x = set() for n, p in self.named_parameters(): # if not p.requires_grad: # continue if args.train_type == 'states': + if 'time_sta' not in n: - if 'time_state' not in n: continue if (("_w1" in n) or ("_w2" in n)) and (args.layerwise_lr > 0): lr_1x.add(n) + elif (("time_sta" in n) and (args.weight_decay > 0)): + lr_decay.add(n) elif (("time_mix" in n) or ("time_maa" in n)) and (args.layerwise_lr > 0): if args.my_pile_stage == 2: lr_2x.add(n) else: lr_1x.add(n) elif (("time_decay" in n) or ("time_daaaa" in n)) and (args.layerwise_lr > 0): if args.my_pile_stage == 2: lr_3x.add(n) else: lr_2x.add(n) elif ("time_faaaa" in n) and (args.layerwise_lr > 0): if args.my_pile_stage == 2: lr_2x.add(n) else: lr_1x.add(n) elif ("time_first" in n) and (args.layerwise_lr > 0): lr_3x.add(n) elif (len(p.squeeze().shape) >= 2) and (args.weight_decay > 0): lr_decay.add(n) else:</s> ===========changed ref 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 1 <s>().shape) >= 2) and (args.weight_decay > 0): lr_decay.add(n) else: lr_1x.add(n) lr_decay = sorted(list(lr_decay)) lr_1x = sorted(list(lr_1x)) lr_2x = sorted(list(lr_2x)) lr_3x = sorted(list(lr_3x)) if self.trainer.is_global_zero: print('decay', lr_decay, '\n') print('1x', lr_1x, '\n') print('2x', lr_2x, '\n') print('3x', lr_3x, '\n') param_dict = {n: p for n, p in self.named_parameters()} if args.layerwise_lr > 0: if args.my_pile_stage == 2: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"params": [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 2e-3 / args.lr_init}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 5.0},# test: 3e-3 / args.lr_init}, ] else: optim_groups = [ {"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}, {"</s> ===========changed ref 2=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 2 <s> [param_dict[n] for n in lr_2x], "weight_decay": 0.0, "my_lr_scale": 2.0}, {"params": [param_dict[n] for n in lr_3x], "weight_decay": 0.0, "my_lr_scale": 3.0}, ] else: optim_groups = [{"params": [param_dict[n] for n in lr_1x], "weight_decay": 0.0, "my_lr_scale": 1.0}] if args.weight_decay > 0: optim_groups += [{"params": [param_dict[n] for n in lr_decay], "weight_decay": args.weight_decay, "my_lr_scale": 1.0}] if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=True, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=True, amsgrad=False) else: if self.deepspeed_offload: return DeepSpeedCPUAdam(optim_groups, lr=self.args.lr_init, betas=self.args.betas, eps=self.args.adam_eps, bias_correction=True, adamw_mode=False, weight_decay=0, amsgrad=False) return FusedAdam(optim_groups, lr=self.args.lr_init, 
betas=self.args</s> ===========changed ref 3=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def configure_optimizers(self): # offset: 3 <s>etas, eps=self.args.adam_eps, bias_correction=True, adam_w_mode=False, weight_decay=0, amsgrad=False)
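So for `train_type == 'states'` an epoch checkpoint contains only the trained initial states, not the full model. The same filtering as a stand-alone sketch (hypothetical function name, logic as in `my_save` above):

```python
import torch

def save_states_only(state_dict, path):
    # keep only the trained per-layer initial states, as in my_save() above
    states = {k: v.clone() for k, v in state_dict.items() if "time_sta" in k}
    torch.save(states, path)

# usage sketch: save_states_only(model.state_dict(), "rwkv-states.pth")
```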
RWKV-v5.src.model/RWKV_Tmix_x060c.jit_func
Modified
BlinkDL~RWKV-LM
bf86e0cec3bc876aa93305200d271fc1f0c43e92
.
<19>:<add> # k = k * (1-(-w.exp()).exp()) # for fp32 <del> k = k * (1-(-w.exp()).exp()) <20>:<add> k = k * (1-(-w.float().exp()).exp()).to(dtype=torch.bfloat16) # for bf16
# module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060c(MyModule): @MyFunction def jit_func(self, x): <0> B, T, C = x.size() <1> <2> xx = self.time_shift(x) - x <3> <4> xxx = x + xx * self.time_maa_x <5> xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) <6> xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) <7> <8> r, k, v, w = xxx.unbind(dim=0) <9> r = x + xx * (self.time_maa_r + r) <10> k = x + xx * (self.time_maa_k + k) <11> v = x + xx * (self.time_maa_v + v) <12> w = x + xx * (self.time_maa_w + w) <13> <14> r = self.receptance(r) <15> k = self.key(k) <16> v = self.value(v) <17> w = self.time_decay + torch.tanh(w @ self.time_decay_w1) @ self.time_decay_w2 <18> <19> k = k * (1-(-w.exp()).exp()) <20> <21> return r, k, v, w <22>
===========unchanged ref 0=========== at: RWKV-v5.src.model MyFunction = torch.jit.script_method MyFunction = __nop at: RWKV-v5.src.model.RWKV_Tmix_x060c.__init__ self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.5 * ratio_1_to_almost0)) self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, ratio_1_to_almost0) + 0.3 * ratio_0_to_1)) self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.time_maa_rkvw_w1 = nn.Parameter(torch.zeros(args.n_embd, D_MIX_LORA*4)) self.time_maa_rkvw_w2 = nn.Parameter(torch.zeros(4, D_MIX_LORA, args.n_embd).uniform_(-0.01, 0.01)) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(torch.zeros(D_DECAY_LORA, args.dim_att).uniform_(-0.01, 0.01)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) ===========unchanged ref 1=========== self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) at: torch._C bfloat16: dtype = ... at: torch._C._VariableFunctions bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor
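The reason for the fp32 detour in the new line: in bf16, `1 - exp(-exp(w))` cancels catastrophically when the decay is close to 1 (very negative `w`), because values just below 1 round to 1 with only ~8 mantissa bits. A small self-contained check of the effect:

```python
import torch

w = torch.linspace(-8.0, 2.0, 11)                                    # raw log-decay values
exact  = 1 - torch.exp(-torch.exp(w))                                 # fp32 reference
naive  = (1 - torch.exp(-torch.exp(w.bfloat16()))).float()            # all-bf16 path
casted = (1 - torch.exp(-torch.exp(w.bfloat16().float()))).bfloat16().float()  # fp32 compute, bf16 result

rel = lambda a: ((a - exact).abs() / exact.abs().clamp_min(1e-30)).max().item()
print(f"bf16 throughout  max rel err: {rel(naive):.3f}")   # ~1.0: the factor collapses to 0 for strong decay
print(f"fp32 then cast   max rel err: {rel(casted):.3f}")  # small: only the final bf16 rounding remains
```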
RWKV-v5.src.model/WKV_6.forward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<1>:<add> B, T, C = r.size() <add> H = C // HEAD_SIZE <add> assert C % HEAD_SIZE == 0 <6>:<del> assert HEAD_SIZE == C // H <18>:<add> torch.ops.wkv6.forward(B, T, C, H, r, k, v, w, u, y) <del> wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y)
# module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod + def forward(ctx, r, k, v, w, u): - def forward(ctx, B, T, C, H, r, k, v, w, u): <0> with torch.no_grad(): <1> assert r.dtype == torch.bfloat16 <2> assert k.dtype == torch.bfloat16 <3> assert v.dtype == torch.bfloat16 <4> assert w.dtype == torch.bfloat16 <5> assert u.dtype == torch.bfloat16 <6> assert HEAD_SIZE == C // H <7> ctx.B = B <8> ctx.T = T <9> ctx.C = C <10> ctx.H = H <11> assert r.is_contiguous() <12> assert k.is_contiguous() <13> assert v.is_contiguous() <14> assert w.is_contiguous() <15> assert u.is_contiguous() <16> ctx.save_for_backward(r, k, v, w, u) <17> y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <18> wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) <19> return y <20>
===========unchanged ref 0=========== at: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.function._SingleLevelFunction forward(ctx: Any, *args: Any, **kwargs: Any) -> Any vjp = backward ===========unchanged ref 1=========== at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'states': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state</s> ===========changed ref 1=========== # module: RWKV-v5.src.model # offset: 1 <s>bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 
100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False</s>
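Calling the kernel through `torch.ops.wkv6.forward` instead of the Python extension module returned by `load` is what the commit message refers to: TorchScript can resolve registered custom ops, but not attributes of an arbitrary Python module. A hedged sketch of how such an op is typically exposed; the actual `wkv6_op.cpp` registration and the full build flags are not shown in this record:

```python
# Sketch only: assumes the C++ side registers its kernels with
# TORCH_LIBRARY(wkv6, m) so they become visible under torch.ops.wkv6.*
from torch.utils.cpp_extension import load

load(
    name="wkv6",
    sources=["cuda/wkv6_op.cpp", "cuda/wkv6_cuda.cu"],   # paths assumed by analogy with the wkv6state build above
    is_python_module=False,   # don't import a Python module; rely on the op registration
    verbose=True,
)

# afterwards the scripted Function can call:
#   torch.ops.wkv6.forward(B, T, C, H, r, k, v, w, u, y)
```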
RWKV-v5.src.model/WKV_6.backward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<13>:<add> torch.ops.wkv6.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) <del> wkv6_cuda.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) <15>:<add> return (gr, gk, gv, gw, gu) <del> return (None, None, None, None, gr, gk, gv, gw, gu)
# module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): <0> with torch.no_grad(): <1> assert gy.dtype == torch.bfloat16 <2> B = ctx.B <3> T = ctx.T <4> C = ctx.C <5> H = ctx.H <6> assert gy.is_contiguous() <7> r, k, v, w, u = ctx.saved_tensors <8> gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <9> gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <10> gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <11> gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <12> gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) <13> wkv6_cuda.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) <14> gu = torch.sum(gu, 0).view(H, C//H) <15> return (None, None, None, None, gr, gk, gv, gw, gu) <16>
===========unchanged ref 0=========== at: RWKV-v5.src.model.WKV_6.forward y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) at: torch._C bfloat16: dtype = ... contiguous_format: memory_format = ... at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 1=========== at: torch._ops ops = _Ops() at: torch.autograd.function._SingleLevelFunction backward(ctx: Any, *grad_outputs: Any) -> Any at: torch.autograd.grad_mode no_grad() ===========changed ref 0=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod + def forward(ctx, r, k, v, w, u): - def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): + B, T, C = r.size() + H = C // HEAD_SIZE + assert C % HEAD_SIZE == 0 assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 - assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ctx.save_for_backward(r, k, v, w, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + torch.ops.wkv6.forward(B, T, C, H, r, k, v, w, u, y) - wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) return y ===========changed ref 1=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'states': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert 
r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state</s> ===========changed ref 2=========== # module: RWKV-v5.src.model # offset: 1 <s>bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state_cuda.forward(B, T, C, H, r, k, v, w, u, s, y) return y @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u, s = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gs = torch.empty((B, H, C//H, C//H), device=gy.device, requires_grad=False</s>
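Dropping `B, T, C, H` from `forward`'s signature also changes `backward`'s return arity: autograd expects exactly one gradient per `forward` argument, so the four leading `None`s that stood in for the integer arguments disappear. A toy, self-contained illustration of that contract (not the real kernel):

```python
import torch

class ToyWKV(torch.autograd.Function):
    @staticmethod
    def forward(ctx, r, k, v, w, u):
        ctx.save_for_backward(r, k, v, w, u)
        return r * k + v * w + u              # stand-in for the fused CUDA kernel

    @staticmethod
    def backward(ctx, gy):
        r, k, v, w, u = ctx.saved_tensors
        # one gradient per tensor argument of forward(); no placeholder Nones needed
        return gy * k, gy * r, gy * w, gy * v, gy

r, k, v, w, u = (torch.randn(2, 3, requires_grad=True) for _ in range(5))
ToyWKV.apply(r, k, v, w, u).sum().backward()
print(u.grad.sum())   # gradients flow to every input, including u
```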
RWKV-v5.src.model/RUN_CUDA_RWKV6
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<0>:<add> return WKV_6.apply(r, k, v, w, u) <del> return WKV_6.apply(B, T, C, H, r, k, v, w, u)
# module: RWKV-v5.src.model + def RUN_CUDA_RWKV6(r, k, v, w, u): - def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): <0> return WKV_6.apply(B, T, C, H, r, k, v, w, u) <1>
===========unchanged ref 0=========== at: RWKV-v5.src.model.WKV_6.backward gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.sum(gu, 0).view(H, C//H) ===========changed ref 0=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod + def forward(ctx, r, k, v, w, u): - def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): + B, T, C = r.size() + H = C // HEAD_SIZE + assert C % HEAD_SIZE == 0 assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 - assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ctx.save_for_backward(r, k, v, w, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + torch.ops.wkv6.forward(B, T, C, H, r, k, v, w, u, y) - wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) return y ===========changed ref 1=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + torch.ops.wkv6.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) - wkv6_cuda.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) + return (gr, gk</s> ===========changed ref 2=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): # offset: 1 <s>) gu = torch.sum(gu, 0).view(H, C//H) + return (gr, gk, gv, gw, gu) - return (None, None, None, None, gr, gk, gv, gw, gu) ===========changed ref 3=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) if 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'states': wkv6state_cuda = load(name="wkv6state", 
sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization", f"-D_N_={HEAD_SIZE}", f"-D_T_={int(os.environ['RWKV_CTXLEN'])}"]) class WKV_6STATE(torch.autograd.Function): @staticmethod def forward(ctx, B, T, C, H, r, k, v, w, u, s): with torch.no_grad(): assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 assert s.dtype == torch.bfloat16 assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() assert s.is_contiguous() ctx.save_for_backward(r, k, v, w, u, s) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) wkv6state</s>
RWKV-v5.src.model/RWKV_Tmix_x060.forward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<1>:<del> H = self.n_head <3>:<del> r, k, v, g, w = self.jit_func(x) <4>:<del> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5>:<add> xx = self.time_shift(x) - x <6>:<add> xxx = x + xx * self.time_maa_x <add> xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) <add> xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) <add> mw, mk, mv, mr, mg = xxx.unbind(dim=0) <del> return self.jit_func_2(x, g)
# module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): + @MyFunction def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> <3> r, k, v, g, w = self.jit_func(x) <4> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5> <6> return self.jit_func_2(x, g) <7>
===========unchanged ref 0=========== at: RWKV-v5.src.model MyModule = nn.Module MyModule = torch.jit.ScriptModule at: torch.jit._script.ScriptModule __jit_unused_properties__ = [ "code", "code_with_constants", "graph", "inlined_graph", "original_name", ] __init__() __init__(self) forward: Callable[..., Any] = _CachedForward() # type: ignore[assignment] ===========changed ref 0=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x - ===========changed ref 1=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV6(r, k, v, w, u): - def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): + return WKV_6.apply(r, k, v, w, u) - return WKV_6.apply(B, T, C, H, r, k, v, w, u) ===========changed ref 2=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): - @MyFunction - def jit_func(self, x): - B, T, C = x.size() - - xx = self.time_shift(x) - x - - xxx = x + xx * self.time_maa_x - xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) - xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) - mw, mk, mv, mr, mg = xxx.unbind(dim=0) - - xw = x + xx * (self.time_maa_w + mw) - xk = x + xx * (self.time_maa_k + mk) - xv = x + xx * (self.time_maa_v + mv) - xr = x + xx * (self.time_maa_r + mr) - xg = x + xx * (self.time_maa_g + mg) - - r = self.receptance(xr) - k = self.key(xk) - v = self.value(xv) - g = F.silu(self.gate(xg)) - - ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 - w = self.time_decay + ww - - return r, k, v, g, w - ===========changed ref 3=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod + def forward(ctx, r, k, v, w, u): - def forward(ctx, B, T, C, H, r, k, v, w, u): with torch.no_grad(): + B, T, C = r.size() + H = C // HEAD_SIZE + assert C % HEAD_SIZE == 0 assert r.dtype == torch.bfloat16 assert k.dtype == torch.bfloat16 assert v.dtype == torch.bfloat16 assert w.dtype == torch.bfloat16 assert u.dtype == torch.bfloat16 - assert HEAD_SIZE == C // H ctx.B = B ctx.T = T ctx.C = C ctx.H = H assert r.is_contiguous() assert k.is_contiguous() assert v.is_contiguous() assert w.is_contiguous() assert u.is_contiguous() ctx.save_for_backward(r, k, v, w, u) y = torch.empty((B, T, C), device=r.device, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + torch.ops.wkv6.forward(B, T, C, H, r, k, v, w, u, y) - wkv6_cuda.forward(B, T, C, H, r, k, v, w, u, y) return y ===========changed ref 4=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): with torch.no_grad(): assert gy.dtype == torch.bfloat16 B = ctx.B T = ctx.T C = ctx.C H = ctx.H assert gy.is_contiguous() r, k, v, w, u = ctx.saved_tensors gr = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gk = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gv = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gw = torch.empty((B, T, C), device=gy.device, requires_grad=False, dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) gu = torch.empty((B, C), device=gy.device, requires_grad=False, 
dtype=torch.bfloat16, memory_format=torch.contiguous_format)#.uniform_(-100, 100) + torch.ops.wkv6.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) - wkv6_cuda.backward(B, T, C, H, r, k, v, w, u, gy, gr, gk, gv, gw, gu) gu = torch.sum(gu, 0).view(H, C//H) + return (gr, gk</s> ===========changed ref 5=========== # module: RWKV-v5.src.model class WKV_6(torch.autograd.Function): @staticmethod def backward(ctx, gy): # offset: 1 <s>) gu = torch.sum(gu, 0).view(H, C//H) + return (gr, gk, gv, gw, gu) - return (None, None, None, None, gr, gk, gv, gw, gu)
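With `jit_func`/`jit_func_2` folded into a single `@MyFunction forward`, the whole time-mix is one scripted method, and the first thing it computes is the data-dependent token-shift mix ("ddlerp"). Written out for a single mixing target below; the real code batches all five targets r/k/v/w/g through one `bmm`, as in the deleted `jit_func` above. This stand-alone helper is an illustrative sketch, not repository code:

```python
import torch

def ddlerp(x, mu_x, mu, w1, w2):
    """Data-dependent lerp between x and its token-shifted copy, for one target.
    x: (B, T, C); mu_x, mu: (C,); w1: (C, D); w2: (D, C) -- D is the small LoRA dim."""
    x_prev = torch.nn.functional.pad(x, (0, 0, 1, -1))   # same effect as nn.ZeroPad2d((0, 0, 1, -1))
    dx = x_prev - x
    lora = torch.tanh((x + dx * mu_x) @ w1) @ w2          # low-rank, input-dependent mixing offset
    return x + dx * (mu + lora)

# usage sketch
B, T, C, D = 2, 5, 16, 4
out = ddlerp(torch.randn(B, T, C), torch.zeros(C), torch.zeros(C),
             torch.zeros(C, D), torch.zeros(D, C))
```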
RWKV-v5.src.model/RWKV_Tmix_x060a.forward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<1>:<del> H = self.n_head <3>:<del> r, k, v, g, w = self.jit_func(x) <4>:<del> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5>:<add> xx = self.time_shift(x) - x <6>:<add> xxx = x + xx * self.time_maa_x <add> xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) <add> xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) <add> mw, mk, mv, mr, mg = xxx.unbind(dim=0) <del> return self.jit_func_2(x, g)
# module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): + @MyFunction def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> <3> r, k, v, g, w = self.jit_func(x) <4> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5> <6> return self.jit_func_2(x, g) <7>
===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV_Tmix_x060b.__init__ ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 1=========== pow(self: Union[Number, _complex], exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Union[Number, _complex], *, out: Optional[Tensor]=None) -> Tensor pow(input: Tensor, exponent: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x - ===========changed ref 1=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x - ===========changed ref 2=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): - @MyFunction - def jit_func(self, x): - B, T, C = x.size() - - xx = self.time_shift(x) - x - - xxx = x + xx * self.time_maa_x - xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) - xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) - mw, mk, mv, mr, mg = xxx.unbind(dim=0) - - xw = x + xx * (self.time_maa_w + mw) - xk = x + xx * (self.time_maa_k + mk) - xv = x + xx * (self.time_maa_v + mv) - xr = x + xx * (self.time_maa_r + mr) - xg = x + xx * (self.time_maa_g + mg) - - r = self.receptance(xr) - k = self.key(xk) - v = self.value(xv) - g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 - - ww = torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2 - w = self.time_decay + ww - - return r, k, v, g, w - ===========changed ref 3=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): + @MyFunction def forward(self, x): B, T, C = x.size() - H = self.n_head - r, k, v, g, w = self.jit_func(x) - x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + xx = self.time_shift(x) - x + xxx = x + xx * 
self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) - return self.jit_func_2(x, g) ===========changed ref 4=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV6(r, k, v, w, u): - def RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u): + return WKV_6.apply(r, k, v, w, u) - return WKV_6.apply(B, T, C, H, r, k, v, w, u)
RWKV-v5.src.model/RWKV_Tmix_x060b.forward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<1>:<del> H = self.n_head <3>:<del> r, k, v, w = self.jit_func(x) <4>:<del> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5>:<add> xx = self.time_shift(x) - x <6>:<add> xxx = x + xx * self.time_maa_x <add> xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) <add> xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) <del> return self.jit_func_2(x)
# module: RWKV-v5.src.model + ######################################################################################################## class RWKV_Tmix_x060b(MyModule): + @MyFunction def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> <3> r, k, v, w = self.jit_func(x) <4> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5> <6> return self.jit_func_2(x) <7>
===========unchanged ref 0=========== at: RWKV-v5.src.model.RWKV_Tmix_x060c.__init__ ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ===========unchanged ref 1=========== zeros(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor zeros(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) 
===========changed ref 0=========== # module: RWKV-v5.src.model + ######################################################################################################## class RWKV_Tmix_x060b(MyModule): - @MyFunction - def jit_func_2(self, x): - x = self.ln_x(x) - x = self.output(x) - return x - ===========changed ref 1=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x - ===========changed ref 2=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): + @MyFunction def forward(self, x): B, T, C = x.size() - H = self.n_head - r, k, v, g, w = self.jit_func(x) - x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + xx = self.time_shift(x) - x + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) - return self.jit_func_2(x, g) ===========changed ref 3=========== # module: RWKV-v5.src.model + ######################################################################################################## class RWKV_Tmix_x060b(MyModule): - @MyFunction - def jit_func(self, x): - B, T, C = x.size() - - xx = self.time_shift(x) - x - - xxx = x + xx * self.time_maa_x - xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) - xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) - - r, k, v, w = xxx.unbind(dim=0) - r = x + xx * (self.time_maa_r + r) - k = x + xx * (self.time_maa_k + k) - v = x + xx * (self.time_maa_v + v) - w = x + xx * (self.time_maa_w + w) - - r = self.receptance(r) - k = self.key(k) - v = self.value(v) - w = self.time_decay + torch.tanh(w @ self.time_decay_w1) @ self.time_decay_w2 - return r, k, v, w - ===========changed ref 4=========== # module: RWKV-v5.src.model class RWKV_Tmix_x060(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x -
RWKV-v5.src.model/RWKV_Tmix_x060c.forward
Modified
BlinkDL~RWKV-LM
86affd71c53425d1abc5398f940b09fd8c2a3a99
fully compatible with torch jit
<1>:<del> H = self.n_head <3>:<del> r, k, v, w = self.jit_func(x) <4>:<del> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5>:<add> xx = self.time_shift(x) - x <6>:<add> xxx = x + xx * self.time_maa_x <add> xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) <add> xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) <del> return self.jit_func_2(x)
# module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060c(MyModule): + @MyFunction def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> <3> r, k, v, w = self.jit_func(x) <4> x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) <5> <6> return self.jit_func_2(x) <7>
===========unchanged ref 0=========== at: torch._C._VariableFunctions ones(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(size: Sequence[Union[_int, SymInt]], *, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor ones(*size: _int, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.padding ZeroPad2d(padding: _size_4_t) ===========changed ref 0=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060c(MyModule): - @MyFunction - def jit_func_2(self, x): - x = self.ln_x(x) - x = self.output(x) - return x - ===========changed ref 1=========== # module: RWKV-v5.src.model + ######################################################################################################## class RWKV_Tmix_x060b(MyModule): - @MyFunction - def jit_func_2(self, x): - x = self.ln_x(x) - x = self.output(x) - return x - ===========changed ref 2=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): - @MyFunction - def jit_func_2(self, x, g): - B, T, C = x.size() - x = x.view(B * T, C) - - x = self.ln_x(x).view(B, T, C) - x = self.output(x * g) - return x - ===========changed ref 3=========== # module: RWKV-v5.src.model + ######################################################################################################## class RWKV_Tmix_x060b(MyModule): + @MyFunction def forward(self, x): B, T, C = x.size() - H = self.n_head - r, k, v, w = self.jit_func(x) - x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + xx = self.time_shift(x) - x + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) - return self.jit_func_2(x) ===========changed ref 4=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060c(MyModule): - @MyFunction - def jit_func(self, x): - B, T, C = x.size() - - xx = self.time_shift(x) - x - - xxx = x + xx * self.time_maa_x - xxx = torch.tanh(xxx @ self.time_maa_rkvw_w1).view(B*T, 4, -1).transpose(0, 1) - xxx = torch.bmm(xxx, self.time_maa_rkvw_w2).view(4, B, T, C) - - r, k, v, w = xxx.unbind(dim=0) - r = x + xx * (self.time_maa_r + r) - k = x + xx * (self.time_maa_k + k) - v = x + xx * (self.time_maa_v + v) - w = x + xx * (self.time_maa_w + w) - - r = self.receptance(r) - k = self.key(k) - v = self.value(v) - w = self.time_decay + torch.tanh(w @ 
self.time_decay_w1) @ self.time_decay_w2 - - # k = k * (1-(-w.exp()).exp()) # for fp32 - k = k * (1-(-w.float().exp()).exp()).to(dtype=torch.bfloat16) # for bf16 - - return r, k, v, w - ===========changed ref 5=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x060a(MyModule): + @MyFunction def forward(self, x): B, T, C = x.size() - H = self.n_head - r, k, v, g, w = self.jit_func(x) - x = RUN_CUDA_RWKV6(B, T, C, H, r, k, v, w, u=self.time_faaaa) + xx = self.time_shift(x) - x + xxx = x + xx * self.time_maa_x + xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 5, -1).transpose(0, 1) + xxx = torch.bmm(xxx, self.time_maa_w2).view(5, B, T, -1) + mw, mk, mv, mr, mg = xxx.unbind(dim=0) - return self.jit_func_2(x, g)
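The "fully compatible with torch jit" change above folds the jit_func helpers into one decorated forward. A minimal, hypothetical sketch of why a single-method body is convenient for TorchScript (TinyMix is an illustrative toy module, not the repository's class):

import torch
import torch.nn as nn

class TinyMix(nn.Module):
    def __init__(self, d):
        super().__init__()
        self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))   # shift each token so it sees its predecessor
        self.mix = nn.Parameter(torch.zeros(1, 1, d))
        self.proj = nn.Linear(d, d, bias=False)

    def forward(self, x):                                # everything in one scriptable method
        xx = self.time_shift(x) - x
        return self.proj(x + xx * self.mix)

m = torch.jit.script(TinyMix(8))                         # scripts the whole forward at once
y = m(torch.randn(2, 4, 8))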
RWKV-v5.src.trainer/train_callback.on_train_batch_start
Modified
BlinkDL~RWKV-LM
97c8aaee96ae7dee4c86535cf0c0e92b97595ab4
Update trainer.py
# module: RWKV-v5.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): <0> args = self.args <1> # if args.cuda_cleanup > 0: <2> # torch.cuda.empty_cache() <3> real_step = trainer.global_step + args.epoch_begin * args.epoch_steps <4> <5> # LR schedule <6> w_step = args.warmup_steps <7> if args.lr_final == args.lr_init or args.epoch_count == 0: <8> lr = args.lr_init <9> else: <10> decay_step = real_step - args.my_pile_edecay * args.epoch_steps <11> decay_total = (args.epoch_count - args.my_pile_edecay) * args.epoch_steps <12> progress = (decay_step - w_step + 1) / (decay_total - w_step) <13> progress = min(1, max(0, progress)) <14> <15> if args.lr_final == 0 or args.lr_init == 0: # linear decay <16> lr = args.lr_init + (args.lr_final - args.lr_init) * progress <17> else: # exp decay <18> lr = args.lr_init * math.exp(math.log(args.lr_final / args.lr_init) * pow(progress, 1)) <19> # if trainer.is_global_zero: <20> # print(trainer.global_step, decay_step, decay_total, w_step, progress, lr) <21> <22> if args.my_exit_tokens != 0: # cosine decay <23> real_tokens = real_step * args.ctx_len * args.real_bsz <24> warmup_tokens = w_step * args.ctx_len * args.real_bsz <25> progress = (real_tokens - warmup_tokens) / (abs(args.my_exit_tokens) - warmup_tokens) <26> progress</s>
===========below chunk 0=========== # module: RWKV-v5.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 1 lr_final_factor = args.lr_final / args.lr_init lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress) if args.my_exit_tokens > 0: lr = args.lr_init * lr_mult else: lr = (lr + args.lr_init * lr_mult) / 2 if progress >= 1: if (trainer.is_global_zero) or ('deepspeed_stage_3' in args.strategy): my_save( args, trainer, pl_module.state_dict(), f"{args.proj_dir}/rwkv-final.pth", ) exit(0) if trainer.global_step < w_step: lr = lr * (0.2 + 0.8 * trainer.global_step / w_step) if args.weight_decay_final > 0: wd_now = args.weight_decay * math.exp(math.log(args.weight_decay_final / args.weight_decay) * progress) else: wd_now = args.weight_decay for param_group in trainer.optimizers[0].param_groups: if param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_now if args.layerwise_lr > 0: param_group["lr"] = lr * param_group["my_lr_scale"] # print(param_group["lr"], param_group["my_lr_scale"]) else: param_group["lr"] = lr trainer.my_lr = lr trainer.my_wd = wd_now # rank_zero_info(f"{real_step} {lr}")</s> ===========below chunk 1=========== # module: RWKV-v5.src.trainer class train_callback(pl.Callback): def on_train_batch_start(self, trainer, pl_module, batch, batch_idx): # offset: 2 <s> trainer.my_wd = wd_now # rank_zero_info(f"{real_step} {lr}") if trainer.global_step == 0: if trainer.is_global_zero: # logging trainer.my_loss_sum = 0 trainer.my_loss_count = 0 trainer.my_log = open(args.proj_dir + "/train_log.txt", "a") trainer.my_log.write(f"NEW RUN {args.my_timestamp}\n{vars(self.args)}\n") try: print(f"\n{trainer.strategy.config}\n") trainer.my_log.write(f"{trainer.strategy.config}\n") except: pass trainer.my_log.flush() if len(args.wandb) > 0: print("Login to wandb...") import wandb wandb.init( project=args.wandb, name=args.run_name + " " + args.my_timestamp, config=args, save_code=False, ) trainer.my_wandb = wandb ===========unchanged ref 0=========== at: RWKV-v5.src.trainer my_save(args, trainer, dd, ff) at: RWKV-v5.src.trainer.train_callback.__init__ self.args = args at: math pi: float cos(x: SupportsFloat, /) -> float exp(x: SupportsFloat, /) -> float log(x: SupportsFloat, base: SupportsFloat=...) -> float
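The schedule in on_train_batch_start combines warmup, exponential decay and an optional cosine branch. A standalone rephrasing of just the warmup + cosine part, under assumed argument names (lr_at is not a function from the repo):

import math

def lr_at(step, lr_init, lr_final, warmup_steps, total_steps):
    if step < warmup_steps:                                   # warmup starts at 20% of lr_init
        return lr_init * (0.2 + 0.8 * step / warmup_steps)
    progress = (step - warmup_steps) / max(1, total_steps - warmup_steps)
    progress = min(1.0, max(0.0, progress))
    lr_final_factor = lr_final / lr_init
    lr_mult = (0.5 + lr_final_factor / 2) + (0.5 - lr_final_factor / 2) * math.cos(math.pi * progress)
    return lr_init * lr_mult                                  # lr_init at progress=0, lr_final at progress=1

print(lr_at(0, 6e-4, 6e-5, 10, 1000), lr_at(1000, 6e-4, 6e-5, 10, 1000))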
RWKV-v7.rwkv_v7_demo/RWKV_Tmix_x070.__init__
Modified
BlinkDL~RWKV-LM
bd74d483b3531388a43fc2fc0ca7ffe68143da13
rwkv-7 rc3
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> <8> with torch.no_grad(): <9> ddd = torch.empty(1, 1, args.n_embd) <10> self.time_maa_x = nn.Parameter(ddd) <11> self.time_maa_r = nn.Parameter(ddd) <12> self.time_maa_w = nn.Parameter(ddd) <13> self.time_maa_k = nn.Parameter(ddd) <14> self.time_maa_v = nn.Parameter(ddd) <15> self.time_maa_a = nn.Parameter(ddd) <16> self.time_maa_g = nn.Parameter(ddd) <17> <18> decay_speed = torch.empty(args.dim_att) <19> self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) <20> <21> self.time_faaaa = nn.Parameter(torch.empty(self.n_head,self.head_size)) <22> self.time_aaaaa = nn.Parameter(torch.empty(1,1,args.dim_att)) <23> <24> D_MIX_LORA = 32 <25> self.time_maa_w1 = nn.Parameter(torch.empty(args.n_embd, D_MIX_LORA*6)) <26> self.time_maa_w2 = nn.Parameter(torch.empty(6, D_MIX_L</s>
===========below chunk 0=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): # offset: 1 D_DECAY_LORA = 64 self.time_decay_w1 = nn.Parameter(torch.empty(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(torch.empty(D_DECAY_LORA, args.dim_att)) D_AAA_LORA = 64 self.time_aaa_w1 = nn.Parameter(torch.empty(args.n_embd, D_AAA_LORA)) self.time_aaa_w2 = nn.Parameter(torch.empty(D_AAA_LORA, args.dim_att)) D_KKK_LORA = 64 self.time_kkk_w1 = nn.Parameter(torch.empty(args.n_embd, D_KKK_LORA)) self.time_kkk_w2 = nn.Parameter(torch.empty(D_KKK_LORA, args.dim_att)) D_GATE_LORA = 128 self.gate_w1 = nn.Parameter(torch.empty(args.n_embd, D_GATE_LORA)) self.gate_w2 = nn.Parameter(torch.empty(D_GATE_LORA, args.dim_att)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias</s> ===========below chunk 1=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): # offset: 2 <s>att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2)) ===========unchanged ref 0=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.autograd.grad_mode no_grad() at: torch.nn.modules.module Module() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] ===========unchanged ref 1=========== _non_persistent_buffers_set: Set[str] _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, 
Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) ===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo + np.set_printoptions(precision=4, suppress=True, linewidth=200) + ''' + This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc2-2409-2r7a-b0b4a and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" - MODEL_PATH = "/mnt/e/rwkv-x070-rc2-172m-pile-30%trained-20240922-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304
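Most of the new parameters in this rc3 __init__ follow a single low-rank pattern: a small bottleneck producing a per-token offset around a learned base. A shape-only sketch under assumed sizes (the names in comments map to the record, the constants are illustrative):

import torch

B, T, C, R = 2, 5, 768, 64                 # R plays the role of D_DECAY_LORA / D_AAA_LORA / ...
x = torch.randn(B, T, C)
base = torch.zeros(1, 1, C)                # e.g. time_decay
w1 = torch.randn(C, R) * 0.01              # e.g. time_decay_w1
w2 = torch.randn(R, C) * 0.01              # e.g. time_decay_w2

w = base + torch.tanh(x @ w1) @ w2         # rank-R, data-dependent offset of shape (B, T, C)
assert w.shape == (B, T, C)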
RWKV-v7.rwkv_v7_demo/RWKV_Tmix_x070.forward
Modified
BlinkDL~RWKV-LM
bd74d483b3531388a43fc2fc0ca7ffe68143da13
rwkv-7 rc3
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> xx = self.time_shift(x) - x <3> <4> xxx = x + xx * self.time_maa_x <5> xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 6, -1).transpose(0, 1) <6> xxx = torch.bmm(xxx, self.time_maa_w2).view(6, B, T, -1) <7> mr, mw, mk, mv, ma, mg = xxx.unbind(dim=0) <8> <9> xr = x + xx * (self.time_maa_r + mr) <10> xw = x + xx * (self.time_maa_w + mw) <11> xk = x + xx * (self.time_maa_k + mk) <12> xv = x + xx * (self.time_maa_v + mv) <13> xa = x + xx * (self.time_maa_a + ma) <14> xg = x + xx * (self.time_maa_g + mg) <15> <16> r = self.receptance(xr) <17> w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 # soft-clamp to (-inf, -0.5) <18> k = self.key(xk) <19> v = self.value(xv) <20> g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 <21> <22> kk = k + torch.tanh(xk @ self.time_kkk_w1) @ self.time</s>
===========below chunk 0=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def forward(self, x): # offset: 1 kk = F.normalize(kk, dim=-1, p=2.0) a = torch.sigmoid( self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2 ) * 2.0 # a is "in-context learning rate" k = k * torch.clamp(w*0.5,max=0).exp() x = RUN_CUDA_RWKV7(r, w, k, v, -kk, kk*a) x = self.ln_x(x.view(B * T, C)).view(B, T, C) x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.time_faaaa).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C) x = self.output(x * g) return x ===========unchanged ref 0=========== at: RWKV-v7.rwkv_v7_demo.RWKV_Tmix_x070.__init__ self.n_head = args.dim_att // self.head_size self.time_maa_x = nn.Parameter(ddd) self.time_maa_r = nn.Parameter(ddd) self.time_maa_w = nn.Parameter(ddd) self.time_maa_k = nn.Parameter(ddd) self.time_maa_v = nn.Parameter(ddd) self.time_maa_a = nn.Parameter(ddd) self.time_maa_g = nn.Parameter(ddd) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) self.time_maa_w1 = nn.Parameter(torch.empty(args.n_embd, D_MIX_LORA*6)) self.time_maa_w2 = nn.Parameter(torch.empty(6, D_MIX_LORA, args.n_embd)) self.time_decay_w1 = nn.Parameter(torch.empty(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(torch.empty(D_DECAY_LORA, args.dim_att)) self.time_kkk_w1 = nn.Parameter(torch.empty(args.n_embd, D_KKK_LORA)) self.time_kkk_w2 = nn.Parameter(torch.empty(D_KKK_LORA, args.dim_att)) self.gate_w1 = nn.Parameter(torch.empty(args.n_embd, D_GATE_LORA)) ===========unchanged ref 1=========== self.gate_w2 = nn.Parameter(torch.empty(D_GATE_LORA, args.dim_att)) at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.modules.module.Module forward: Callable[..., Any] = _forward_unimplemented at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...) 
===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo + np.set_printoptions(precision=4, suppress=True, linewidth=200) + ''' + This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc2-2409-2r7a-b0b4a and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" - MODEL_PATH = "/mnt/e/rwkv-x070-rc2-172m-pile-30%trained-20240922-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304
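The forward above produces six interpolation offsets (mr, mw, mk, mv, ma, mg) with one matmul plus one bmm. A self-contained shape check of that trick, with illustrative constants rather than the real hyperparameters:

import torch

B, T, C, R, N = 2, 4, 64, 8, 6                         # N mixes: r, w, k, v, a, g
x  = torch.randn(B, T, C)
xx = torch.randn(B, T, C)                              # stand-in for time_shift(x) - x
w1 = torch.randn(C, R * N) * 0.01                      # plays the role of time_maa_w1
w2 = torch.randn(N, R, C) * 0.01                       # plays the role of time_maa_w2

m = torch.tanh((x + xx) @ w1).view(B * T, N, R).transpose(0, 1)   # (N, B*T, R)
m = torch.bmm(m, w2).view(N, B, T, C)                             # one bmm covers all N mixes
mr, mw, mk, mv, ma, mg = m.unbind(dim=0)
assert mr.shape == (B, T, C)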
RWKV-v7.rwkv_v7_demo/RWKV_Tmix_x070.__init__
Modified
BlinkDL~RWKV-LM
c453b42656baa206cf5e9441482c97448c0d221a
rwkv-7 rc4
<24>:<add> ### TOO MANY LORAs HERE. I WILL REMOVE MOST OF THEM IN RWKV-7 FINAL :) ### <add>
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.head_size = args.head_size_a <5> self.n_head = args.dim_att // self.head_size <6> assert args.dim_att % self.n_head == 0 <7> <8> with torch.no_grad(): <9> ddd = torch.empty(1, 1, args.n_embd) <10> self.time_maa_x = nn.Parameter(ddd) <11> self.time_maa_r = nn.Parameter(ddd) <12> self.time_maa_w = nn.Parameter(ddd) <13> self.time_maa_k = nn.Parameter(ddd) <14> self.time_maa_v = nn.Parameter(ddd) <15> self.time_maa_a = nn.Parameter(ddd) <16> self.time_maa_g = nn.Parameter(ddd) <17> <18> decay_speed = torch.empty(args.dim_att) <19> self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) <20> <21> self.time_faaaa = nn.Parameter(torch.empty(self.n_head,self.head_size)) <22> self.time_aaaaa = nn.Parameter(torch.empty(1,1,args.dim_att)) <23> <24> D_MIX_LORA = 32 <25> self.time_maa_w1 = nn.Parameter(torch.empty(args.n_embd, D_MIX_LORA*6)) <26> self.time_maa_w2 = nn.Parameter(torch.empty(6, D_MIX_L</s>
===========below chunk 0=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): # offset: 1 D_DECAY_LORA = 64 self.time_decay_w1 = nn.Parameter(torch.empty(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(torch.empty(D_DECAY_LORA, args.dim_att)) D_AAA_LORA = 64 self.time_aaa_w1 = nn.Parameter(torch.empty(args.n_embd, D_AAA_LORA)) self.time_aaa_w2 = nn.Parameter(torch.empty(D_AAA_LORA, args.dim_att)) D_KKK_LORA = 64 self.time_kkk_w1 = nn.Parameter(torch.empty(args.n_embd, D_KKK_LORA)) self.time_kkk_w2 = nn.Parameter(torch.empty(D_KKK_LORA, args.dim_att)) D_GATE_LORA = 128 self.gate_w1 = nn.Parameter(torch.empty(args.n_embd, D_GATE_LORA)) self.gate_w2 = nn.Parameter(torch.empty(D_GATE_LORA, args.dim_att)) D_MK_LORA = 16 self.mk_w1 = nn.Parameter(torch.empty(args.n_embd, D_MK_LORA)) self.mk_w2 = nn.Parameter(torch.empty(D_MK_LORA, args.dim_att)) D_MA_LORA = 16 self.ma_w1 = nn.Parameter(tor</s> ===========below chunk 1=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): def __init__(self, args, layer_id): # offset: 2 <s>.dim_att)) D_MA_LORA = 16 self.ma_w1 = nn.Parameter(torch.empty(args.n_embd, D_MA_LORA)) self.ma_w2 = nn.Parameter(torch.empty(D_MA_LORA, args.dim_att)) self.time_misc_k = nn.Parameter(torch.empty(1,1,args.n_embd)) self.time_misc_a = nn.Parameter(torch.empty(1,1,args.n_embd)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2)) ===========unchanged ref 0=========== at: torch._C._VariableFunctions empty(size: _size, *, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(*size: _int, names: Optional[Sequence[Union[str, ellipsis, None]]], memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor empty(size: Sequence[Union[_int, SymInt]], *, memory_format: Optional[memory_format]=None, out: Optional[Tensor]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: 
torch.autograd.grad_mode no_grad() at: torch.nn.modules.module.Module dump_patches: bool = False _version: int = 1 training: bool _parameters: Dict[str, Optional[Parameter]] _buffers: Dict[str, Optional[Tensor]] _non_persistent_buffers_set: Set[str] ===========unchanged ref 1=========== _backward_pre_hooks: Dict[int, Callable] _backward_hooks: Dict[int, Callable] _is_full_backward_hook: Optional[bool] _forward_hooks: Dict[int, Callable] _forward_hooks_with_kwargs: Dict[int, bool] _forward_hooks_always_called: Dict[int, bool] _forward_pre_hooks: Dict[int, Callable] _forward_pre_hooks_with_kwargs: Dict[int, bool] _state_dict_hooks: Dict[int, Callable] _load_state_dict_pre_hooks: Dict[int, Callable] _state_dict_pre_hooks: Dict[int, Callable] _load_state_dict_post_hooks: Dict[int, Callable] _modules: Dict[str, Optional['Module']] call_super_init: bool = False _compiled_call_impl : Optional[Callable] = None __init__(self) -> None __init__() -> None forward: Callable[..., Any] = _forward_unimplemented __call__ : Callable[..., Any] = _wrapped_call_impl T_destination = TypeVar('T_destination', bound=Dict[str, Any]) at: torch.nn.parameter Parameter(data: Tensor=..., requires_grad: builtins.bool=...)
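The "TOO MANY LORAs" note in the ground truth is about parameter budget; for a sense of scale, a rank-64 w1/w2 pair costs roughly one sixth of a dense 768x768 map. The numbers below are plain arithmetic, not measurements from the checkpoint:

n_embd, dim_att, r = 768, 768, 64
dense = n_embd * dim_att                   # 589824 parameters for one full matrix
lora  = n_embd * r + r * dim_att           #  98304 parameters for a w1/w2 pair
print(dense, lora, dense / lora)           # 589824 98304 6.0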
RWKV-v7.rwkv_v7_demo/RWKV_Tmix_x070.forward
Modified
BlinkDL~RWKV-LM
c453b42656baa206cf5e9441482c97448c0d221a
rwkv-7 rc4
<20>:<add> if self.layer_id == 0: <add> v0 = v <add> else: <add> v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_w2) <add> g = torch.sigmoid(xg @ self.gate_w1) @ self.gate_w2 <del> g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): + def forward(self, x, v0): - def forward(self, x): <0> B, T, C = x.size() <1> H = self.n_head <2> xx = self.time_shift(x) - x <3> <4> xxx = x + xx * self.time_maa_x <5> xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 6, -1).transpose(0, 1) <6> xxx = torch.bmm(xxx, self.time_maa_w2).view(6, B, T, -1) <7> mr, mw, mk, mv, ma, mg = xxx.unbind(dim=0) <8> <9> xr = x + xx * (self.time_maa_r + mr) <10> xw = x + xx * (self.time_maa_w + mw) <11> xk = x + xx * (self.time_maa_k + mk) <12> xv = x + xx * (self.time_maa_v + mv) <13> xa = x + xx * (self.time_maa_a + ma) <14> xg = x + xx * (self.time_maa_g + mg) <15> <16> r = self.receptance(xr) <17> w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 # soft-clamp to (-inf, -0.5) <18> k = self.key(xk) <19> v = self.value(xv) <20> g = torch.tanh(xg @ self.gate_w1) @ self.gate_w2 <21> <22> kk = k + torch.tanh(xk @ self</s>
===========below chunk 0=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): + def forward(self, x, v0): - def forward(self, x): # offset: 1 kk = F.normalize(kk.view(B,T,H,-1), dim=-1, p=2.0).view(B,T,C) a = torch.sigmoid( self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2 ) # a is "in-context learning rate" ma = torch.sigmoid(self.time_misc_a + (xa @ self.ma_w1) @ self.ma_w2) k = k * ma + k*a * (1 - ma) mk = torch.sigmoid(self.time_misc_k + (xk @ self.mk_w1) @ self.mk_w2) k = k * torch.clamp(w*mk, max=0).exp() x = RUN_CUDA_RWKV7(r, w, k, v, -kk, kk*a) x = self.ln_x(x.view(B * T, C)).view(B, T, C) x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.time_faaaa).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C) x = self.output(x * g) return x ===========unchanged ref 0=========== at: RWKV-v7.rwkv_v7_demo.RWKV_Tmix_x070.__init__ self.layer_id = layer_id self.n_head = args.dim_att // self.head_size self.time_maa_x = nn.Parameter(ddd) self.time_maa_r = nn.Parameter(ddd) self.time_maa_w = nn.Parameter(ddd) self.time_maa_k = nn.Parameter(ddd) self.time_maa_v = nn.Parameter(ddd) self.time_maa_a = nn.Parameter(ddd) self.time_maa_g = nn.Parameter(ddd) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att)) self.time_aaaaa = nn.Parameter(torch.empty(1,1,args.dim_att)) self.time_maa_w1 = nn.Parameter(torch.empty(args.n_embd, D_MIX_LORA*6)) self.time_maa_w2 = nn.Parameter(torch.empty(6, D_MIX_LORA, args.n_embd)) self.time_decay_w1 = nn.Parameter(torch.empty(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(torch.empty(D_DECAY_LORA, args.dim_att)) self.time_aaa_w1 = nn.Parameter(torch.empty(args.n_embd, D_AAA_LORA)) self.time_aaa_w2 = nn.Parameter(torch.empty(D_AAA_LORA, args.dim_att)) ===========unchanged ref 1=========== self.time_kkk_w1 = nn.Parameter(torch.empty(args.n_embd, D_KKK_LORA)) self.time_kkk_w2 = nn.Parameter(torch.empty(D_KKK_LORA, args.dim_att)) self.gate_w1 = nn.Parameter(torch.empty(args.n_embd, D_GATE_LORA)) self.gate_w2 = nn.Parameter(torch.empty(D_GATE_LORA, args.dim_att)) self.ma_w1 = nn.Parameter(torch.empty(args.n_embd, D_MA_LORA)) self.ma_w2 = nn.Parameter(torch.empty(D_MA_LORA, args.dim_att)) self.mv_w1 = nn.Parameter(torch.empty(args.n_embd, D_MV_LORA)) self.mv_w2 = nn.Parameter(torch.empty(D_MV_LORA, args.dim_att)) self.time_misc_a = nn.Parameter(torch.empty(1,1,args.n_embd)) self.time_misc_v = nn.Parameter(torch.empty(1,1,args.n_embd)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) at: torch._C._VariableFunctions bmm(input: Tensor, mat2: Tensor, *, out: Optional[Tensor]=None) -> Tensor sigmoid(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor tanh(input: Tensor, *, out: Optional[Tensor]=None) -> Tensor at: torch._C._nn softplus(input: Tensor, beta: int=..., threshold: int=...) -> Tensor ===========unchanged ref 2=========== at: torch.nn.functional normalize(input: Tensor, p: float=..., dim: int=..., eps: float=..., out: Optional[Tensor]=...) 
-> Tensor at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.module.Module forward: Callable[..., Any] = _forward_unimplemented at: torch.nn.modules.normalization GroupNorm(num_groups: int, num_channels: int, eps: float=1e-5, affine: bool=True, device=None, dtype=None) ===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo np.set_printoptions(precision=4, suppress=True, linewidth=200) ''' + This will load RWKV-7 "Goose" x070.rc4-2411 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc4-172m-pile-20241115-ctx4k.pth" - MODEL_PATH = "/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304
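The new v0 path above lets every later layer pull its value vectors back toward the first layer's values by a learned, data-dependent amount. Isolated as a plain function with assumed argument names (not the repository's API):

import torch

def mix_value_residual(v, v0, bias, xv, mv_w1, mv_w2):
    # layer 0 publishes v0 = v; deeper layers blend toward it with a sigmoid gate in [0, 1]
    g = torch.sigmoid(bias + (xv @ mv_w1) @ mv_w2)
    return v + (v0 - v) * g

C, R = 16, 4
v, v0, xv = torch.randn(1, 3, C), torch.randn(1, 3, C), torch.randn(1, 3, C)
out = mix_value_residual(v, v0, torch.zeros(1, 1, C), xv, torch.randn(C, R), torch.randn(R, C))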
RWKV-v7.rwkv_v7_demo/Block.__init__
Modified
BlinkDL~RWKV-LM
c453b42656baa206cf5e9441482c97448c0d221a
rwkv-7 rc4
<11>:<add> self.ffn = RWKV_CMix_x070(args, layer_id) <del> self.ffn = RWKV_CMix_x060(args, layer_id)
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV Block ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> <10> self.att = RWKV_Tmix_x070(args, layer_id) <11> self.ffn = RWKV_CMix_x060(args, layer_id) <12>
===========unchanged ref 0=========== at: torch.nn.modules.module Module() at: torch.nn.modules.module.Module __init__(self) -> None __init__() -> None at: torch.nn.modules.normalization LayerNorm(normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) ===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def forward(self, x): + xx = self.time_shift(x) - x + + k = x + xx * self.time_maa_k + k = torch.relu(self.key(k)) ** 2 + return self.value(k) + ===========changed ref 1=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def forward(self, x): - xx = self.time_shift(x) - x - xk = x + xx * self.time_maa_k - xr = x + xx * self.time_maa_r - - k = self.key(xk) - k = torch.relu(k) ** 2 - kv = self.value(k) - return torch.sigmoid(self.receptance(xr)) * kv - ===========changed ref 2=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + + with torch.no_grad(): + self.time_maa_k = nn.Parameter(torch.empty(1, 1, args.n_embd)) + + self.key = nn.Linear(args.n_embd, args.n_embd * 4, bias=False) + self.value = nn.Linear(args.n_embd * 4, args.n_embd, bias=False) + ===========changed ref 3=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def __init__(self, args, layer_id): - super().__init__() - self.args = args - self.layer_id = layer_id - self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) - - with torch.no_grad(): - ddd = torch.empty(1, 1, args.n_embd) - self.time_maa_k = nn.Parameter(ddd) - self.time_maa_r = nn.Parameter(ddd) - - self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) - self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) - self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) - ===========changed ref 4=========== # module: RWKV-v7.rwkv_v7_demo np.set_printoptions(precision=4, suppress=True, linewidth=200) ''' + This will load RWKV-7 "Goose" x070.rc4-2411 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc4-172m-pile-20241115-ctx4k.pth" - MODEL_PATH = 
"/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304 ===========changed ref 5=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV TimeMix ######################################################################################################## class RWKV_Tmix_x070(nn.Module): + def forward(self, x, v0): - def forward(self, x): B, T, C = x.size() H = self.n_head xx = self.time_shift(x) - x xxx = x + xx * self.time_maa_x xxx = torch.tanh(xxx @ self.time_maa_w1).view(B*T, 6, -1).transpose(0, 1) xxx = torch.bmm(xxx, self.time_maa_w2).view(6, B, T, -1) mr, mw, mk, mv, ma, mg = xxx.unbind(dim=0) xr = x + xx * (self.time_maa_r + mr) xw = x + xx * (self.time_maa_w + mw) xk = x + xx * (self.time_maa_k + mk) xv = x + xx * (self.time_maa_v + mv) xa = x + xx * (self.time_maa_a + ma) xg = x + xx * (self.time_maa_g + mg) r = self.receptance(xr) w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 # soft-clamp to (-inf, -0.5) k = self.key(xk) v = self.value(xv) + if self.layer_id == 0: + v0 = v + else: + v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_</s>
RWKV-v7.rwkv_v7_demo/Block.forward
Modified
BlinkDL~RWKV-LM
c453b42656baa206cf5e9441482c97448c0d221a
rwkv-7 rc4
<3>:<add> xx, v0 = self.att(self.ln1(x), v0) <del> x = x + self.att(self.ln1(x)) <4>:<add> x = x + xx <6>:<add> # if RESCALE_LAYER > 0: <del> if RESCALE_LAYER > 0: <7>:<add> # if (self.layer_id+1) % RESCALE_LAYER == 0: <del> if (self.layer_id+1) % RESCALE_LAYER == 0: <8>:<add> # x = x / 2 <del> x = x / 2 <12>:<add> return x, v0 <del> return x
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV Block ######################################################################################################## class Block(nn.Module): + def forward(self, x, v0): - def forward(self, x): <0> if self.layer_id == 0: <1> x = self.ln0(x) <2> <3> x = x + self.att(self.ln1(x)) <4> x = x + self.ffn(self.ln2(x)) <5> <6> if RESCALE_LAYER > 0: <7> if (self.layer_id+1) % RESCALE_LAYER == 0: <8> x = x / 2 <9> # if self.layer_id == args.n_layer-1: <10> # print(torch.min(x).item(), torch.max(x).item()) <11> <12> return x <13>
===========unchanged ref 0=========== at: RWKV-v7.rwkv_v7_demo RWKV_CMix_x070(args, layer_id) at: RWKV-v7.rwkv_v7_demo.Block.__init__ self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) self.att = RWKV_Tmix_x070(args, layer_id) at: torch.nn.modules.module.Module forward: Callable[..., Any] = _forward_unimplemented at: torch.nn.modules.normalization LayerNorm(normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) ===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def forward(self, x): + xx = self.time_shift(x) - x + + k = x + xx * self.time_maa_k + k = torch.relu(self.key(k)) ** 2 + return self.value(k) + ===========changed ref 1=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def forward(self, x): - xx = self.time_shift(x) - x - xk = x + xx * self.time_maa_k - xr = x + xx * self.time_maa_r - - k = self.key(xk) - k = torch.relu(k) ** 2 - kv = self.value(k) - return torch.sigmoid(self.receptance(xr)) * kv - ===========changed ref 2=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV Block ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) if self.layer_id == 0: self.ln0 = nn.LayerNorm(args.n_embd) self.att = RWKV_Tmix_x070(args, layer_id) + self.ffn = RWKV_CMix_x070(args, layer_id) - self.ffn = RWKV_CMix_x060(args, layer_id) ===========changed ref 3=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + + with torch.no_grad(): + self.time_maa_k = nn.Parameter(torch.empty(1, 1, args.n_embd)) + + self.key = nn.Linear(args.n_embd, args.n_embd * 4, bias=False) + self.value = nn.Linear(args.n_embd * 4, args.n_embd, bias=False) + ===========changed ref 4=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def __init__(self, args, layer_id): - super().__init__() - self.args = args - self.layer_id = layer_id - self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) - - with torch.no_grad(): - ddd = torch.empty(1, 1, args.n_embd) - 
self.time_maa_k = nn.Parameter(ddd) - self.time_maa_r = nn.Parameter(ddd) - - self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) - self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) - self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) - ===========changed ref 5=========== # module: RWKV-v7.rwkv_v7_demo np.set_printoptions(precision=4, suppress=True, linewidth=200) ''' + This will load RWKV-7 "Goose" x070.rc4-2411 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc4-172m-pile-20241115-ctx4k.pth" - MODEL_PATH = "/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304
RWKV-v7.rwkv_v7_demo/RWKV.forward
Modified
BlinkDL~RWKV-LM
c453b42656baa206cf5e9441482c97448c0d221a
rwkv-7 rc4
<2>:<add> v0 = torch.empty_like(x) <3>:<add> x, v0 = block(x, v0) <del> x = block(x)
# module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV Model ######################################################################################################## class RWKV(nn.Module): def forward(self, idx): <0> x = self.emb(idx) <1> <2> for block in self.blocks: <3> x = block(x) <4> <5> x = self.ln_out(x) <6> x = self.head(x) <7> <8> return x <9>
===========unchanged ref 0=========== at: RWKV-v7.rwkv_v7_demo Block(args, layer_id) at: RWKV-v7.rwkv_v7_demo.RWKV.__init__ self.emb = nn.Embedding(args.vocab_size, args.n_embd) at: torch._C._VariableFunctions empty_like(input: Tensor, *, memory_format: Optional[memory_format]=None, dtype: Optional[_dtype]=None, layout: Optional[_layout]=None, device: Optional[Union[_device, str, None]]=None, pin_memory: Optional[_bool]=False, requires_grad: Optional[_bool]=False) -> Tensor at: torch.nn.modules.container ModuleList(modules: Optional[Iterable[Module]]=None) at: torch.nn.modules.linear Linear(in_features: int, out_features: int, bias: bool=True, device=None, dtype=None) at: torch.nn.modules.module.Module forward: Callable[..., Any] = _forward_unimplemented at: torch.nn.modules.normalization LayerNorm(normalized_shape: _shape_t, eps: float=1e-5, elementwise_affine: bool=True, bias: bool=True, device=None, dtype=None) ===========changed ref 0=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def forward(self, x): + xx = self.time_shift(x) - x + + k = x + xx * self.time_maa_k + k = torch.relu(self.key(k)) ** 2 + return self.value(k) + ===========changed ref 1=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def forward(self, x): - xx = self.time_shift(x) - x - xk = x + xx * self.time_maa_k - xr = x + xx * self.time_maa_r - - k = self.key(xk) - k = torch.relu(k) ** 2 - kv = self.value(k) - return torch.sigmoid(self.receptance(xr)) * kv - ===========changed ref 2=========== # module: RWKV-v7.rwkv_v7_demo ######################################################################################################## # RWKV Block ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.ln1 = nn.LayerNorm(args.n_embd) self.ln2 = nn.LayerNorm(args.n_embd) if self.layer_id == 0: self.ln0 = nn.LayerNorm(args.n_embd) self.att = RWKV_Tmix_x070(args, layer_id) + self.ffn = RWKV_CMix_x070(args, layer_id) - self.ffn = RWKV_CMix_x060(args, layer_id) ===========changed ref 3=========== # module: RWKV-v7.rwkv_v7_demo + + ######################################################################################################## + # RWKV ChannelMix + ######################################################################################################## + + class RWKV_CMix_x070(nn.Module): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + + with torch.no_grad(): + self.time_maa_k = nn.Parameter(torch.empty(1, 1, args.n_embd)) + + self.key = nn.Linear(args.n_embd, args.n_embd * 4, bias=False) + self.value = nn.Linear(args.n_embd * 4, args.n_embd, bias=False) + ===========changed ref 4=========== # module: RWKV-v7.rwkv_v7_demo 
######################################################################################################## # RWKV Block ######################################################################################################## class Block(nn.Module): + def forward(self, x, v0): - def forward(self, x): if self.layer_id == 0: x = self.ln0(x) + xx, v0 = self.att(self.ln1(x), v0) - x = x + self.att(self.ln1(x)) + x = x + xx x = x + self.ffn(self.ln2(x)) + # if RESCALE_LAYER > 0: - if RESCALE_LAYER > 0: + # if (self.layer_id+1) % RESCALE_LAYER == 0: - if (self.layer_id+1) % RESCALE_LAYER == 0: + # x = x / 2 - x = x / 2 # if self.layer_id == args.n_layer-1: # print(torch.min(x).item(), torch.max(x).item()) + return x, v0 - return x ===========changed ref 5=========== # module: RWKV-v7.rwkv_v7_demo - - ######################################################################################################## - # RWKV ChannelMix - ######################################################################################################## - - class RWKV_CMix_x060(nn.Module): - def __init__(self, args, layer_id): - super().__init__() - self.args = args - self.layer_id = layer_id - self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) - - with torch.no_grad(): - ddd = torch.empty(1, 1, args.n_embd) - self.time_maa_k = nn.Parameter(ddd) - self.time_maa_r = nn.Parameter(ddd) - - self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) - self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) - self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) - ===========changed ref 6=========== # module: RWKV-v7.rwkv_v7_demo np.set_printoptions(precision=4, suppress=True, linewidth=200) ''' + This will load RWKV-7 "Goose" x070.rc4-2411 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) - This will load RWKV-7 "Goose" x070.rc3-2409-2r7a-d1 and inference in GPT-mode (slower than RNN-mode for autoregressive generation) ''' args = types.SimpleNamespace() # model download: https://huggingface.co/BlinkDL/temp-latest-training-models/tree/main + MODEL_PATH = "/mnt/e/rwkv-x070-rc4-172m-pile-20241115-ctx4k.pth" - MODEL_PATH = "/mnt/e/rwkv-x070-rc3-172m-pile-20241011-ctx4k.pth" args.n_layer = 12 args.ctx_len = 4096 args.n_embd = 768 args.vocab_size = 50304 # "pile" model: 50277 padded to 50304
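RWKV.forward now threads v0 through every block. A toy stack showing just that plumbing, with ToyBlock standing in for the real Block (the gating here is a simplification of the record's mv_w1/mv_w2 LoRA):

import torch
import torch.nn as nn

class ToyBlock(nn.Module):
    def __init__(self, d, layer_id):
        super().__init__()
        self.layer_id = layer_id
        self.value = nn.Linear(d, d, bias=False)
        self.gate = nn.Parameter(torch.zeros(1, 1, d))

    def forward(self, x, v0):
        v = self.value(x)
        if self.layer_id == 0:
            v0 = v                                        # layer 0 publishes its values
        else:
            v = v + (v0 - v) * torch.sigmoid(self.gate)   # later layers blend toward them
        return x + v, v0                                  # pass v0 along the stack

d = 16
blocks = nn.ModuleList(ToyBlock(d, i) for i in range(4))
x = torch.randn(2, 8, d)
v0 = torch.empty_like(x)                                  # placeholder; filled by layer 0
for block in blocks:
    x, v0 = block(x, v0)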
RWKV-v5.src.model/RWKV_CMix_x060.__init__
Modified
BlinkDL~RWKV-LM
b44816c689f9d37448de1a64cc6a7fa44995d9f2
RWKV-7 (preview) training
<10>:<add> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) <del> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <11>:<add> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) <del> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0))
# module: RWKV-v5.src.model class RWKV_CMix_x060(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) <4> <5> with torch.no_grad(): # fancy init of time_mix <6> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <7> ddd = torch.ones(1, 1, args.n_embd) <8> for i in range(args.n_embd): <9> ddd[0, 0, i] = i / args.n_embd <10> self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <11> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) <12> <13> self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) <14> self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) <15> self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) <16>
===========changed ref 0=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV7g(q,w,k,v,a,b): + B,T,HC = q.shape + q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] + return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) + ===========changed ref 1=========== # module: RWKV-v5.src.model + class WindBackstepping(torch.autograd.Function): + @staticmethod + def backward(ctx, dy): + assert all(i.dtype==torch.bfloat16 for i in [dy]) + assert all(i.is_contiguous() for i in [dy]) + w,q,k,v,z,b,s,sa = ctx.saved_tensors + dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] + torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) + return dw,dq,dk,dv,dz,db + ===========changed ref 2=========== # module: RWKV-v5.src.model + class WindBackstepping(torch.autograd.Function): + @staticmethod + def forward(ctx, w,q,k,v,z,b): + B,T,H,C = w.shape + assert T%CHUNK_LEN == 0 + assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) + assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) + y = torch.empty_like(v) + s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) + sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) + torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) + ctx.save_for_backward(w,q,k,v,z,b,s,sa) + return y + ===========changed ref 3=========== # module: RWKV-v5.src.model HEAD_SIZE = int(os.environ["RWKV_HEAD_SIZE_A"]) + if 'x070' in os.environ["RWKV_MY_TESTING"]: + CHUNK_LEN = 16 + + flags = ['-res-usage', f'-D_C_={HEAD_SIZE}', f"-D_CHUNK_LEN_={CHUNK_LEN}", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-device-vectorization"] + load(name="wind_backstepping", sources=[f'cuda/wkv7_cuda.cu', 'cuda/wkv7_op.cpp'], is_python_module=False, verbose=True, extra_cuda_cflags=flags) + + class WindBackstepping(torch.autograd.Function): + @staticmethod + def forward(ctx, w,q,k,v,z,b): + B,T,H,C = w.shape + assert T%CHUNK_LEN == 0 + assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) + assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) + y = torch.empty_like(v) + s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) + sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) + torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) + ctx.save_for_backward(w,q,k,v,z,b,s,sa) + return y + @staticmethod + def backward(ctx, dy): + assert all(i.dtype==torch.bfloat16 for i in [</s> ===========changed ref 4=========== # module: RWKV-v5.src.model # offset: 1 <s> + def backward(ctx, dy): + assert all(i.dtype==torch.bfloat16 for i in [dy]) + assert all(i.is_contiguous() for i in [dy]) + w,q,k,v,z,b,s,sa = ctx.saved_tensors + dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] + torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) + return dw,dq,dk,dv,dz,db + + def RUN_CUDA_RWKV7g(q,w,k,v,a,b): + B,T,HC = q.shape + q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] + return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) + + elif 'x060' in os.environ["RWKV_MY_TESTING"]: - if 'x060' in os.environ["RWKV_MY_TESTING"]: if os.environ["RWKV_TRAIN_TYPE"] == 'states': wkv6state_cuda = load(name="wkv6state", sources=["cuda/wkv6state_op.cpp", f"cuda/wkv6state_cuda.cu"], verbose=True, extra_cuda_cflags=["-res-usage", "--use_fast_math", "-O3", "-Xptxas -O3", "--extra-</s>
RWKV-v5.src.model/Block.__init__
Modified
BlinkDL~RWKV-LM
b44816c689f9d37448de1a64cc6a7fa44995d9f2
RWKV-7 (preview) training
<16>:<add> if 'x070' in os.environ["RWKV_MY_TESTING"]: <add> self.att = RWKV_Tmix_x070(args, layer_id) <add> elif 'x060a' in os.environ["RWKV_MY_TESTING"]: <del> if 'x060a' in os.environ["RWKV_MY_TESTING"]:
# module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> <4> self.ln1 = nn.LayerNorm(args.n_embd) <5> self.ln2 = nn.LayerNorm(args.n_embd) <6> <7> if self.layer_id == 0: <8> self.ln0 = nn.LayerNorm(args.n_embd) <9> if args.my_pos_emb > 0: <10> self.pos_emb_x = nn.Parameter(torch.zeros((1,args.my_pos_emb,args.n_embd))) <11> self.pos_emb_y = nn.Parameter(torch.zeros((args.my_pos_emb,1,args.n_embd))) <12> <13> if self.layer_id == 0 and self.args.pre_ffn > 0: <14> self.ffnPre = RWKV_ChannelMix(args, 0) <15> else: <16> if 'x060a' in os.environ["RWKV_MY_TESTING"]: <17> self.att = RWKV_Tmix_x060a(args, layer_id) <18> elif 'x060b' in os.environ["RWKV_MY_TESTING"]: <19> self.att = RWKV_Tmix_x060b(args, layer_id) <20> elif 'x060' in os.environ["RWKV_MY_TESTING"]: <21> if os.environ["RWKV_TRAIN_TYPE"] == 'states': <22> self.att = RWKV_Tmix_x060_state(args, layer_id) <23> else: <24> self.att = RWKV_Tmix_x060(args, layer_id) <25> elif 'x052' in os.environ["RWKV</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 1 self.att = RWKV_Tmix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.att = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if 'g' in os.environ["RWKV_MY_TESTING"]: self.ffn = MishGLU(args, layer_id) elif 'x060' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x060(args, layer_id) elif 'x052' in os.environ["RWKV_MY_TESTING"]: self.ffn = RWKV_CMix_x052(args, layer_id) elif 'mamba' in os.environ["RWKV_MY_TESTING"]: self.ffn = Mamba(d_model=args.n_embd, d_state=16, d_conv=4, expand=2.125) # match rwkv6 #params if args.tiny_att_dim > 0 and self.layer_id == args.tiny_att_layer: self.tiny_ln = nn.LayerNorm(args.n_embd) self.tiny_q = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_k = nn.Linear(args.n_embd, args.tiny_att_dim, bias=False) self.tiny_v = nn.Linear(args.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask",</s> ===========below chunk 1=========== # module: RWKV-v5.src.model ######################################################################################################## # The RWKV Model with our blocks ######################################################################################################## class Block(nn.Module): def __init__(self, args, layer_id): # offset: 2 <s>.n_embd, args.n_embd, bias=False) self.register_buffer("tiny_mask", torch.tril(torch.ones(args.ctx_len, args.ctx_len))) if args.dropout > 0: self.drop0 = nn.Dropout(p = args.dropout) self.drop1 = nn.Dropout(p = args.dropout) ===========changed ref 0=========== # module: RWKV-v5.src.model class RWKV_CMix_x060(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) with torch.no_grad(): # fancy init of time_mix ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd + self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) - self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) - self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) ===========changed ref 1=========== # module: RWKV-v5.src.model + def RUN_CUDA_RWKV7g(q,w,k,v,a,b): + B,T,HC = q.shape + q,w,k,v,a,b = [i.view(B,T,HC//64,64) for i in [q,w,k,v,a,b]] + return WindBackstepping.apply(w,q,k,v,a,b).view(B,T,HC) + ===========changed ref 2=========== # module: RWKV-v5.src.model + class WindBackstepping(torch.autograd.Function): + @staticmethod + def backward(ctx, dy): + assert all(i.dtype==torch.bfloat16 for i in [dy]) + assert all(i.is_contiguous() for i in [dy]) + w,q,k,v,z,b,s,sa = ctx.saved_tensors + dw,dq,dk,dv,dz,db = [torch.empty_like(x) for x in [w,q,k,v,z,b]] + torch.ops.wind_backstepping.backward(w,q,k,v,z,b, dy,s,sa, dw,dq,dk,dv,dz,db) + return 
dw,dq,dk,dv,dz,db + ===========changed ref 3=========== # module: RWKV-v5.src.model + class WindBackstepping(torch.autograd.Function): + @staticmethod + def forward(ctx, w,q,k,v,z,b): + B,T,H,C = w.shape + assert T%CHUNK_LEN == 0 + assert all(i.dtype==torch.bfloat16 for i in [w,q,k,v,z,b]) + assert all(i.is_contiguous() for i in [w,q,k,v,z,b]) + y = torch.empty_like(v) + s = torch.empty(B,H,T//CHUNK_LEN,C,C, dtype=torch.float32,device=w.device) + sa = torch.empty(B,T,H,C, dtype=torch.float32,device=w.device) + torch.ops.wind_backstepping.forward(w,q,k,v,z,b, y,s,sa) + ctx.save_for_backward(w,q,k,v,z,b,s,sa) + return y +
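Block.forward itself is not part of this chunk, so the following is only an assumed sketch of how a block threads the extra v0 tensor once self.att is an RWKV_Tmix_x070 (which returns a pair); the mixer and FFN here are illustrative stand-ins, not the real modules:

import torch
import torch.nn as nn

class TinyBlock(nn.Module):
    def __init__(self, n_embd, layer_id):
        super().__init__()
        self.layer_id = layer_id
        self.ln1 = nn.LayerNorm(n_embd)
        self.ln2 = nn.LayerNorm(n_embd)
        self.ffn = nn.Linear(n_embd, n_embd, bias=False)   # stand-in for RWKV_CMix_x070

    def att(self, x, v0):
        # stand-in for RWKV_Tmix_x070: layer 0 defines v0, later layers reuse it
        if self.layer_id == 0:
            v0 = x
        return x + v0, v0

    def forward(self, x, v0):
        xa, v0 = self.att(self.ln1(x), v0)   # time-mix consumes and returns v0
        x = x + xa
        x = x + self.ffn(self.ln2(x))        # channel-mix keeps the one-tensor signature
        return x, v0

x, v0 = torch.randn(2, 4, 8), torch.empty(2, 4, 8)
for blk in [TinyBlock(8, i) for i in range(3)]:
    x, v0 = blk(x, v0)                       # mirrors the x070 loop in RWKV.forward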
RWKV-v5.src.model/RWKV.forward
Modified
BlinkDL~RWKV-LM
b44816c689f9d37448de1a64cc6a7fa44995d9f2
RWKV-7 (preview) training
<16>:<add> if 'x070' in os.environ["RWKV_MY_TESTING"]: <add> v0 = torch.empty_like(x) <add> for block in self.blocks: <del> for block in self.blocks: <17>:<add> if args.grad_cp == 1: <del> if args.grad_cp == 1: <18>:<add> x, v0 = deepspeed.checkpointing.checkpoint(block, x, v0) <add> else: <add> x, v0 = block(x, v0) <add> else: <add> for block in self.blocks: <add> if args.grad_cp == 1: <add> x = deepspeed.checkpointing.checkpoint(block, x) <del> x = deepspeed.checkpointing.checkpoint(block, x) <19>:<add> else: <del> else: <20>:<add> x = block(x) <del> x = block(x)
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def forward(self, idx): <0> args = self.args <1> B, T = idx.size() <2> assert T <= args.ctx_len, "Cannot forward, model ctx_len is exhausted." <3> <4> x = self.emb(idx) <5> x_emb = x <6> <7> if args.dropout > 0: <8> x = self.drop0(x) <9> if args.tiny_att_dim > 0: <10> for block in self.blocks: <11> if args.grad_cp == 1: <12> x = deepspeed.checkpointing.checkpoint(block, x, x_emb) <13> else: <14> x = block(x, x_emb) <15> else: <16> for block in self.blocks: <17> if args.grad_cp == 1: <18> x = deepspeed.checkpointing.checkpoint(block, x) <19> else: <20> x = block(x) <21> <22> x = self.ln_out(x) <23> <24> if args.head_qk > 0: <25> q = self.head_q(x)[:, :T, :] <26> k = self.head_k(x)[:, :T, :] <27> c = (q @ k.transpose(-2, -1)) * (1.0 / args.head_qk) <28> c = c.masked_fill(self.copy_mask[:T, :T] == 0, 0) <29> <30> if "32" in os.environ["RWKV_FLOAT_MODE"]: <31> c = c @ F.one_hot(idx, num_classes=args.vocab_size) <32> elif os.environ["RWKV_FLOAT_MODE"] == "fp16": <33> c = c @ F.one_hot(idx, num_classes=args.vocab_size).half() <34> elif os.environ["RWKV_FLOAT_MODE"] == "bf16": <35> c = c @ F.one_hot</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def forward(self, idx): # offset: 1 x = self.head(x) + c else: x = self.head(x) return x ===========changed ref 0=========== <s>5.src.model + class RWKV_CMix_x070(MyModule): + # !!! initialize if you are using RWKV_Tmix_x070 in your code !!! + # self.key.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) + # self.value.weight.data.zero_() + + @MyFunction + def forward(self, x): + xx = self.time_shift(x) - x + + k = x + xx * self.time_maa_k + k = torch.relu(self.key(k)) ** 2 + + return self.value(k) + ===========changed ref 1=========== # module: RWKV-v5.src.model + class RWKV_CMix_x070(MyModule): + def __init__(self, args, layer_id): + super().__init__() + self.args = args + self.layer_id = layer_id + self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) + + with torch.no_grad(): + ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 + ddd = torch.ones(1, 1, args.n_embd) + for i in range(args.n_embd): + ddd[0, 0, i] = i / args.n_embd + self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**4)) + + self.key = nn.Linear(args.n_embd, args.n_embd * 4, bias=False) + self.value = nn.Linear(args.n_embd * 4, args.n_embd, bias=False) + ===========changed ref 2=========== # module: RWKV-v5.src.model class RWKV_CMix_x060(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) with torch.no_grad(): # fancy init of time_mix ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 ddd = torch.ones(1, 1, args.n_embd) for i in range(args.n_embd): ddd[0, 0, i] = i / args.n_embd + self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) - self.time_maa_k = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) + self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0**3)) - self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, ratio_1_to_almost0)) self.key = nn.Linear(args.n_embd, args.dim_ffn, bias=False) self.receptance = nn.Linear(args.n_embd, args.n_embd, bias=False) self.value = nn.Linear(args.dim_ffn, args.n_embd, bias=False) ===========changed ref 3=========== <s> self.key.weight.data.uniform_(-0.05/(args.n_embd**0.5), 0.05/(args.n_embd**0.5)) + # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) + # self.output.weight.data.zero_() + + @MyFunction + def forward(self, x, v0): + B, T, C = x.size() + H = self.n_head + xx = self.time_shift(x) - x + + xr = x + xx * self.time_maa_r + xw = x + xx * self.time_maa_w + xk = x + xx * self.time_maa_k + xv = x + xx * self.time_maa_v + xa = x + xx * self.time_maa_a + xg = x + xx * self.time_maa_g + + r = self.receptance(xr) + w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 + k = self.key(xk) + v = self.value(xv) + if self.layer_id == 0: + v0 = v + else: + v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_w2) + a = torch.sigmoid(self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) + g = torch.sigmoid(xg @ self.gate_w1) @ self.gate_w2 + + kk = k * self.time</s> ===========changed ref 4=========== <s>.data.uniform_(-0.05/(args.n_embd**0.5), 0.05/(args.n_embd**0.5)) + # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) + # self.output.weight.data.zero_() + + @MyFunction + def forward(self, x, v0): # offset: 1 <s>(xg @ self.gate_w1) @ self.gate_w2 + + 
kk = k * self.time_misc_kkk + kk = F.normalize(kk.view(B,T,H,-1), dim=-1, p=2.0).view(B,T,C) + k = k * (1 + (a-1) * self.time_misc_a) + + x = RUN_CUDA_RWKV7g(r, w, k, v, -kk, kk*a) + x = self.ln_x(x.view(B * T, C)).view(B, T, C) + + x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.time_faaaa).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C) + x = self.output(x * g) + return x, v0 +
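A note on the grad_cp branch above: under x070 the checkpointed call now returns two tensors. A small portable sketch of the same idea, with torch.utils.checkpoint substituted for deepspeed.checkpointing purely so the example runs anywhere:

import torch
from torch.utils.checkpoint import checkpoint

def block(x, v0):
    # stand-in for an x070 Block: returns the (x, v0) pair
    return torch.tanh(x) + v0, v0

x = torch.randn(2, 4, 8, requires_grad=True)
v0 = torch.zeros(2, 4, 8)
for _ in range(3):
    # block() activations are recomputed in backward instead of stored;
    # the tuple return lets both x and v0 be threaded through the loop
    x, v0 = checkpoint(block, x, v0, use_reentrant=False)
x.sum().backward()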
RWKV-v5.src.model/RWKV_Tmix_x070.__init__
Modified
BlinkDL~RWKV-LM
6b44d244e60143635c073507a4db4808b1731a32
less params for x070
<8>:<add> H = self.n_head <add> N = self.head_size <add> C = args.n_embd <12>:<add> ddd = torch.ones(1, 1, C) <del> ddd = torch.ones(1, 1, args.n_embd) <13>:<add> for i in range(C): <del> for i in range(args.n_embd): <14>:<add> ddd[0, 0, i] = i / C <del> ddd[0, 0, i] = i / args.n_embd <16>:<del> self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, 0.6 * ratio_1_to_almost0 ** 0.9))
# module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): <0> super().__init__() <1> self.args = args <2> self.layer_id = layer_id <3> self.my_testing = args.my_testing <4> <5> self.head_size = args.head_size_a <6> self.n_head = args.dim_att // self.head_size <7> assert args.dim_att % self.n_head == 0 <8> <9> with torch.no_grad(): <10> ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 <11> ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 <12> ddd = torch.ones(1, 1, args.n_embd) <13> for i in range(args.n_embd): <14> ddd[0, 0, i] = i / args.n_embd <15> <16> self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, 0.6 * ratio_1_to_almost0 ** 0.9)) <17> self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.2 * ratio_1_to_almost0)) <18> self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, 0.9 * ratio_1_to_almost0)) <19> self.time_maa_k = nn.Parameter(1.0 - (torch.pow(ddd, 0.9 * ratio_1_to_almost0) + 0.4 * ratio_0_to_1)) <20> self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, 0.4 * ratio_1_to_almost0) + 0.</s>
===========below chunk 0=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): # offset: 1 self.time_maa_a = nn.Parameter(1.0 - torch.pow(ddd, 0.9 * ratio_1_to_almost0)) self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.2 * ratio_1_to_almost0)) decay_speed = torch.ones(args.dim_att) for n in range(args.dim_att): decay_speed[n] = -7 + 5 * (n / (args.dim_att - 1)) ** (0.85 + 1.0 * ratio_0_to_1 ** 0.5) self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att) + 0.5) # !!! 0.5 comes from F.softplus !!! self.time_faaaa = nn.Parameter(torch.zeros(1,1,self.n_head,self.head_size)) def ortho_init(x, scale): with torch.no_grad(): shape = x.shape if len(shape) == 2: gain = math.sqrt(shape[0] / shape[1]) if shape[0] > shape[1] else 1 nn.init.orthogonal_(x, gain=gain * scale) elif len(shape) == 3: gain = math.sqrt(shape[1] / shape[2]) if shape[1] > shape[2] else 1 for i in range(shape[0]): nn.init.orthogonal_(x[i], gain=gain * scale) else: assert False return x D_DECAY_LORA = 64 # dim 64 for emb 768, change it for smaller/larger models self.time_decay_w1 = nn.Parameter(torch.</s> ===========below chunk 1=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): # offset: 2 <s> emb 768, change it for smaller/larger models self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, D_DECAY_LORA)) self.time_decay_w2 = nn.Parameter(ortho_init(torch.zeros(D_DECAY_LORA, args.dim_att), 0.1)) D_AAA_LORA = 32 # dim 32 for emb 768, change it for smaller/larger models self.time_aaa_w1 = nn.Parameter(torch.zeros(args.n_embd, D_AAA_LORA)) self.time_aaa_w2 = nn.Parameter(ortho_init(torch.zeros(D_AAA_LORA, args.dim_att), 0.1)) self.time_aaaaa = nn.Parameter(torch.zeros(1,1,args.dim_att)) D_GATE_LORA = 128 # dim 128 for emb 768, change it for smaller/larger models self.gate_w1 = nn.Parameter(torch.zeros(args.n_embd, D_GATE_LORA)) self.gate_w2 = nn.Parameter(ortho_init(torch.zeros(D_GATE_LORA, args.dim_att), 0.1)) D_MV_LORA = 64 # dim 64 for emb 768, change it for smaller/larger models self.mv_w1 = nn.Parameter(torch.zeros(args.n_embd, D_MV_LORA)) self.mv_w2 = nn.Parameter(ortho_init(torch.zeros(D_MV_LORA</s> ===========below chunk 2=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): # offset: 3 <s>.dim_att), 0.1)) self.time_misc_v = nn.Parameter(torch.zeros(1,1,args.n_embd)+1.0) self.time_misc_kkk = nn.Parameter(torch.ones(1,1,args.n_embd)) self.time_misc_a = nn.Parameter(torch.ones(1,1,args.n_embd)) self.time_shift = nn.ZeroPad2d((0, 0, 1, -1)) self.receptance = nn.Linear(args.n_embd, args.dim_att, bias=False) self.key = nn.Linear(args.n_embd, args.dim_att, bias=False) self.value = nn.Linear(args.n_embd, args.dim_att, bias=False) self.output = nn.Linear(args.dim_att, args.n_embd, bias=False) self.ln_x = nn.GroupNorm(self.n_head, args.dim_att, eps=(1e-5)*(args.head_size_divisor**2))
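All of the D_*_LORA parameters above follow one pattern: a full-width base parameter plus a low-rank, token-dependent correction (zero-initialized down-projection, orthogonally-initialized up-projection). A standalone sketch of that pattern for the decay, with illustrative shapes:

import torch
import torch.nn as nn
import torch.nn.functional as F

C, D = 768, 64                                # emb width, low-rank dim (64 for emb 768 above)
time_decay = nn.Parameter(torch.zeros(1, 1, C))
w1 = nn.Parameter(torch.zeros(C, D))          # zero init: the correction starts at zero
w2 = nn.Parameter(torch.empty(D, C))
nn.init.orthogonal_(w2, gain=0.1)             # same role as ortho_init(..., 0.1)

x = torch.randn(2, 16, C)                     # (B, T, C) token stream
# per-token decay, matching the forward pass: -softplus(-(base + tanh(x @ w1) @ w2)) - 0.5
w = -F.softplus(-(time_decay + torch.tanh(x @ w1) @ w2)) - 0.5
print(w.shape)                                # torch.Size([2, 16, 768])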
RWKV-v5.src.model/RWKV_Tmix_x070.forward
Modified
BlinkDL~RWKV-LM
6b44d244e60143635c073507a4db4808b1731a32
less params for x070
<18>:<add> v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w).view(B,T,H,1)).view(B,T,C) <del> v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_w2) <19>:<add> a = torch.sigmoid(self.time_aaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) <del> a = torch.sigmoid(self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) <22>:<add> kk = k * self.time_misc_k <del> kk = k * self.time
<s>.n_embd**0.5)) + # self.value.weight.data.uniform_(-0.5/(C**0.5), 0.5/(C**0.5)) - # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) # self.output.weight.data.zero_() @MyFunction def forward(self, x, v0): <0> B, T, C = x.size() <1> H = self.n_head <2> xx = self.time_shift(x) - x <3> <4> xr = x + xx * self.time_maa_r <5> xw = x + xx * self.time_maa_w <6> xk = x + xx * self.time_maa_k <7> xv = x + xx * self.time_maa_v <8> xa = x + xx * self.time_maa_a <9> xg = x + xx * self.time_maa_g <10> <11> r = self.receptance(xr) <12> w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 <13> k = self.key(xk) <14> v = self.value(xv) <15> if self.layer_id == 0: <16> v0 = v <17> else: <18> v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_w2) <19> a = torch.sigmoid(self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) <20> g = torch.sigmoid(xg @ self.gate_w1) @ self.gate_w2 <21> <22> kk = k * self.time</s>
===========below chunk 0=========== <s>**0.5)) + # self.value.weight.data.uniform_(-0.5/(C**0.5), 0.5/(C**0.5)) - # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) # self.output.weight.data.zero_() @MyFunction def forward(self, x, v0): # offset: 1 kk = F.normalize(kk.view(B,T,H,-1), dim=-1, p=2.0).view(B,T,C) k = k * (1 + (a-1) * self.time_misc_a) x = RUN_CUDA_RWKV7g(r, w, k, v, -kk, kk*a) x = self.ln_x(x.view(B * T, C)).view(B, T, C) x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.time_faaaa).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C) x = self.output(x * g) return x, v0 ===========changed ref 0=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): super().__init__() self.args = args self.layer_id = layer_id self.my_testing = args.my_testing self.head_size = args.head_size_a self.n_head = args.dim_att // self.head_size assert args.dim_att % self.n_head == 0 + H = self.n_head + N = self.head_size + C = args.n_embd with torch.no_grad(): ratio_0_to_1 = layer_id / (args.n_layer - 1) # 0 to 1 ratio_1_to_almost0 = 1.0 - (layer_id / args.n_layer) # 1 to ~0 + ddd = torch.ones(1, 1, C) - ddd = torch.ones(1, 1, args.n_embd) + for i in range(C): - for i in range(args.n_embd): + ddd[0, 0, i] = i / C - ddd[0, 0, i] = i / args.n_embd - self.time_maa_x = nn.Parameter(1.0 - torch.pow(ddd, 0.6 * ratio_1_to_almost0 ** 0.9)) self.time_maa_r = nn.Parameter(1.0 - torch.pow(ddd, 0.2 * ratio_1_to_almost0)) self.time_maa_w = nn.Parameter(1.0 - torch.pow(ddd, 0.9 * ratio_1_to_almost0)) self.time_maa_k = nn.Parameter(1.0 - (torch.pow(ddd, 0.9 * ratio_1_to_almost0) + 0</s> ===========changed ref 1=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): # offset: 1 <s>(1.0 - (torch.pow(ddd, 0.9 * ratio_1_to_almost0) + 0.4 * ratio_0_to_1)) self.time_maa_v = nn.Parameter(1.0 - (torch.pow(ddd, 0.4 * ratio_1_to_almost0) + 0.6 * ratio_0_to_1)) self.time_maa_a = nn.Parameter(1.0 - torch.pow(ddd, 0.9 * ratio_1_to_almost0)) self.time_maa_g = nn.Parameter(1.0 - torch.pow(ddd, 0.2 * ratio_1_to_almost0)) - - decay_speed = torch.ones(args.dim_att) - for n in range(args.dim_att): - decay_speed[n] = -7 + 5 * (n / (args.dim_att - 1)) ** (0.85 + 1.0 * ratio_0_to_1 ** 0.5) - self.time_decay = nn.Parameter(decay_speed.reshape(1,1,args.dim_att) + 0.5) # !!! 0.5 comes from F.softplus !!! 
- - self.time_faaaa = nn.Parameter(torch.zeros(1,1,self.n_head,self.head_size)) def ortho_init(x, scale): with torch.no_grad(): shape = x.shape if len(shape) == 2: gain = math.sqrt(shape[0] / shape[1]) if shape[0] > shape[1] else 1 nn.init.orthogonal_(x,</s> ===========changed ref 2=========== # module: RWKV-v5.src.model ######################################################################################################## class RWKV_Tmix_x070(MyModule): def __init__(self, args, layer_id): # offset: 2 <s>gain * scale) elif len(shape) == 3: gain = math.sqrt(shape[1] / shape[2]) if shape[1] > shape[2] else 1 for i in range(shape[0]): nn.init.orthogonal_(x[i], gain=gain * scale) else: assert False return x D_DECAY_LORA = 64 # dim 64 for emb 768, change it for smaller/larger models + self.time_decay_w1 = nn.Parameter(torch.zeros(C, D_DECAY_LORA)) - self.time_decay_w1 = nn.Parameter(torch.zeros(args.n_embd, D_DECAY_LORA)) + self.time_decay_w2 = nn.Parameter(ortho_init(torch.zeros(D_DECAY_LORA, C), 0.1)) - self.time_decay_w2 = nn.Parameter(ortho_init(torch.zeros(D_DECAY_LORA, args.dim_att), 0.1)) + decay_speed = torch.ones(C) + for n in range(C): + decay_speed[n] = -7 + 5 * (n / (C - 1)) ** (0.85 + 1.0 * ratio_0_to_1 ** 0.5) + self.time_decay = nn.Parameter(decay_speed.reshape(1,1,C) + 0.5) # !!! 0.5 comes from F.softplus !!! D_AAA_LORA = 32 # dim 32 for emb 768, change it for smaller/larger models + self.time_</s>
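One easy-to-misread step in the forward pass above is the kk term: the (B,T,C) key is scaled by time_misc_k, reshaped into heads, L2-normalized per head, and flattened back before entering the kernel as -kk and kk*a. The reshape/normalize round-trip in isolation:

import torch
import torch.nn.functional as F

B, T, H, N = 2, 8, 12, 64
C = H * N
k = torch.randn(B, T, C)
time_misc_k = torch.ones(1, 1, C)

kk = k * time_misc_k
kk = F.normalize(kk.view(B, T, H, -1), dim=-1, p=2.0).view(B, T, C)

# every per-head slice of kk is now a unit vector
norms = kk.view(B, T, H, N).norm(dim=-1)
assert torch.allclose(norms, torch.ones_like(norms), atol=1e-5)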
RWKV-v5.src.model/RWKV.generate_init_weight
Modified
BlinkDL~RWKV-LM
6b44d244e60143635c073507a4db4808b1731a32
less params for x070
<18>:<add> s3 = str(shape[3]) if len(shape) > 3 else "" <add> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {s3.ljust(5)} {n}", end="") <del> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="")
# module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): <0> print( <1> f""" <2> ############################################################################ <3> # <4> # Init model weight (slow for large models)... <5> # <6> ############################################################################ <7> """ <8> ) <9> m = {} <10> n_params = 0 <11> for n in self.state_dict(): <12> p = self.state_dict()[n] <13> shape = p.shape <14> <15> s0 = str(shape[0]) if len(shape) > 0 else "" <16> s1 = str(shape[1]) if len(shape) > 1 else "" <17> s2 = str(shape[2]) if len(shape) > 2 else "" <18> print(f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {n}", end="") <19> <20> scale = 1.0 <21> if "ln_" in n or ".ln" in n or "time_" in n or "_mask" in n or "pos_emb" in n or '.mask.' in n or n.endswith('_w') or n.endswith('_w1') or n.endswith('_w2') or n.endswith('_bias'): <22> if 'ln_x.weight' in n: <23> layer_scale = (1+int(n.split('.')[1])) / self.args.n_layer <24> m[n] = (p * 0.0) + (layer_scale ** 0.7) <25> else: <26> m[n] = p <27> print() <28> elif n == "emb.weight": <29> m[n] = p <30> scale = -1e-4 <31> nn.init.uniform_(m[n], a=scale, b=-scale) <32> print(f" [scale {scale}]") <33> elif n == "head.weight": <34> m[n] = p <35> if self.args.vocab_size > self.</s>
===========below chunk 0=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 1 scale = 0.5 * math.sqrt(self.args.vocab_size / self.args.n_embd) else: scale = 0.5 nn.init.orthogonal_(m[n], gain=scale) print(f" [scale {scale}]") else: if 'mamba' in os.environ["RWKV_MY_TESTING"]: m[n] = p if '.out_proj.weight' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") elif '.bias' in n: scale = 0 nn.init.zeros_(m[n]) print(f" [scale {scale}]") else: print() else: assert n.endswith('.weight') # should always be true zero = [".att.output.", ".ffn.value.", ".ffn.receptance.", ".ffnPre.value.", ".ffnPre.receptance.", "head_q.", '.oo.', '.rr.'] for kk in zero: if kk in n: scale = 0 if "head_k." in n: scale = 0.1 if "head_q." in n: scale = 0 for kk in [".att.key."]: if kk in n: scale = 0.1 for kk in [".att.gate."]: if kk in n: scale = 0.1 print(f" [scale {scale}]") if self.args.accelerator.upper() == "GPU": m[n] = torch.empty((shape[0], shape[1]), device="cuda") else: m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.</s> ===========below chunk 1=========== # module: RWKV-v5.src.model class RWKV(pl.LightningModule): def generate_init_weight(self): # offset: 2 <s> m[n] = torch.empty((shape[0], shape[1])) if scale == 0: nn.init.zeros_(m[n]) elif scale < 0: nn.init.uniform_(m[n], a=scale, b=-scale) else: nn.init.orthogonal_(m[n], gain=scale) m[n] = m[n].cpu() if os.environ["RWKV_FLOAT_MODE"] == "fp16": m[n] = m[n].half() elif os.environ["RWKV_FLOAT_MODE"] == "bf16": m[n] = m[n].bfloat16() n_params += m[n].numel() # if n == "emb.weight": # print(m[n]) print('model params', n_params) gc.collect() torch.cuda.empty_cache() return m ===========changed ref 0=========== <s>.n_embd**0.5)) + # self.value.weight.data.uniform_(-0.5/(C**0.5), 0.5/(C**0.5)) - # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) # self.output.weight.data.zero_() @MyFunction def forward(self, x, v0): B, T, C = x.size() H = self.n_head xx = self.time_shift(x) - x xr = x + xx * self.time_maa_r xw = x + xx * self.time_maa_w xk = x + xx * self.time_maa_k xv = x + xx * self.time_maa_v xa = x + xx * self.time_maa_a xg = x + xx * self.time_maa_g r = self.receptance(xr) w = -F.softplus(-(self.time_decay + torch.tanh(xw @ self.time_decay_w1) @ self.time_decay_w2)) - 0.5 k = self.key(xk) v = self.value(xv) if self.layer_id == 0: v0 = v else: + v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w).view(B,T,H,1)).view(B,T,C) - v = v + (v0 - v) * torch.sigmoid(self.time_misc_v + (xv @ self.mv_w1) @ self.mv_w2) + a = torch.sigmoid(self.time_aaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) - </s> ===========changed ref 1=========== <s>**0.5)) + # self.value.weight.data.uniform_(-0.5/(C**0.5), 0.5/(C**0.5)) - # self.value.weight.data.uniform_(-0.5/(args.n_embd**0.5), 0.5/(args.n_embd**0.5)) # self.output.weight.data.zero_() @MyFunction def forward(self, x, v0): # offset: 1 <s>self.time_aaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) - a = torch.sigmoid(self.time_aaaaa + (xa @ self.time_aaa_w1) @ self.time_aaa_w2) g = torch.sigmoid(xg @ self.gate_w1) @ self.gate_w2 + kk = k * self.time_misc_k - kk = k * self.time_misc_kkk kk = F.normalize(kk.view(B,T,H,-1), dim=-1, p=2.0).view(B,T,C) k = k * (1 + (a-1) * self.time_misc_a) x = RUN_CUDA_RWKV7g(r, w, k, v, -kk, kk*a) x = self.ln_x(x.view(B * T, C)).view(B, T, C) x = x + ((r.view(B,T,H,-1)*k.view(B,T,H,-1)*self.time_faaaa).sum(dim=-1, keepdim=True) * v.view(B,T,H,-1)).view(B,T,C) x 
= self.output(x * g) return x, v0
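The extra s3 column added to generate_init_weight exists because x070 introduces 4-D parameters such as time_faaaa with shape (1, 1, n_head, head_size); a tiny helper reproducing that left-justified shape printout for up to four dims:

import torch

def shape_row(name, p):
    dims = [str(d) for d in p.shape] + [""] * 4
    s0, s1, s2, s3 = dims[:4]
    return f"{s0.ljust(5)} {s1.ljust(5)} {s2.ljust(5)} {s3.ljust(5)} {name}"

print(shape_row("emb.weight", torch.zeros(50304, 768)))
print(shape_row("blocks.0.att.time_faaaa", torch.zeros(1, 1, 12, 64)))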