aditeyabaral committed
Commit 348f984 · verified · 1 Parent(s): aa67553

Training Step 5625

Files changed (5):
  1. README.md +10 -0
  2. config.json +5 -0
  3. model.safetensors +3 -0
  4. trainer_state.pt +3 -0
  5. training_config.json +98 -0
README.md ADDED
@@ -0,0 +1,10 @@
+ ---
+ tags:
+ - model_hub_mixin
+ - pytorch_model_hub_mixin
+ ---
+
+ This model has been pushed to the Hub using the [PyTorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration:
+ - Code: [More Information Needed]
+ - Paper: [More Information Needed]
+ - Docs: [More Information Needed]
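
Given the `pytorch_model_hub_mixin` tag, the checkpoint can presumably be reloaded with the mixin's `from_pretrained` classmethod. A minimal loading sketch, assuming the repository's `LSTMSeqModel` class (named in `training_config.json` below) subclasses `PyTorchModelHubMixin`; the import path here is hypothetical:

```python
# Hypothetical import: the class lives in the training codebase, not this repo.
from model import LSTMSeqModel

# from_pretrained fetches config.json (constructor kwargs) and
# model.safetensors (weights) from the Hub and rebuilds the module.
model = LSTMSeqModel.from_pretrained("pico-llm/lstm-pico")
```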
config.json ADDED
@@ -0,0 +1,5 @@
+ {
+ "embed_size": 32,
+ "hidden_size": 32,
+ "vocab_size": 50257
+ }
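
For the mixin, `config.json` records the keyword arguments the model was constructed with. Below is a sketch of a class whose signature matches these keys, with module shapes taken from the `model` repr stored in `training_config.json` further down; the real class definition is not part of this commit and may differ:

```python
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin

class LSTMSeqModel(nn.Module, PyTorchModelHubMixin):
    """Sketch reconstructed from config.json and the serialized model repr."""
    def __init__(self, embed_size: int = 32, hidden_size: int = 32,
                 vocab_size: int = 50257):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)  # Embedding(50257, 32)
        self.lstm = nn.LSTM(embed_size, hidden_size)           # LSTM(32, 32)
        self.linear = nn.Linear(hidden_size, vocab_size)       # Linear(32, 50257)

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        output, _ = self.lstm(x)
        return self.linear(output)  # per-token logits over the vocabulary
```

At float32, these three modules come to roughly 3.28M parameters (a 50257×32 embedding, a 32-unit LSTM, and a 32→50257 projection), i.e. about 13.1 MB, which lines up with the 13101212-byte `model.safetensors` pointer below.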
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:088f0e0957bc54fc551512a235b1231bb92b1aad1a0e2f8044462ab3a24c2646
+ size 13101212
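
This is a Git LFS pointer: the commit stores only the hash and size, while the weights file itself sits in LFS storage. Once fetched, the tensors can be read with the `safetensors` library; a small sketch:

```python
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

# Download the real file (resolving the LFS pointer) and load the tensors.
path = hf_hub_download(repo_id="pico-llm/lstm-pico", filename="model.safetensors")
state_dict = load_file(path)  # dict: tensor name -> torch.Tensor
print({name: tuple(t.shape) for name, t in state_dict.items()})
```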
trainer_state.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eff8365666727017a6c01f58360902c1c3a87e9a885bccee32f3470f37598b11
+ size 26209926
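
`trainer_state.pt` is likewise an LFS pointer. Its exact contents are not shown in this commit; presumably it is a torch-pickled snapshot of training state (optimizer, scheduler, current step) matching the `save_interval_steps` checkpointing in `training_config.json`. A hedged loading sketch:

```python
import torch

# Assumption: a pickled dict of training state. weights_only=False is needed
# for arbitrary pickled objects in recent PyTorch versions.
trainer_state = torch.load("trainer_state.pt", map_location="cpu",
                           weights_only=False)
print(type(trainer_state))
```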
training_config.json ADDED
@@ -0,0 +1,98 @@
+ {
+ "_hub_mixin_config": {
+ "optimizer_class": "adamw",
+ "scheduler_class": "cosine",
+ "input_files": null,
+ "dataset_subset_size": 1000000,
+ "block_size": 128,
+ "dataset_type": "fixed",
+ "train_ratio": 0.98,
+ "val_ratio": 0.01,
+ "encoding_name": "gpt2",
+ "n_heads": 12,
+ "n_blocks": 12,
+ "norm": "prenorm",
+ "pos_embed_type": "absolute",
+ "dropout": 0.1,
+ "embed_size": 32,
+ "hidden_size": 32,
+ "k": 3,
+ "num_inner_layers": 1,
+ "embedding_type": "full",
+ "checkpoint": null,
+ "batch_size": 64,
+ "num_epochs": 5,
+ "learning_rate": 0.0003,
+ "warmup_ratio": 0.1,
+ "log_interval_steps": 250,
+ "save_interval_steps": 5625,
+ "save_dir": "./saved_models/lstm_pico",
+ "save_latest": true,
+ "save_best": true,
+ "loss_metric_for_best_model": "val",
+ "prompt": "Once upon a",
+ "max_new_tokens": 50,
+ "top_p": 0.9,
+ "use_wandb": true,
+ "wandb_entity": "pico-llm",
+ "wandb_project": "training",
+ "wandb_name": "lstm-pico",
+ "upload_model_to_hub": true,
+ "repo_id": "pico-llm/lstm-pico",
+ "device": "cuda:0",
+ "seed": 42,
+ "monosemantic_analysis": true,
+ "num_steps": 76565
+ },
+ "hf_api": "<huggingface_hub.hf_api.HfApi object at 0x153dd3661040>",
48
+ "wandb_writer": "<wandb.sdk.wandb_run.Run object at 0x153dd36ad940>",
49
+ "wandb_table": "<wandb.sdk.data_types.table.Table object at 0x153dd36ae900>",
50
+ "optimizer": "AdamW (\nParameter Group 0\n amsgrad: False\n betas: (0.9, 0.999)\n capturable: False\n decoupled_weight_decay: True\n differentiable: False\n eps: 1e-08\n foreach: None\n fused: None\n initial_lr: 0.0003\n lr: 0.00022121120689655424\n maximize: False\n weight_decay: 0.05\n)",
51
+ "scheduler": "<torch.optim.lr_scheduler.SequentialLR object at 0x153e820e67b0>",
52
+ "optimizer_class": "adamw",
53
+ "scheduler_class": "cosine",
54
+ "model": "LSTMSeqModel(\n (embedding): Embedding(50257, 32)\n (lstm): LSTM(32, 32)\n (linear): Linear(in_features=32, out_features=50257, bias=True)\n)",
+ "learning_rate": 0.0003,
+ "_init_kwargs": {
+ "input_files": null,
+ "dataset_subset_size": 1000000,
+ "block_size": 128,
+ "dataset_type": "fixed",
+ "train_ratio": 0.98,
+ "val_ratio": 0.01,
+ "encoding_name": "gpt2",
+ "n_heads": 12,
+ "n_blocks": 12,
+ "norm": "prenorm",
+ "pos_embed_type": "absolute",
+ "dropout": 0.1,
+ "embed_size": 32,
+ "hidden_size": 32,
+ "k": 3,
+ "num_inner_layers": 1,
+ "embedding_type": "full",
+ "checkpoint": null,
+ "batch_size": 64,
+ "num_epochs": 5,
+ "warmup_ratio": 0.1,
+ "log_interval_steps": 250,
+ "save_interval_steps": 5625,
+ "save_dir": "./saved_models/lstm_pico",
+ "save_latest": true,
+ "save_best": true,
+ "loss_metric_for_best_model": "val",
+ "prompt": "Once upon a",
+ "max_new_tokens": 50,
+ "top_p": 0.9,
+ "use_wandb": true,
+ "wandb_entity": "pico-llm",
+ "wandb_project": "training",
+ "wandb_name": "lstm-pico",
+ "upload_model_to_hub": true,
+ "repo_id": "pico-llm/lstm-pico",
+ "device": "cuda:0",
+ "seed": 42,
+ "monosemantic_analysis": true,
+ "num_steps": 76565
+ }
+ }
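
The serialized optimizer and scheduler strings pin down more of the run than the class names alone: AdamW with initial_lr 3e-4 and weight_decay 0.05, wrapped in a SequentialLR, with warmup_ratio 0.1 over num_steps 76565. One common way to realize a `cosine` schedule with linear warmup under those numbers is sketched below; the start factor and milestone are assumptions, though the recorded lr (2.2121e-4 ≈ 0.737 × 3e-4) is consistent with a linear warmup from start_factor 0.01 evaluated at step 5625, the step this commit saves.

```python
import torch
from torch.optim import AdamW
from torch.optim.lr_scheduler import SequentialLR, LinearLR, CosineAnnealingLR

model = torch.nn.Linear(8, 8)  # stand-in for the real network

num_steps, warmup_ratio = 76565, 0.1          # values from _hub_mixin_config
warmup_steps = int(num_steps * warmup_ratio)  # 7656 warmup steps

optimizer = AdamW(model.parameters(), lr=3e-4, weight_decay=0.05)
scheduler = SequentialLR(
    optimizer,
    schedulers=[
        # Ramp 0.01 * lr up to lr over the warmup, then cosine-decay.
        LinearLR(optimizer, start_factor=0.01, total_iters=warmup_steps),
        CosineAnnealingLR(optimizer, T_max=num_steps - warmup_steps),
    ],
    milestones=[warmup_steps],
)
```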