zhangYuanHui committed
Commit a376b52 · verified · 1 Parent(s): e36da3e

Update config.json

Files changed (1):
  1. config.json +73 -156
config.json CHANGED
@@ -28,165 +28,82 @@
   "vision_config": {
     "_name_or_path": "openai/clip-vit-base-patch16",
     "architectures": [
-      "CLIPModel"
+      "CLIPVisionModel"
     ],
     "logit_scale_init_value": 2.6592,
-    "model_type": "clip",
-    "projection_dim": 512,
-    "text_config": {
-      "_name_or_path": "",
-      "add_cross_attention": false,
-      "architectures": null,
-      "attention_dropout": 0.0,
-      "bad_words_ids": null,
-      "begin_suppress_tokens": null,
-      "bos_token_id": 0,
-      "chunk_size_feed_forward": 0,
-      "cross_attention_hidden_size": null,
-      "decoder_start_token_id": null,
-      "diversity_penalty": 0.0,
-      "do_sample": false,
-      "dropout": 0.0,
-      "early_stopping": false,
-      "encoder_no_repeat_ngram_size": 0,
-      "eos_token_id": 2,
-      "exponential_decay_length_penalty": null,
-      "finetuning_task": null,
-      "forced_bos_token_id": null,
-      "forced_eos_token_id": null,
-      "hidden_act": "quick_gelu",
-      "hidden_size": 512,
-      "id2label": {
-        "0": "LABEL_0",
-        "1": "LABEL_1"
-      },
-      "initializer_factor": 1.0,
-      "initializer_range": 0.02,
-      "intermediate_size": 2048,
-      "is_decoder": false,
-      "is_encoder_decoder": false,
-      "label2id": {
-        "LABEL_0": 0,
-        "LABEL_1": 1
-      },
-      "layer_norm_eps": 1e-05,
-      "length_penalty": 1.0,
-      "max_length": 20,
-      "max_position_embeddings": 77,
-      "min_length": 0,
-      "model_type": "clip_text_model",
-      "no_repeat_ngram_size": 0,
-      "num_attention_heads": 8,
-      "num_beam_groups": 1,
-      "num_beams": 1,
-      "num_hidden_layers": 12,
-      "num_return_sequences": 1,
-      "output_attentions": false,
-      "output_hidden_states": false,
-      "output_scores": false,
-      "pad_token_id": 1,
-      "prefix": null,
-      "problem_type": null,
-      "projection_dim": 512,
-      "pruned_heads": {},
-      "remove_invalid_values": false,
-      "repetition_penalty": 1.0,
-      "return_dict": true,
-      "return_dict_in_generate": false,
-      "sep_token_id": null,
-      "suppress_tokens": null,
-      "task_specific_params": null,
-      "temperature": 1.0,
-      "tf_legacy_loss": false,
-      "tie_encoder_decoder": false,
-      "tie_word_embeddings": true,
-      "tokenizer_class": null,
-      "top_k": 50,
-      "top_p": 1.0,
-      "torch_dtype": null,
-      "torchscript": false,
-      "typical_p": 1.0,
-      "use_bfloat16": false,
-      "vocab_size": 49408
-    },
     "torch_dtype": "float32",
-    "vision_config": {
-      "_name_or_path": "",
-      "add_cross_attention": false,
-      "architectures": null,
-      "attention_dropout": 0.0,
-      "bad_words_ids": null,
-      "begin_suppress_tokens": null,
-      "bos_token_id": null,
-      "chunk_size_feed_forward": 0,
-      "cross_attention_hidden_size": null,
-      "decoder_start_token_id": null,
-      "diversity_penalty": 0.0,
-      "do_sample": false,
-      "dropout": 0.0,
-      "early_stopping": false,
-      "encoder_no_repeat_ngram_size": 0,
-      "eos_token_id": null,
-      "exponential_decay_length_penalty": null,
-      "finetuning_task": null,
-      "forced_bos_token_id": null,
-      "forced_eos_token_id": null,
-      "hidden_act": "quick_gelu",
-      "hidden_size": 768,
-      "id2label": {
-        "0": "LABEL_0",
-        "1": "LABEL_1"
-      },
-      "image_size": 224,
-      "initializer_factor": 1.0,
-      "initializer_range": 0.02,
-      "intermediate_size": 3072,
-      "is_decoder": false,
-      "is_encoder_decoder": false,
-      "label2id": {
-        "LABEL_0": 0,
-        "LABEL_1": 1
-      },
-      "layer_norm_eps": 1e-05,
-      "length_penalty": 1.0,
-      "max_length": 20,
-      "min_length": 0,
-      "model_type": "clip_vision_model",
-      "no_repeat_ngram_size": 0,
-      "num_attention_heads": 12,
-      "num_beam_groups": 1,
-      "num_beams": 1,
-      "num_channels": 3,
-      "num_hidden_layers": 12,
-      "num_return_sequences": 1,
-      "output_attentions": false,
-      "output_hidden_states": false,
-      "output_scores": false,
-      "pad_token_id": null,
-      "patch_size": 16,
-      "prefix": null,
-      "problem_type": null,
-      "projection_dim": 512,
-      "pruned_heads": {},
-      "remove_invalid_values": false,
-      "repetition_penalty": 1.0,
-      "return_dict": true,
-      "return_dict_in_generate": false,
-      "sep_token_id": null,
-      "suppress_tokens": null,
-      "task_specific_params": null,
-      "temperature": 1.0,
-      "tf_legacy_loss": false,
-      "tie_encoder_decoder": false,
-      "tie_word_embeddings": true,
-      "tokenizer_class": null,
-      "top_k": 50,
-      "top_p": 1.0,
-      "torch_dtype": null,
-      "torchscript": false,
-      "typical_p": 1.0,
-      "use_bfloat16": false
-    }
+    "add_cross_attention": false,
+    "attention_dropout": 0.0,
+    "bad_words_ids": null,
+    "begin_suppress_tokens": null,
+    "bos_token_id": null,
+    "chunk_size_feed_forward": 0,
+    "cross_attention_hidden_size": null,
+    "decoder_start_token_id": null,
+    "diversity_penalty": 0.0,
+    "do_sample": false,
+    "dropout": 0.0,
+    "early_stopping": false,
+    "encoder_no_repeat_ngram_size": 0,
+    "eos_token_id": null,
+    "exponential_decay_length_penalty": null,
+    "finetuning_task": null,
+    "forced_bos_token_id": null,
+    "forced_eos_token_id": null,
+    "hidden_act": "quick_gelu",
+    "hidden_size": 768,
+    "id2label": {
+      "0": "LABEL_0",
+      "1": "LABEL_1"
+    },
+    "image_size": 224,
+    "initializer_factor": 1.0,
+    "initializer_range": 0.02,
+    "intermediate_size": 3072,
+    "is_decoder": false,
+    "is_encoder_decoder": false,
+    "label2id": {
+      "LABEL_0": 0,
+      "LABEL_1": 1
+    },
+    "layer_norm_eps": 1e-05,
+    "length_penalty": 1.0,
+    "max_length": 20,
+    "min_length": 0,
+    "model_type": "clip_vision_model",
+    "no_repeat_ngram_size": 0,
+    "num_attention_heads": 12,
+    "num_beam_groups": 1,
+    "num_beams": 1,
+    "num_channels": 3,
+    "num_hidden_layers": 12,
+    "num_return_sequences": 1,
+    "output_attentions": false,
+    "output_hidden_states": false,
+    "output_scores": false,
+    "pad_token_id": null,
+    "patch_size": 16,
+    "prefix": null,
+    "problem_type": null,
+    "projection_dim": 512,
+    "pruned_heads": {},
+    "remove_invalid_values": false,
+    "repetition_penalty": 1.0,
+    "return_dict": true,
+    "return_dict_in_generate": false,
+    "sep_token_id": null,
+    "suppress_tokens": null,
+    "task_specific_params": null,
+    "temperature": 1.0,
+    "tf_legacy_loss": false,
+    "tie_encoder_decoder": false,
+    "tie_word_embeddings": true,
+    "tokenizer_class": null,
+    "top_k": 50,
+    "top_p": 1.0,
+    "torchscript": false,
+    "typical_p": 1.0,
+    "use_bfloat16": false
   },
   "vision_feature_layer": -2,
   "vision_feature_select_strategy": "default"