Update tokenizer_config.json

tokenizer_config.json  CHANGED  (+71 -26)
@@ -1,10 +1,9 @@
 {
-  "add_bos_token": …
-  "…
-  "add_prefix_space": null,
+  "add_bos_token": false,
+  "add_prefix_space": false,
   "added_tokens_decoder": {
     "151643": {
-      "content": "…
+      "content": "<|endoftext|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -12,23 +11,23 @@
       "special": true
     },
     "151644": {
-      "content": "…
+      "content": "<|im_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": …
+      "special": true
     },
     "151645": {
-      "content": "…
+      "content": "<|im_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": …
+      "special": true
     },
     "151646": {
-      "content": "…
+      "content": "<|object_ref_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -36,28 +35,28 @@
       "special": true
     },
     "151647": {
-      "content": "<|…
+      "content": "<|object_ref_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": …
+      "special": true
     },
     "151648": {
-      "content": "…
+      "content": "<|box_start|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": …
+      "special": true
     },
     "151649": {
-      "content": "…
+      "content": "<|box_end|>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
-      "special": …
+      "special": true
     },
     "151650": {
       "content": "<|quad_start|>",
@@ -178,18 +177,64 @@
       "rstrip": false,
       "single_word": false,
       "special": false
+    },
+    "151665": {
+      "content": "<tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151666": {
+      "content": "</tool_response>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151667": {
+      "content": "<think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
+    },
+    "151668": {
+      "content": "</think>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": false
     }
   },
-  "…
-  …
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>",
+    "<|object_ref_start|>",
+    "<|object_ref_end|>",
+    "<|box_start|>",
+    "<|box_end|>",
+    "<|quad_start|>",
+    "<|quad_end|>",
+    "<|vision_start|>",
+    "<|vision_end|>",
+    "<|vision_pad|>",
+    "<|image_pad|>",
+    "<|video_pad|>"
+  ],
+  "bos_token": null,
+  "chat_template": "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0]['role'] == 'system' %}\n {{- messages[0]['content'] }}\n {%- else %}\n {{- '' }}\n {%- endif %}\n {{- \"\\n\\n# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0]['role'] == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0]['content'] + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- for message in messages %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + message.content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" and not message.tool_calls %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set content = message.content %}\n {%- if not loop.last %}\n {%- set content = message.content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {{- '<|im_start|>' + message.role }}\n {%- if message.content %}\n {{- '\\n' + content }}\n {%- endif %}\n {%- for tool_call in message.tool_calls %}\n {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '\\n<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- message.content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n<think>\\n' }}\n{%- endif %}\n",
   "clean_up_tokenization_spaces": false,
-  "eos_token": "…
-  "…
+  "eos_token": "<|im_end|>",
+  "errors": "replace",
+  "extra_special_tokens": {},
   "model_max_length": 131072,
-  "pad_token": "…
-  "…
-  "…
-  "…
-  …
-  "use_default_system_prompt": false
-}
+  "pad_token": "<|endoftext|>",
+  "split_special_tokens": false,
+  "tokenizer_class": "Qwen2Tokenizer",
+  "unk_token": null
+}
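Taken together, the new side registers the ChatML pair `<|im_start|>`/`<|im_end|>` (plus the object-ref and box markers) as special tokens, adds `<tool_response>`, `</tool_response>`, `<think>`, and `</think>` as ids 151665–151668, installs a tool-aware chat template, and points `eos_token` at `<|im_end|>` with `Qwen2Tokenizer` as the tokenizer class. A minimal sanity check of the updated template, assuming a placeholder repo id (substitute the repository this commit belongs to):

```python
# Minimal sanity check of the updated chat template.
# "your-org/your-model" is a placeholder; use the repo this commit is in.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-org/your-model")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2 + 2?"},
]

# With add_generation_prompt=True the rendered prompt ends in
# "<|im_start|>assistant\n<think>\n", opening a reasoning block for the model.
prompt = tok.apply_chat_template(messages, tokenize=False,
                                 add_generation_prompt=True)
print(prompt)
```

Note that the generation prompt deliberately opens a `<think>` block, so decoding begins inside the reasoning span.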