{ "architectures": [ "ASRModel" ], "attn_implementation": "flash_attention_2", "audio_config": { "_name_or_path": "openai/whisper-large-v3-turbo", "activation_dropout": 0.0, "activation_function": "gelu", "apply_spec_augment": false, "architectures": [ "WhisperForConditionalGeneration" ], "attention_dropout": 0.0, "bos_token_id": 50257, "classifier_proj_size": 256, "d_model": 1280, "decoder_attention_heads": 20, "decoder_ffn_dim": 5120, "decoder_layerdrop": 0.0, "decoder_layers": 4, "decoder_start_token_id": 50258, "dropout": 0.0, "dtype": "bfloat16", "encoder_attention_heads": 20, "encoder_ffn_dim": 5120, "encoder_layerdrop": 0.0, "encoder_layers": 32, "eos_token_id": 50257, "init_std": 0.02, "mask_feature_length": 10, "mask_feature_min_masks": 0, "mask_feature_prob": 0.0, "mask_time_length": 10, "mask_time_min_masks": 2, "mask_time_prob": 0.05, "max_source_positions": 1500, "max_target_positions": 448, "median_filter_width": 7, "model_type": "whisper", "num_hidden_layers": 32, "num_mel_bins": 128, "pad_token_id": 50257, "scale_embedding": false, "use_cache": true, "use_weighted_layer_sum": false, "vocab_size": 51866 }, "audio_model_id": "openai/whisper-large-v3-turbo", "audio_sample_rate": 16000, "auto_map": { "AutoConfig": "asr_config.ASRConfig", "AutoModel": "asr_modeling.ASRModel", "AutoModelForSpeechSeq2Seq": "asr_modeling.ASRModel", "AutoProcessor": "asr_processing.ASRProcessor" }, "custom_pipelines": { "automatic-speech-recognition": { "impl": "asr_pipeline.ASRPipeline", "pt": [ "AutoModelForSpeechSeq2Seq" ], "tf": [], "type": "audio" } }, "downsample_rate": 5, "dtype": "bfloat16", "encoder_dim": 1280, "inference_warmup_tokens": 10, "label_smoothing": 0.0, "llm_dim": 2048, "max_new_tokens": 96, "model_dtype": "bfloat16", "model_type": "asr_model", "num_experts": 4, "num_experts_per_tok": 2, "pipeline_tag": "automatic-speech-recognition", "pretrained_model_path": "mazesmazes/tiny-audio", "projector_dropout": 0.0, "projector_hidden_dim": null, "projector_init_std": 0.02, "projector_input_noise": 0.0, "projector_num_layers": 2, "projector_pool_stride": 4, "projector_type": "mlp", "qformer_hidden_size": null, "qformer_intermediate_size": null, "qformer_num_heads": 16, "qformer_num_layers": 2, "qformer_window_size": 15, "router_aux_loss_coef": 0.01, "system_prompt": "/no_think /system_override", "text_config": { "_name_or_path": "HuggingFaceTB/SmolLM3-3B", "architectures": [ "SmolLM3ForCausalLM" ], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": null, "dtype": "bfloat16", "eos_token_id": 128012, "hidden_act": "silu", "hidden_size": 2048, "initializer_range": 0.02, "intermediate_size": 11008, "layer_types": [ "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention", "full_attention" ], "max_position_embeddings": 65536, "max_window_layers": 28, "mlp_bias": false, "model_type": "smollm3", "no_rope_layer_interval": 4, "no_rope_layers": [ 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 
1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 ], "num_attention_heads": 16, "num_hidden_layers": 36, "num_key_value_heads": 4, "pretraining_tp": 2, "rms_norm_eps": 1e-06, "rope_scaling": null, "rope_theta": 5000000.0, "sliding_window": null, "use_cache": false, "use_sliding_window": false, "vocab_size": 128257 }, "text_model_id": "HuggingFaceTB/SmolLM3-3B", "transformers_version": "4.57.3", "use_cache": false, "use_specaugment": true, "user_prompt": "Transcribe: