Qwen: Qwen3 30B A3B Instruct 2507
Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.
Identifiers & provenance
- Primary ID
- qwen/qwen3-30b-a3b-instruct-2507
- OpenRouter ID
- qwen/qwen3-30b-a3b-instruct-2507
- Arena ID
- qwen3-30b-a3b-instruct-2507
- Canonical slug
- qwen/qwen3-30b-a3b-instruct-2507
- Match method
- openrouter_name
- Match key
- qwen3-30b-a3b-instruct-2507
Source semantics
- Arena rank is a human-preference leaderboard signal, not an objective measure of model quality.
- OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
- Pricing fields may differ by provider and can include extra modes beyond prompt/completion.
Read more on the Methodology & data sources page.
Description
Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and agentic tool use. Post-trained on instruction data, it demonstrates competitive performance across reasoning (AIME, ZebraLogic), coding (MultiPL-E, LiveCodeBench), and alignment (IFEval, WritingBench) benchmarks. It outperforms its non-instruct variant on subjective and open-ended tasks while retaining strong factual and coding performance.
Raw fields snapshot
{
"id": "qwen/qwen3-30b-a3b-instruct-2507",
"name": "Qwen: Qwen3 30B A3B Instruct 2507",
"description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and agentic tool use. Post-trained on instruction data, it demonstrates competitive performance across reasoning (AIME, ZebraLogic), coding (MultiPL-E, LiveCodeBench), and alignment (IFEval, WritingBench) benchmarks. It outperforms its non-instruct variant on subjective and open-ended tasks while retaining strong factual and coding performance.",
"created": 1753806965,
"canonical_slug": "qwen/qwen3-30b-a3b-instruct-2507",
"hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
"source_type": "both",
"context_length": 262144,
"max_completion_tokens": 262144,
"is_moderated": false,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Qwen3",
"instruct_type": null
},
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"modality": "text->text",
"tokenizer": "Qwen3",
"instruct_type": null,
"supported_parameters": [
"frequency_penalty",
"max_tokens",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
],
"default_parameters": {},
"per_request_limits": null,
"top_provider": {
"context_length": 262144,
"max_completion_tokens": 262144,
"is_moderated": false
},
"pricing": {
"prompt": "0.00000009",
"completion": "0.0000003"
},
"PPM": {
"prompt": 0.09,
"completion": 0.3
},
"openrouter_raw": {
"id": "qwen/qwen3-30b-a3b-instruct-2507",
"canonical_slug": "qwen/qwen3-30b-a3b-instruct-2507",
"hugging_face_id": "Qwen/Qwen3-30B-A3B-Instruct-2507",
"name": "Qwen: Qwen3 30B A3B Instruct 2507",
"created": 1753806965,
"description": "Qwen3-30B-A3B-Instruct-2507 is a 30.5B-parameter mixture-of-experts language model from Qwen, with 3.3B active parameters per inference. It operates in non-thinking mode and is designed for high-quality instruction following, multilingual understanding, and agentic tool use. Post-trained on instruction data, it demonstrates competitive performance across reasoning (AIME, ZebraLogic), coding (MultiPL-E, LiveCodeBench), and alignment (IFEval, WritingBench) benchmarks. It outperforms its non-instruct variant on subjective and open-ended tasks while retaining strong factual and coding performance.",
"context_length": 262144,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Qwen3",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000009",
"completion": "0.0000003"
},
"top_provider": {
"context_length": 262144,
"max_completion_tokens": 262144,
"is_moderated": false
},
"per_request_limits": null,
"supported_parameters": [
"frequency_penalty",
"max_tokens",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
],
"default_parameters": {},
"expiration_date": null
},
"categories": {
"text-expert": {
"score": 1391,
"rank_ub": 93,
"votes": 1150,
"ci_95": "±17"
},
"text-industry-software-and-it-services": {
"score": 1429,
"rank_ub": 77,
"votes": 8224,
"ci_95": "±7"
},
"text-industry-writing-and-literature-and-language": {
"score": 1343,
"rank_ub": 111,
"votes": 5181,
"ci_95": "±8"
},
"text-overall": {
"score": 1384,
"rank_ub": 94,
"votes": 23940,
"ci_95": "±5"
},
"text-industry-life-and-physical-and-social-science": {
"score": 1395,
"rank_ub": 98,
"votes": 3722,
"ci_95": "±10"
},
"text-industry-mathematical": {
"score": 1406,
"rank_ub": 78,
"votes": 1218,
"ci_95": "±17"
},
"text-industry-entertainment-and-sports-and-media": {
"score": 1334,
"rank_ub": 106,
"votes": 4202,
"ci_95": "±9"
},
"text-industry-business-and-management-and-financial-operations": {
"score": 1396,
"rank_ub": 74,
"votes": 4172,
"ci_95": "±9"
},
"text-industry-medicine-and-healthcare": {
"score": 1400,
"rank_ub": 96,
"votes": 1360,
"ci_95": "±16"
},
"text-industry-legal-and-government": {
"score": 1381,
"rank_ub": 104,
"votes": 1509,
"ci_95": "±15"
},
"text-instruction-following": {
"score": 1367,
"rank_ub": 99,
"votes": 5988,
"ci_95": "±8"
},
"text-math": {
"score": 1386,
"rank_ub": 97,
"votes": 1433,
"ci_95": "±15"
},
"text-creative-writing": {
"score": 1325,
"rank_ub": 118,
"votes": 3039,
"ci_95": "±11"
},
"text-multi-turn": {
"score": 1384,
"rank_ub": 94,
"votes": 4082,
"ci_95": "±9"
},
"text-hard-prompts": {
"score": 1408,
"rank_ub": 86,
"votes": 11112,
"ci_95": "±6"
},
"text-coding": {
"score": 1440,
"rank_ub": 77,
"votes": 4661,
"ci_95": "±9"
},
"text-hard-prompts-english": {
"score": 1417,
"rank_ub": 88,
"votes": 5652,
"ci_95": "±8"
},
"text-longer-query": {
"score": 1382,
"rank_ub": 97,
"votes": 4918,
"ci_95": "±9"
},
"text-english": {
"score": 1395,
"rank_ub": 96,
"votes": 11170,
"ci_95": "±6"
},
"text-chinese": {
"score": 1438,
"rank_ub": 61,
"votes": 1323,
"ci_95": "±17"
},
"text-french": {
"score": 1411,
"rank_ub": 65,
"votes": 317,
"ci_95": "±33"
},
"text-spanish": {
"score": 1387,
"rank_ub": 70,
"votes": 641,
"ci_95": "±25"
},
"text-russian": {
"score": 1366,
"rank_ub": 96,
"votes": 1221,
"ci_95": "±16"
},
"text-german": {
"score": 1351,
"rank_ub": 86,
"votes": 487,
"ci_95": "±27"
},
"text-japanese": {
"score": 1322,
"rank_ub": 67,
"votes": 714,
"ci_95": "±24"
},
"text-korean": {
"score": 1314,
"rank_ub": 87,
"votes": 548,
"ci_95": "±27"
},
"text-exclude-ties": {
"score": 1360,
"rank_ub": 92,
"votes": 16808,
"ci_95": "±7"
}
},
"arena_model_id": "qwen3-30b-a3b-instruct-2507",
"leaderboard_name": "qwen3-30b-a3b-instruct-2507",
"match_method": "openrouter_name",
"match_key": "qwen3-30b-a3b-instruct-2507",
"match_input": "Qwen: Qwen3 30B A3B Instruct 2507",
"arena_aliases": []
}