Qwen: Qwen3 235B A22B Thinking 2507
Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.
Identifiers & provenance
- Primary ID
- qwen/qwen3-235b-a22b-thinking-2507
- OpenRouter ID
- qwen/qwen3-235b-a22b-thinking-2507
- Arena ID
- qwen3-235b-a22b-thinking-2507
- Canonical slug
- qwen/qwen3-235b-a22b-thinking-2507
- Match method
- openrouter_name
- Match key
- qwen3-235b-a22b-thinking-2507
Source semantics
- Arena rank is a human-preference leaderboard signal, not a universal truth metric.
- OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
- Pricing fields may differ by provider and can include extra modes beyond prompt/completion.
Read more on the Methodology & data sources page.
Description
Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144 tokens of context. This "thinking-only" variant enhances structured logical reasoning, mathematics, science, and long-form generation, showing strong benchmark performance across AIME, SuperGPQA, LiveCodeBench, and MMLU-Redux. It enforces a special reasoning mode (</think>) and is designed for high-token outputs (up to 81,920 tokens) in challenging domains. The model is instruction-tuned and excels at step-by-step reasoning, tool use, agentic workflows, and multilingual tasks. This release represents the most capable open-source variant in the Qwen3-235B series, surpassing many closed models in structured reasoning use cases.
Raw fields snapshot
{
"id": "qwen/qwen3-235b-a22b-thinking-2507",
"name": "Qwen: Qwen3 235B A22B Thinking 2507",
"description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144 tokens of context. This \"thinking-only\" variant enhances structured logical reasoning, mathematics, science, and long-form generation, showing strong benchmark performance across AIME, SuperGPQA, LiveCodeBench, and MMLU-Redux. It enforces a special reasoning mode (</think>) and is designed for high-token outputs (up to 81,920 tokens) in challenging domains.\n\nThe model is instruction-tuned and excels at step-by-step reasoning, tool use, agentic workflows, and multilingual tasks. This release represents the most capable open-source variant in the Qwen3-235B series, surpassing many closed models in structured reasoning use cases.",
"created": 1753449557,
"canonical_slug": "qwen/qwen3-235b-a22b-thinking-2507",
"hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"source_type": "both",
"context_length": 131072,
"max_completion_tokens": null,
"is_moderated": false,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Qwen3",
"instruct_type": "qwen3"
},
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"modality": "text->text",
"tokenizer": "Qwen3",
"instruct_type": "qwen3",
"supported_parameters": [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
],
"default_parameters": {
"temperature": null,
"top_p": null,
"frequency_penalty": null
},
"per_request_limits": null,
"top_provider": {
"context_length": 131072,
"max_completion_tokens": null,
"is_moderated": false
},
"pricing": {
"prompt": "0",
"completion": "0",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"PPM": {
"prompt": 0,
"completion": 0,
"request": 0,
"image": 0,
"web_search": 0,
"internal_reasoning": 0
},
"openrouter_raw": {
"id": "qwen/qwen3-235b-a22b-thinking-2507",
"canonical_slug": "qwen/qwen3-235b-a22b-thinking-2507",
"hugging_face_id": "Qwen/Qwen3-235B-A22B-Thinking-2507",
"name": "Qwen: Qwen3 235B A22B Thinking 2507",
"created": 1753449557,
"description": "Qwen3-235B-A22B-Thinking-2507 is a high-performance, open-weight Mixture-of-Experts (MoE) language model optimized for complex reasoning tasks. It activates 22B of its 235B parameters per forward pass and natively supports up to 262,144 tokens of context. This \"thinking-only\" variant enhances structured logical reasoning, mathematics, science, and long-form generation, showing strong benchmark performance across AIME, SuperGPQA, LiveCodeBench, and MMLU-Redux. It enforces a special reasoning mode (</think>) and is designed for high-token outputs (up to 81,920 tokens) in challenging domains.\n\nThe model is instruction-tuned and excels at step-by-step reasoning, tool use, agentic workflows, and multilingual tasks. This release represents the most capable open-source variant in the Qwen3-235B series, surpassing many closed models in structured reasoning use cases.",
"context_length": 131072,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Qwen3",
"instruct_type": "qwen3"
},
"pricing": {
"prompt": "0",
"completion": "0",
"request": "0",
"image": "0",
"web_search": "0",
"internal_reasoning": "0"
},
"top_provider": {
"context_length": 131072,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null,
"supported_parameters": [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
],
"default_parameters": {
"temperature": null,
"top_p": null,
"frequency_penalty": null
},
"expiration_date": null
},
"categories": {
"text-expert": {
"score": 1465,
"rank_ub": 25,
"votes": 417,
"ci_95": "±29"
},
"text-industry-software-and-it-services": {
"score": 1431,
"rank_ub": 74,
"votes": 3035,
"ci_95": "±11"
},
"text-industry-writing-and-literature-and-language": {
"score": 1377,
"rank_ub": 76,
"votes": 1932,
"ci_95": "±13"
},
"text-overall": {
"score": 1399,
"rank_ub": 75,
"votes": 9186,
"ci_95": "±6"
},
"text-industry-life-and-physical-and-social-science": {
"score": 1411,
"rank_ub": 80,
"votes": 1544,
"ci_95": "±15"
},
"text-industry-mathematical": {
"score": 1430,
"rank_ub": 38,
"votes": 508,
"ci_95": "±25"
},
"text-industry-entertainment-and-sports-and-media": {
"score": 1351,
"rank_ub": 90,
"votes": 1600,
"ci_95": "±15"
},
"text-industry-business-and-management-and-financial-operations": {
"score": 1399,
"rank_ub": 70,
"votes": 1569,
"ci_95": "±15"
},
"text-industry-medicine-and-healthcare": {
"score": 1434,
"rank_ub": 68,
"votes": 533,
"ci_95": "±26"
},
"text-industry-legal-and-government": {
"score": 1388,
"rank_ub": 94,
"votes": 561,
"ci_95": "±24"
},
"text-instruction-following": {
"score": 1388,
"rank_ub": 74,
"votes": 2140,
"ci_95": "±12"
},
"text-math": {
"score": 1407,
"rank_ub": 68,
"votes": 505,
"ci_95": "±24"
},
"text-creative-writing": {
"score": 1372,
"rank_ub": 75,
"votes": 1127,
"ci_95": "±18"
},
"text-multi-turn": {
"score": 1393,
"rank_ub": 81,
"votes": 1416,
"ci_95": "±15"
},
"text-hard-prompts": {
"score": 1418,
"rank_ub": 74,
"votes": 3912,
"ci_95": "±9"
},
"text-coding": {
"score": 1441,
"rank_ub": 75,
"votes": 1625,
"ci_95": "±15"
},
"text-hard-prompts-english": {
"score": 1427,
"rank_ub": 71,
"votes": 2054,
"ci_95": "±13"
},
"text-longer-query": {
"score": 1402,
"rank_ub": 80,
"votes": 1667,
"ci_95": "±14"
},
"text-english": {
"score": 1405,
"rank_ub": 83,
"votes": 4478,
"ci_95": "±9"
},
"text-chinese": {
"score": 1452,
"rank_ub": 45,
"votes": 425,
"ci_95": "±29"
},
"text-spanish": {
"score": 1373,
"rank_ub": 87,
"votes": 155,
"ci_95": "±44"
},
"text-russian": {
"score": 1400,
"rank_ub": 64,
"votes": 440,
"ci_95": "±26"
},
"text-german": {
"score": 1391,
"rank_ub": 48,
"votes": 209,
"ci_95": "±40"
},
"text-japanese": {
"score": 1363,
"rank_ub": 45,
"votes": 432,
"ci_95": "±30"
},
"text-korean": {
"score": 1340,
"rank_ub": 62,
"votes": 203,
"ci_95": "±42"
},
"text-exclude-ties": {
"score": 1380,
"rank_ub": 75,
"votes": 6473,
"ci_95": "±9"
}
},
"arena_model_id": "qwen3-235b-a22b-thinking-2507",
"leaderboard_name": "qwen3-235b-a22b-thinking-2507",
"match_method": "openrouter_name",
"match_key": "qwen3-235b-a22b-thinking-2507",
"match_input": "Qwen: Qwen3 235B A22B Thinking 2507",
"arena_aliases": []
}