← Back to explorer

Qwen: Qwen3 235B A22B Instruct 2507

Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.

Match confidence: Name match · Source type: both
Context window
262.1K
Arena overall rank
#44
Input price
$0.071 / 1M
Output price
$0.10 / 1M

Identifiers & provenance

Primary ID
qwen/qwen3-235b-a22b-2507
OpenRouter ID
qwen/qwen3-235b-a22b-2507
Arena ID
qwen3-235b-a22b-instruct-2507
Canonical slug
qwen/qwen3-235b-a22b-07-25
Match method
openrouter_name
Match key
qwen3-235b-a22b-instruct-2507

Source semantics

  • Arena rank is a human-preference leaderboard signal, not a universal truth metric.
  • OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
  • Pricing fields may differ by provider and can include extra modes beyond prompt/completion.

Read more on Methodology & data sources.

Description

Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following, logical reasoning, math, code, and tool usage. The model supports a native 262K context length and does not implement "thinking mode" (<think> blocks). Compared to its base variant, this version delivers significant gains in knowledge coverage, long-context reasoning, coding benchmarks, and alignment with open-ended tasks. It is particularly strong on multilingual understanding, math reasoning (e.g., AIME, HMMT), and alignment evaluations like Arena-Hard and WritingBench.

Raw fields snapshot

{
  "id": "qwen/qwen3-235b-a22b-2507",
  "name": "Qwen: Qwen3 235B A22B Instruct 2507",
  "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following, logical reasoning, math, code, and tool usage. The model supports a native 262K context length and does not implement \"thinking mode\" (<think> blocks).\n\nCompared to its base variant, this version delivers significant gains in knowledge coverage, long-context reasoning, coding benchmarks, and alignment with open-ended tasks. It is particularly strong on multilingual understanding, math reasoning (e.g., AIME, HMMT), and alignment evaluations like Arena-Hard and WritingBench.",
  "created": 1753119555,
  "canonical_slug": "qwen/qwen3-235b-a22b-07-25",
  "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
  "source_type": "both",
  "context_length": 262144,
  "max_completion_tokens": null,
  "is_moderated": false,
  "architecture": {
    "modality": "text->text",
    "input_modalities": [
      "text"
    ],
    "output_modalities": [
      "text"
    ],
    "tokenizer": "Qwen3",
    "instruct_type": null
  },
  "input_modalities": [
    "text"
  ],
  "output_modalities": [
    "text"
  ],
  "modality": "text->text",
  "tokenizer": "Qwen3",
  "instruct_type": null,
  "supported_parameters": [
    "frequency_penalty",
    "include_reasoning",
    "logit_bias",
    "logprobs",
    "max_tokens",
    "min_p",
    "presence_penalty",
    "reasoning",
    "reasoning_effort",
    "repetition_penalty",
    "response_format",
    "seed",
    "stop",
    "structured_outputs",
    "temperature",
    "tool_choice",
    "tools",
    "top_k",
    "top_logprobs",
    "top_p"
  ],
  "default_parameters": {},
  "per_request_limits": null,
  "top_provider": {
    "context_length": 262144,
    "max_completion_tokens": null,
    "is_moderated": false
  },
  "pricing": {
    "prompt": "0.000000071",
    "completion": "0.0000001"
  },
  "PPM": {
    "prompt": 0.071,
    "completion": 0.1
  },
  "openrouter_raw": {
    "id": "qwen/qwen3-235b-a22b-2507",
    "canonical_slug": "qwen/qwen3-235b-a22b-07-25",
    "hugging_face_id": "Qwen/Qwen3-235B-A22B-Instruct-2507",
    "name": "Qwen: Qwen3 235B A22B Instruct 2507",
    "created": 1753119555,
    "description": "Qwen3-235B-A22B-Instruct-2507 is a multilingual, instruction-tuned mixture-of-experts language model based on the Qwen3-235B architecture, with 22B active parameters per forward pass. It is optimized for general-purpose text generation, including instruction following, logical reasoning, math, code, and tool usage. The model supports a native 262K context length and does not implement \"thinking mode\" (<think> blocks).\n\nCompared to its base variant, this version delivers significant gains in knowledge coverage, long-context reasoning, coding benchmarks, and alignment with open-ended tasks. It is particularly strong on multilingual understanding, math reasoning (e.g., AIME, HMMT), and alignment evaluations like Arena-Hard and WritingBench.",
    "context_length": 262144,
    "architecture": {
      "modality": "text->text",
      "input_modalities": [
        "text"
      ],
      "output_modalities": [
        "text"
      ],
      "tokenizer": "Qwen3",
      "instruct_type": null
    },
    "pricing": {
      "prompt": "0.000000071",
      "completion": "0.0000001"
    },
    "top_provider": {
      "context_length": 262144,
      "max_completion_tokens": null,
      "is_moderated": false
    },
    "per_request_limits": null,
    "supported_parameters": [
      "frequency_penalty",
      "include_reasoning",
      "logit_bias",
      "logprobs",
      "max_tokens",
      "min_p",
      "presence_penalty",
      "reasoning",
      "reasoning_effort",
      "repetition_penalty",
      "response_format",
      "seed",
      "stop",
      "structured_outputs",
      "temperature",
      "tool_choice",
      "tools",
      "top_k",
      "top_logprobs",
      "top_p"
    ],
    "default_parameters": {},
    "expiration_date": null
  },
  "categories": {
    "text-expert": {
      "score": 1451,
      "rank_ub": 34,
      "votes": 3718,
      "ci_95": "±10"
    },
    "text-industry-software-and-it-services": {
      "score": 1461,
      "rank_ub": 38,
      "votes": 24542,
      "ci_95": "±5"
    },
    "text-industry-writing-and-literature-and-language": {
      "score": 1397,
      "rank_ub": 54,
      "votes": 15710,
      "ci_95": "±5"
    },
    "text-overall": {
      "score": 1422,
      "rank_ub": 44,
      "votes": 71551,
      "ci_95": "±3"
    },
    "text-industry-life-and-physical-and-social-science": {
      "score": 1445,
      "rank_ub": 43,
      "votes": 11213,
      "ci_95": "±6"
    },
    "text-industry-mathematical": {
      "score": 1440,
      "rank_ub": 33,
      "votes": 3531,
      "ci_95": "±10"
    },
    "text-industry-entertainment-and-sports-and-media": {
      "score": 1376,
      "rank_ub": 66,
      "votes": 12653,
      "ci_95": "±6"
    },
    "text-industry-business-and-management-and-financial-operations": {
      "score": 1431,
      "rank_ub": 35,
      "votes": 13078,
      "ci_95": "±6"
    },
    "text-industry-medicine-and-healthcare": {
      "score": 1460,
      "rank_ub": 34,
      "votes": 3931,
      "ci_95": "±10"
    },
    "text-industry-legal-and-government": {
      "score": 1430,
      "rank_ub": 44,
      "votes": 4624,
      "ci_95": "±9"
    },
    "text-instruction-following": {
      "score": 1415,
      "rank_ub": 38,
      "votes": 18808,
      "ci_95": "±5"
    },
    "text-math": {
      "score": 1426,
      "rank_ub": 37,
      "votes": 4364,
      "ci_95": "±9"
    },
    "text-creative-writing": {
      "score": 1383,
      "rank_ub": 60,
      "votes": 9598,
      "ci_95": "±6"
    },
    "text-multi-turn": {
      "score": 1438,
      "rank_ub": 36,
      "votes": 12468,
      "ci_95": "±6"
    },
    "text-hard-prompts": {
      "score": 1447,
      "rank_ub": 37,
      "votes": 35982,
      "ci_95": "±4"
    },
    "text-coding": {
      "score": 1471,
      "rank_ub": 40,
      "votes": 14337,
      "ci_95": "±6"
    },
    "text-hard-prompts-english": {
      "score": 1452,
      "rank_ub": 43,
      "votes": 17687,
      "ci_95": "±5"
    },
    "text-longer-query": {
      "score": 1436,
      "rank_ub": 42,
      "votes": 16224,
      "ci_95": "±5"
    },
    "text-english": {
      "score": 1432,
      "rank_ub": 48,
      "votes": 33224,
      "ci_95": "±4"
    },
    "text-chinese": {
      "score": 1471,
      "rank_ub": 30,
      "votes": 3717,
      "ci_95": "±11"
    },
    "text-french": {
      "score": 1461,
      "rank_ub": 24,
      "votes": 1175,
      "ci_95": "±20"
    },
    "text-spanish": {
      "score": 1424,
      "rank_ub": 33,
      "votes": 1725,
      "ci_95": "±16"
    },
    "text-russian": {
      "score": 1412,
      "rank_ub": 46,
      "votes": 5552,
      "ci_95": "±8"
    },
    "text-german": {
      "score": 1416,
      "rank_ub": 33,
      "votes": 1293,
      "ci_95": "±18"
    },
    "text-japanese": {
      "score": 1378,
      "rank_ub": 30,
      "votes": 1082,
      "ci_95": "±19"
    },
    "text-korean": {
      "score": 1378,
      "rank_ub": 27,
      "votes": 1250,
      "ci_95": "±19"
    },
    "text-exclude-ties": {
      "score": 1415,
      "rank_ub": 43,
      "votes": 50208,
      "ci_95": "±4"
    }
  },
  "arena_model_id": "qwen3-235b-a22b-instruct-2507",
  "leaderboard_name": "qwen3-235b-a22b-instruct-2507",
  "match_method": "openrouter_name",
  "match_key": "qwen3-235b-a22b-instruct-2507",
  "match_input": "Qwen: Qwen3 235B A22B Instruct 2507",
  "arena_aliases": []
}
Qwen: Qwen3 235B A22B Instruct 2507 · NNZen