← Back to explorer

Qwen: Qwen3 Next 80B A3B Thinking

Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.

Match confidence: Name match · Source type: both
Context window
128K
Arena overall rank
Input price
$0.15 / 1M
Output price
$1.20 / 1M

Identifiers & provenance

Primary ID
qwen/qwen3-next-80b-a3b-thinking
OpenRouter ID
qwen/qwen3-next-80b-a3b-thinking
Arena ID
qwen3-next-80b-a3b-thinking
Canonical slug
qwen/qwen3-next-80b-a3b-thinking-2509
Match method
openrouter_name
Match key
qwen3-next-80b-a3b-thinking

Source semantics

  • Arena rank is a human-preference leaderboard signal, not a universal truth metric.
  • OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
  • Pricing fields may differ by provider and can include extra modes beyond prompt/completion.

Read more on Methodology & data sources.

Description

Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems — math proofs, code synthesis/debugging, logic, and agentic planning — and reports strong results across knowledge, reasoning, coding, alignment, and multilingual evaluations. Compared with prior Qwen3 variants, it emphasizes stability under long chains of thought and efficient scaling during inference, and it is tuned to follow complex instructions while reducing repetitive or off-task behavior. The model is suitable for agent frameworks and tool use (function calling), retrieval-heavy workflows, and standardized benchmarking where step-by-step solutions are required. It supports long, detailed completions and leverages throughput-oriented techniques (e.g., multi-token prediction) for faster generation. Note that it operates in thinking-only mode.

Raw fields snapshot

{
  "id": "qwen/qwen3-next-80b-a3b-thinking",
  "name": "Qwen: Qwen3 Next 80B A3B Thinking",
  "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic planning, and reports strong results across knowledge, reasoning, coding, alignment, and multilingual evaluations. Compared with prior Qwen3 variants, it emphasizes stability under long chains of thought and efficient scaling during inference, and it is tuned to follow complex instructions while reducing repetitive or off-task behavior.\n\nThe model is suitable for agent frameworks and tool use (function calling), retrieval-heavy workflows, and standardized benchmarking where step-by-step solutions are required. It supports long, detailed completions and leverages throughput-oriented techniques (e.g., multi-token prediction) for faster generation. Note that it operates in thinking-only mode.",
  "created": 1757612284,
  "canonical_slug": "qwen/qwen3-next-80b-a3b-thinking-2509",
  "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking",
  "source_type": "both",
  "context_length": 128000,
  "max_completion_tokens": null,
  "is_moderated": false,
  "architecture": {
    "modality": "text->text",
    "input_modalities": [
      "text"
    ],
    "output_modalities": [
      "text"
    ],
    "tokenizer": "Qwen3",
    "instruct_type": null
  },
  "input_modalities": [
    "text"
  ],
  "output_modalities": [
    "text"
  ],
  "modality": "text->text",
  "tokenizer": "Qwen3",
  "instruct_type": null,
  "supported_parameters": [
    "frequency_penalty",
    "include_reasoning",
    "logit_bias",
    "max_tokens",
    "min_p",
    "presence_penalty",
    "reasoning",
    "repetition_penalty",
    "response_format",
    "seed",
    "stop",
    "structured_outputs",
    "temperature",
    "tool_choice",
    "tools",
    "top_k",
    "top_p"
  ],
  "default_parameters": {
    "temperature": null,
    "top_p": null,
    "frequency_penalty": null
  },
  "per_request_limits": null,
  "top_provider": {
    "context_length": 128000,
    "max_completion_tokens": null,
    "is_moderated": false
  },
  "pricing": {
    "prompt": "0.00000015",
    "completion": "0.0000012"
  },
  "PPM": {
    "prompt": 0.15,
    "completion": 1.2
  },
  "openrouter_raw": {
    "id": "qwen/qwen3-next-80b-a3b-thinking",
    "canonical_slug": "qwen/qwen3-next-80b-a3b-thinking-2509",
    "hugging_face_id": "Qwen/Qwen3-Next-80B-A3B-Thinking",
    "name": "Qwen: Qwen3 Next 80B A3B Thinking",
    "created": 1757612284,
    "description": "Qwen3-Next-80B-A3B-Thinking is a reasoning-first chat model in the Qwen3-Next line that outputs structured “thinking” traces by default. It’s designed for hard multi-step problems; math proofs, code synthesis/debugging, logic, and agentic planning, and reports strong results across knowledge, reasoning, coding, alignment, and multilingual evaluations. Compared with prior Qwen3 variants, it emphasizes stability under long chains of thought and efficient scaling during inference, and it is tuned to follow complex instructions while reducing repetitive or off-task behavior.\n\nThe model is suitable for agent frameworks and tool use (function calling), retrieval-heavy workflows, and standardized benchmarking where step-by-step solutions are required. It supports long, detailed completions and leverages throughput-oriented techniques (e.g., multi-token prediction) for faster generation. Note that it operates in thinking-only mode.",
    "context_length": 128000,
    "architecture": {
      "modality": "text->text",
      "input_modalities": [
        "text"
      ],
      "output_modalities": [
        "text"
      ],
      "tokenizer": "Qwen3",
      "instruct_type": null
    },
    "pricing": {
      "prompt": "0.00000015",
      "completion": "0.0000012"
    },
    "top_provider": {
      "context_length": 128000,
      "max_completion_tokens": null,
      "is_moderated": false
    },
    "per_request_limits": null,
    "supported_parameters": [
      "frequency_penalty",
      "include_reasoning",
      "logit_bias",
      "max_tokens",
      "min_p",
      "presence_penalty",
      "reasoning",
      "repetition_penalty",
      "response_format",
      "seed",
      "stop",
      "structured_outputs",
      "temperature",
      "tool_choice",
      "tools",
      "top_k",
      "top_p"
    ],
    "default_parameters": {
      "temperature": null,
      "top_p": null,
      "frequency_penalty": null
    },
    "expiration_date": null
  },
  "categories": {
    "text-expert": {
      "score": 1386,
      "rank_ub": 96,
      "votes": 619,
      "ci_95": "±24"
    },
    "text-industry-software-and-it-services": {
      "score": 1409,
      "rank_ub": 101,
      "votes": 4853,
      "ci_95": "±9"
    },
    "text-industry-writing-and-literature-and-language": {
      "score": 1337,
      "rank_ub": 118,
      "votes": 3068,
      "ci_95": "±11"
    },
    "text-overall": {
      "score": 1369,
      "rank_ub": 106,
      "votes": 13767,
      "ci_95": "±6"
    },
    "text-industry-life-and-physical-and-social-science": {
      "score": 1380,
      "rank_ub": 112,
      "votes": 2130,
      "ci_95": "±13"
    },
    "text-industry-mathematical": {
      "score": 1396,
      "rank_ub": 84,
      "votes": 659,
      "ci_95": "±22"
    },
    "text-industry-entertainment-and-sports-and-media": {
      "score": 1317,
      "rank_ub": 123,
      "votes": 2447,
      "ci_95": "±12"
    },
    "text-industry-business-and-management-and-financial-operations": {
      "score": 1364,
      "rank_ub": 110,
      "votes": 2493,
      "ci_95": "±12"
    },
    "text-industry-medicine-and-healthcare": {
      "score": 1395,
      "rank_ub": 103,
      "votes": 725,
      "ci_95": "±22"
    },
    "text-industry-legal-and-government": {
      "score": 1355,
      "rank_ub": 128,
      "votes": 861,
      "ci_95": "±20"
    },
    "text-instruction-following": {
      "score": 1358,
      "rank_ub": 106,
      "votes": 3521,
      "ci_95": "±10"
    },
    "text-math": {
      "score": 1398,
      "rank_ub": 81,
      "votes": 824,
      "ci_95": "±20"
    },
    "text-creative-writing": {
      "score": 1325,
      "rank_ub": 119,
      "votes": 1781,
      "ci_95": "±14"
    },
    "text-multi-turn": {
      "score": 1351,
      "rank_ub": 118,
      "votes": 2322,
      "ci_95": "±13"
    },
    "text-hard-prompts": {
      "score": 1384,
      "rank_ub": 106,
      "votes": 6704,
      "ci_95": "±8"
    },
    "text-coding": {
      "score": 1421,
      "rank_ub": 102,
      "votes": 2688,
      "ci_95": "±11"
    },
    "text-hard-prompts-english": {
      "score": 1396,
      "rank_ub": 109,
      "votes": 3495,
      "ci_95": "±10"
    },
    "text-longer-query": {
      "score": 1370,
      "rank_ub": 109,
      "votes": 2811,
      "ci_95": "±11"
    },
    "text-english": {
      "score": 1389,
      "rank_ub": 102,
      "votes": 6668,
      "ci_95": "±8"
    },
    "text-chinese": {
      "score": 1409,
      "rank_ub": 83,
      "votes": 702,
      "ci_95": "±22"
    },
    "text-french": {
      "score": 1373,
      "rank_ub": 98,
      "votes": 209,
      "ci_95": "±40"
    },
    "text-spanish": {
      "score": 1354,
      "rank_ub": 105,
      "votes": 408,
      "ci_95": "±30"
    },
    "text-russian": {
      "score": 1350,
      "rank_ub": 112,
      "votes": 755,
      "ci_95": "±20"
    },
    "text-german": {
      "score": 1358,
      "rank_ub": 80,
      "votes": 289,
      "ci_95": "±33"
    },
    "text-japanese": {
      "score": 1294,
      "rank_ub": 83,
      "votes": 187,
      "ci_95": "±45"
    },
    "text-korean": {
      "score": 1301,
      "rank_ub": 91,
      "votes": 319,
      "ci_95": "±35"
    },
    "text-exclude-ties": {
      "score": 1337,
      "rank_ub": 106,
      "votes": 9763,
      "ci_95": "±8"
    }
  },
  "arena_model_id": "qwen3-next-80b-a3b-thinking",
  "leaderboard_name": "qwen3-next-80b-a3b-thinking",
  "match_method": "openrouter_name",
  "match_key": "qwen3-next-80b-a3b-thinking",
  "match_input": "Qwen: Qwen3 Next 80B A3B Thinking",
  "arena_aliases": []
}
Qwen: Qwen3 Next 80B A3B Thinking · NNZen