← Back to explorer

LiquidAI: LFM2-24B-A2B

Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.

Match confidence: Unmatched · Source type: openrouter_only
Context window
32.8K
Arena overall rank
Input price
$0.03 / 1M
Output price
$0.12 / 1M

Identifiers & provenance

Primary ID
liquid/lfm-2-24b-a2b
OpenRouter ID
liquid/lfm-2-24b-a2b
Canonical slug
liquid/lfm-2-24b-a2b-20260224

Source semantics

  • Arena rank is a human-preference leaderboard signal, not a universal truth metric.
  • OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
  • Pricing fields may differ by provider and can include extra modes beyond prompt/completion.

Read more on Methodology & data sources.

Description

LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per token, it delivers high-quality generation while maintaining low inference costs. The model fits within 32 GB of RAM, making it practical to run on consumer laptops and desktops without sacrificing capability.

Raw fields snapshot

{
  "id": "liquid/lfm-2-24b-a2b",
  "name": "LiquidAI: LFM2-24B-A2B",
  "description": "LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per token, it delivers high-quality generation while maintaining low inference costs. The model fits within 32 GB of RAM, making it practical to run on consumer laptops and desktops without sacrificing capability.",
  "created": 1772048711,
  "canonical_slug": "liquid/lfm-2-24b-a2b-20260224",
  "hugging_face_id": "LiquidAI/LFM2-24B-A2B",
  "source_type": "openrouter_only",
  "context_length": 32768,
  "max_completion_tokens": null,
  "is_moderated": false,
  "architecture": {
    "modality": "text->text",
    "input_modalities": [
      "text"
    ],
    "output_modalities": [
      "text"
    ],
    "tokenizer": "Other",
    "instruct_type": null
  },
  "input_modalities": [
    "text"
  ],
  "output_modalities": [
    "text"
  ],
  "modality": "text->text",
  "tokenizer": "Other",
  "instruct_type": null,
  "supported_parameters": [
    "frequency_penalty",
    "logit_bias",
    "max_tokens",
    "min_p",
    "presence_penalty",
    "repetition_penalty",
    "stop",
    "temperature",
    "top_k",
    "top_p"
  ],
  "default_parameters": {
    "temperature": 0.1,
    "top_p": null,
    "top_k": 50,
    "frequency_penalty": null,
    "presence_penalty": null,
    "repetition_penalty": 1.05
  },
  "per_request_limits": null,
  "top_provider": {
    "context_length": 32768,
    "max_completion_tokens": null,
    "is_moderated": false
  },
  "pricing": {
    "prompt": "0.00000003",
    "completion": "0.00000012"
  },
  "PPM": {
    "prompt": 0.03,
    "completion": 0.12
  },
  "openrouter_raw": {
    "id": "liquid/lfm-2-24b-a2b",
    "canonical_slug": "liquid/lfm-2-24b-a2b-20260224",
    "hugging_face_id": "LiquidAI/LFM2-24B-A2B",
    "name": "LiquidAI: LFM2-24B-A2B",
    "created": 1772048711,
    "description": "LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per token, it delivers high-quality generation while maintaining low inference costs. The model fits within 32 GB of RAM, making it practical to run on consumer laptops and desktops without sacrificing capability.",
    "context_length": 32768,
    "architecture": {
      "modality": "text->text",
      "input_modalities": [
        "text"
      ],
      "output_modalities": [
        "text"
      ],
      "tokenizer": "Other",
      "instruct_type": null
    },
    "pricing": {
      "prompt": "0.00000003",
      "completion": "0.00000012"
    },
    "top_provider": {
      "context_length": 32768,
      "max_completion_tokens": null,
      "is_moderated": false
    },
    "per_request_limits": null,
    "supported_parameters": [
      "frequency_penalty",
      "logit_bias",
      "max_tokens",
      "min_p",
      "presence_penalty",
      "repetition_penalty",
      "stop",
      "temperature",
      "top_k",
      "top_p"
    ],
    "default_parameters": {
      "temperature": 0.1,
      "top_p": null,
      "top_k": 50,
      "frequency_penalty": null,
      "presence_penalty": null,
      "repetition_penalty": 1.05
    },
    "expiration_date": null
  }
}