LiquidAI: LFM2-8B-A1B
Server-rendered model summary page for indexing/share previews. Use the interactive explorer for full filtering and comparison.
Match confidence: Unmatched · Source type: openrouter_only
Context window
32.8K
Arena overall rank
—
Input price
$0.010 / 1M
Output price
$0.020 / 1M
Identifiers & provenance
- Primary ID: liquid/lfm2-8b-a1b
- OpenRouter ID: liquid/lfm2-8b-a1b
- Canonical slug: liquid/lfm2-8b-a1b
Source semantics
- Arena rank is a human-preference leaderboard signal, not a universal truth metric.
- OpenRouter usage/popularity reflects adoption/traffic, not benchmark quality.
- Pricing fields may differ by provider and can include extra modes beyond prompt/completion.
Read more on Methodology & data sources.
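
The per-million prices shown above are derived from the snapshot's per-token pricing strings. A minimal sketch of that conversion, assuming the field names from the raw snapshot below; the helper name is ours, not part of any API:

// Convert OpenRouter's per-token USD price strings (e.g. "0.00000001")
// into dollars per 1M tokens, matching the snapshot's PPM fields.
interface Pricing {
  prompt: string;     // USD per input token, as a decimal string
  completion: string; // USD per output token, as a decimal string
}

function toPerMillion(pricing: Pricing): { prompt: number; completion: number } {
  const perMillion = (perToken: string) => Number(perToken) * 1_000_000;
  return {
    prompt: perMillion(pricing.prompt),         // "0.00000001" -> 0.01
    completion: perMillion(pricing.completion), // "0.00000002" -> 0.02
  };
}

// Using the values from this page's snapshot:
console.log(toPerMillion({ prompt: "0.00000001", completion: "0.00000002" }));
// -> { prompt: 0.01, completion: 0.02 }, i.e. $0.010 / 1M input, $0.020 / 1M output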
Description
LFM2-8B-A1B is an efficient on-device Mixture-of-Experts (MoE) model from Liquid AI’s LFM2 family, built for fast, high-quality inference on edge hardware. It uses 8.3B total parameters with only ~1.5B active per token, delivering strong performance while keeping compute and memory usage low—making it ideal for phones, tablets, and laptops.
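
To give a rough sense of why the MoE layout matters on edge hardware: per-token compute scales with active parameters, not total parameters. A back-of-the-envelope sketch, assuming the common ~2 FLOPs per active parameter per generated token heuristic (an assumption, not a published figure for this model):

// Rough MoE compute estimate. Assumption: ~2 FLOPs per active
// parameter per generated token (a common heuristic, not a spec).
const totalParams = 8.3e9;  // total parameters, from the description
const activeParams = 1.5e9; // parameters active per token, from the description

const activeFraction = activeParams / totalParams; // ~0.18
const flopsPerToken = 2 * activeParams;            // ~3.0e9 FLOPs/token
const denseEquivalent = 2 * totalParams;           // ~1.7e10 FLOPs/token for a dense 8.3B

console.log(`active fraction: ${(activeFraction * 100).toFixed(0)}%`);
console.log(`~${(denseEquivalent / flopsPerToken).toFixed(1)}x less compute per token than dense`);

Note that the saving is in per-token compute: weights for all 8.3B parameters still need to be resident in memory.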
Raw fields snapshot
{
"id": "liquid/lfm2-8b-a1b",
"name": "LiquidAI: LFM2-8B-A1B",
"description": "LFM2-8B-A1B is an efficient on-device Mixture-of-Experts (MoE) model from Liquid AI’s LFM2 family, built for fast, high-quality inference on edge hardware. It uses 8.3B total parameters with only ~1.5B active per token, delivering strong performance while keeping compute and memory usage low—making it ideal for phones, tablets, and laptops.",
"created": 1760970984,
"canonical_slug": "liquid/lfm2-8b-a1b",
"hugging_face_id": "LiquidAI/LFM2-8B-A1B",
"source_type": "openrouter_only",
"context_length": 32768,
"max_completion_tokens": null,
"is_moderated": false,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Other",
"instruct_type": null
},
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"modality": "text->text",
"tokenizer": "Other",
"instruct_type": null,
"supported_parameters": [
"frequency_penalty",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"seed",
"stop",
"temperature",
"top_k",
"top_p"
],
"default_parameters": {
"temperature": null,
"top_p": null,
"frequency_penalty": null
},
"per_request_limits": null,
"top_provider": {
"context_length": 32768,
"max_completion_tokens": null,
"is_moderated": false
},
"pricing": {
"prompt": "0.00000001",
"completion": "0.00000002"
},
"PPM": {
"prompt": 0.01,
"completion": 0.02
},
"openrouter_raw": {
"id": "liquid/lfm2-8b-a1b",
"canonical_slug": "liquid/lfm2-8b-a1b",
"hugging_face_id": "LiquidAI/LFM2-8B-A1B",
"name": "LiquidAI: LFM2-8B-A1B",
"created": 1760970984,
"description": "LFM2-8B-A1B is an efficient on-device Mixture-of-Experts (MoE) model from Liquid AI’s LFM2 family, built for fast, high-quality inference on edge hardware. It uses 8.3B total parameters with only ~1.5B active per token, delivering strong performance while keeping compute and memory usage low—making it ideal for phones, tablets, and laptops.",
"context_length": 32768,
"architecture": {
"modality": "text->text",
"input_modalities": [
"text"
],
"output_modalities": [
"text"
],
"tokenizer": "Other",
"instruct_type": null
},
"pricing": {
"prompt": "0.00000001",
"completion": "0.00000002"
},
"top_provider": {
"context_length": 32768,
"max_completion_tokens": null,
"is_moderated": false
},
"per_request_limits": null,
"supported_parameters": [
"frequency_penalty",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"seed",
"stop",
"temperature",
"top_k",
"top_p"
],
"default_parameters": {
"temperature": null,
"top_p": null,
"frequency_penalty": null
},
"expiration_date": null
}
}
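
For completeness, a minimal sketch of calling this model through OpenRouter's standard chat completions endpoint, using only sampling parameters from the supported_parameters list in the snapshot above; the prompt and parameter values are illustrative:

// Minimal chat completion request for liquid/lfm2-8b-a1b via OpenRouter.
// Every tuning knob used here appears under supported_parameters above.
const response = await fetch("https://openrouter.ai/api/v1/chat/completions", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${process.env.OPENROUTER_API_KEY}`, // your API key
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "liquid/lfm2-8b-a1b",
    messages: [{ role: "user", content: "Summarize what an MoE model is." }],
    temperature: 0.7, // supported: temperature
    top_p: 0.9,       // supported: top_p
    max_tokens: 256,  // supported: max_tokens
    seed: 42,         // supported: seed
  }),
});

const data = await response.json();
console.log(data.choices[0].message.content);

Note that default_parameters in the snapshot are all null, so anything you do not set falls back to provider defaults.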