{% extends "main.html" %}
{% block tabs %}
{{ super() }}
{% set lang = config.theme.language | default("en") %}
{% set t = {
"hero_title": {
"en": "Which LLM? Which engine?
Which combo wins on your Mac?",
"fr": "Quel LLM ? Quel moteur ?
Quelle combo gagne sur votre Mac ?",
"de": "Welches LLM? Welche Engine?
Welche Kombi gewinnt auf Ihrem Mac?",
"es": "¿Cuál LLM? ¿Cuál motor?
¿Qué combo gana en tu Mac?",
"it": "Quale LLM? Quale motore?
Quale combo vince sul tuo Mac?",
"pt": "Qual LLM? Qual motor?
Qual combo vence no seu Mac?",
"zh": "哪个LLM?哪个引擎?
哪种组合在你的Mac上最强?",
"ja": "どのLLM?どのエンジン?
あなたのMacで最強の組み合わせは?",
"ko": "어떤 LLM? 어떤 엔진?
당신의 Mac에서 최강 조합은?"
},
"hero_tagline": {
"en": "Benchmark to choose. Dashboard to monitor. History to spot problems.",
"fr": "Benchmarker pour choisir. Dashboard pour monitorer. Historique pour repérer les problèmes.",
"de": "Benchmarken zum Auswählen. Dashboard zum Überwachen. Verlauf zum Erkennen von Problemen.",
"es": "Benchmark para elegir. Dashboard para monitorear. Historial para detectar problemas.",
"it": "Benchmark per scegliere. Dashboard per monitorare. Storico per individuare i problemi.",
"pt": "Benchmark para escolher. Dashboard para monitorar. Histórico para detectar problemas.",
"zh": "跑分来选择。仪表盘来监控。历史记录来发现问题。",
"ja": "ベンチマークで選ぶ。ダッシュボードで監視。履歴で問題を発見。",
"ko": "벤치마크로 선택하고. 대시보드로 모니터링하고. 이력으로 문제를 발견하세요."
},
"cta_start": {
"en": "Get Started", "fr": "Démarrer", "de": "Loslegen",
"es": "Empezar", "it": "Inizia", "pt": "Começar",
"zh": "开始", "ja": "始める", "ko": "시작하기"
},
"cta_github": {
"en": "View on GitHub", "fr": "Voir sur GitHub", "de": "Auf GitHub ansehen",
"es": "Ver en GitHub", "it": "Vedi su GitHub", "pt": "Ver no GitHub",
"zh": "在GitHub上查看", "ja": "GitHubで見る", "ko": "GitHub에서 보기"
},
"pain_title": {
"en": "The Local LLM Problem", "fr": "Le problème des LLM locaux", "de": "Das Problem lokaler LLMs",
"es": "El problema de los LLM locales", "it": "Il problema dei LLM locali", "pt": "O problema dos LLMs locais",
"zh": "本地LLM的问题", "ja": "ローカルLLMの課題", "ko": "로컬 LLM의 문제"
},
"pain_subtitle": {
"en": "Sound familiar?", "fr": "Ça vous dit quelque chose ?", "de": "Kommt Ihnen das bekannt vor?",
"es": "¿Te suena?", "it": "Ti suona familiare?", "pt": "Parece familiar?",
"zh": "听起来熟悉吗?", "ja": "心当たりありませんか?", "ko": "익숙하지 않으세요?"
},
"pain1_title": {
"en": "Fragmented", "fr": "Fragmenté", "de": "Fragmentiert",
"es": "Fragmentado", "it": "Frammentato", "pt": "Fragmentado",
"zh": "碎片化", "ja": "断片化", "ko": "파편화"
},
"pain1_desc": {
"en": "Ollama, LM Studio, mlx-lm — each with its own CLI, formats, and metrics. No common ground.",
"fr": "Ollama, LM Studio, mlx-lm — chacun avec son CLI, ses formats et ses métriques. Aucun terrain commun.",
"de": "Ollama, LM Studio, mlx-lm — jede mit eigenem CLI, Formaten und Metriken. Kein gemeinsamer Nenner.",
"es": "Ollama, LM Studio, mlx-lm — cada uno con su CLI, formatos y métricas. Sin terreno común.",
"it": "Ollama, LM Studio, mlx-lm — ognuno con il proprio CLI, formati e metriche. Nessun terreno comune.",
"pt": "Ollama, LM Studio, mlx-lm — cada um com seu CLI, formatos e métricas. Nenhum terreno comum.",
"zh": "Ollama、LM Studio、mlx-lm — 各有各的CLI、格式和指标。毫无统一标准。",
"ja": "Ollama、LM Studio、mlx-lm — それぞれ独自のCLI、フォーマット、指標。共通基盤なし。",
"ko": "Ollama, LM Studio, mlx-lm — 각각 다른 CLI, 형식, 지표. 공통 기반 없음."
},
"pain2_title": {
"en": "Blind", "fr": "À l'aveugle", "de": "Blind",
"es": "A ciegas", "it": "Alla cieca", "pt": "Às cegas",
"zh": "盲目", "ja": "盲目", "ko": "맹목"
},
"pain2_desc": {
"en": "No real-time VRAM monitoring, no power tracking, no thermal alerts. You're flying blind.",
"fr": "Pas de monitoring VRAM temps réel, pas de suivi de puissance, pas d'alertes thermiques. Vous volez à l'aveugle.",
"de": "Kein Echtzeit-VRAM-Monitoring, kein Stromverbrauch-Tracking, keine Thermal-Warnungen. Sie fliegen blind.",
"es": "Sin monitoreo VRAM en tiempo real, sin seguimiento de consumo, sin alertas térmicas. Vuelas a ciegas.",
"it": "Nessun monitoraggio VRAM in tempo reale, nessun tracking energetico, nessun allarme termico. Voli alla cieca.",
"pt": "Sem monitoramento VRAM em tempo real, sem rastreamento de energia, sem alertas térmicos. Você voa às cegas.",
"zh": "没有实时VRAM监控,没有功耗追踪,没有温度告警。完全在盲飞。",
"ja": "リアルタイムVRAM監視なし、電力追跡なし、温度アラートなし。完全に盲目飛行。",
"ko": "실시간 VRAM 모니터링 없음, 전력 추적 없음, 온도 알림 없음. 눈을 감고 날고 있는 셈."
},
"pain3_title": {
"en": "Manual", "fr": "Manuel", "de": "Manuell",
"es": "Manual", "it": "Manuale", "pt": "Manual",
"zh": "手动", "ja": "手動", "ko": "수동"
},
"pain3_desc": {
"en": "Benchmarking means curl scripts, copy-pasting numbers, and comparing in spreadsheets.",
"fr": "Benchmarker = scripts curl, copier-coller des chiffres, comparer dans des tableurs.",
"de": "Benchmarking bedeutet curl-Skripte, Zahlen kopieren und in Tabellen vergleichen.",
"es": "Benchmarking significa scripts curl, copiar números y comparar en hojas de cálculo.",
"it": "Benchmark significa script curl, copiare numeri e confrontare nei fogli di calcolo.",
"pt": "Benchmark significa scripts curl, copiar números e comparar em planilhas.",
"zh": "跑分意味着curl脚本、复制粘贴数字、用电子表格比较。",
"ja": "ベンチマーク=curlスクリプト、数値のコピペ、スプレッドシートで比較。",
"ko": "벤치마크란 curl 스크립트, 숫자 복붙, 스프레드시트에서 비교하는 것."
},
"feat_title": {
"en": "Built for Apple Silicon Power Users", "fr": "Conçu pour les power users Apple Silicon",
"de": "Entwickelt für Apple Silicon Power User", "es": "Construido para power users de Apple Silicon",
"it": "Progettato per power user Apple Silicon", "pt": "Feito para power users Apple Silicon",
"zh": "为Apple Silicon高级用户打造", "ja": "Apple Siliconパワーユーザーのために設計", "ko": "Apple Silicon 파워유저를 위해 제작"
},
"feat_subtitle": {
"en": "Everything you need to benchmark, monitor, and optimize local inference.",
"fr": "Tout ce qu'il faut pour benchmarker, monitorer et optimiser l'inférence locale.",
"de": "Alles was Sie brauchen, um lokale Inferenz zu benchmarken, überwachen und optimieren.",
"es": "Todo lo que necesitas para benchmark, monitoreo y optimización de inferencia local.",
"it": "Tutto ciò che serve per benchmark, monitoraggio e ottimizzazione dell'inferenza locale.",
"pt": "Tudo que você precisa para benchmark, monitoramento e otimização de inferência local.",
"zh": "跑分、监控、优化本地推理所需的一切。",
"ja": "ローカル推論のベンチマーク、監視、最適化に必要なすべて。",
"ko": "로컬 추론의 벤치마크, 모니터링, 최적화에 필요한 모든 것."
},
"feat1_t": {"en": "Head-to-Head Benchmarks", "fr": "Benchmarks face-à-face", "de": "Direkte Vergleichs-Benchmarks", "es": "Benchmarks cara a cara", "it": "Benchmark testa a testa", "pt": "Benchmarks frente a frente", "zh": "一对一跑分对决", "ja": "ヘッドツーヘッド・ベンチマーク", "ko": "1:1 벤치마크 대결"},
"feat1_d": {
"en": "Same model on Ollama vs LM Studio vs mlx-lm. One command, real numbers. No vibes.",
"fr": "Même modèle sur Ollama vs LM Studio vs mlx-lm. Une commande, des vrais chiffres.",
"de": "Gleiches Modell auf Ollama vs LM Studio vs mlx-lm. Ein Befehl, echte Zahlen.",
"es": "Mismo modelo en Ollama vs LM Studio vs mlx-lm. Un comando, números reales.",
"it": "Stesso modello su Ollama vs LM Studio vs mlx-lm. Un comando, numeri reali.",
"pt": "Mesmo modelo no Ollama vs LM Studio vs mlx-lm. Um comando, números reais.",
"zh": "同一模型在Ollama vs LM Studio vs mlx-lm上对比。一条命令,真实数据。",
"ja": "同じモデルをOllama vs LM Studio vs mlx-lmで比較。コマンド一発、実測値。",
"ko": "같은 모델을 Ollama vs LM Studio vs mlx-lm에서 비교. 명령어 하나, 실측 수치."
},
"feat2_t": {"en": "Energy Efficiency", "fr": "Efficacité énergétique", "de": "Energieeffizienz", "es": "Eficiencia energética", "it": "Efficienza energetica", "pt": "Eficiência energética", "zh": "能效测量", "ja": "エネルギー効率", "ko": "에너지 효율"},
"feat2_d": {
"en": "Measure GPU power during inference. Know your tok/s per watt — nobody else does this.",
"fr": "Mesurez la puissance GPU pendant l'inférence. Connaissez vos tok/s par watt — personne d'autre ne le fait.",
"de": "GPU-Leistung während der Inferenz messen. Kennen Sie Ihre tok/s pro Watt — das macht sonst niemand.",
"es": "Mide el consumo GPU durante la inferencia. Conoce tus tok/s por vatio — nadie más lo hace.",
"it": "Misura la potenza GPU durante l'inferenza. Conosci i tuoi tok/s per watt — nessun altro lo fa.",
"pt": "Meça a potência da GPU durante a inferência. Conheça seus tok/s por watt — ninguém mais faz isso.",
"zh": "测量推理时的GPU功耗。了解你的tok/s每瓦特 — 没有其他工具能做到。",
"ja": "推論中のGPU消費電力を測定。tok/sあたりのワット数を把握 — 他にはない機能。",
"ko": "추론 중 GPU 소비전력 측정. 와트당 tok/s를 파악 — 다른 도구에는 없는 기능."
},
"feat3_t": {"en": "7 Engines, One CLI", "fr": "7 moteurs, un seul CLI", "de": "7 Engines, ein CLI", "es": "7 motores, un CLI", "it": "7 motori, un CLI", "pt": "7 motores, um CLI", "zh": "7个引擎,一个CLI", "ja": "7エンジン、1つのCLI", "ko": "7개 엔진, 하나의 CLI"},
"feat3_d": {
"en": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Auto-detected, auto-configured.",
"fr": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Détection et configuration automatiques.",
"de": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Automatisch erkannt und konfiguriert.",
"es": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Detección y configuración automática.",
"it": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Rilevamento e configurazione automatici.",
"pt": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. Detecção e configuração automáticas.",
"zh": "Ollama、LM Studio、mlx-lm、llama.cpp、oMLX、vllm-mlx、Exo。自动检测,自动配置。",
"ja": "Ollama、LM Studio、mlx-lm、llama.cpp、oMLX、vllm-mlx、Exo。自動検出、自動設定。",
"ko": "Ollama, LM Studio, mlx-lm, llama.cpp, oMLX, vllm-mlx, Exo. 자동 감지, 자동 설정."
},
"feat4_t": {"en": "Zero Dependencies", "fr": "Zéro dépendance", "de": "Keine Abhängigkeiten", "es": "Sin dependencias", "it": "Zero dipendenze", "pt": "Zero dependências", "zh": "零依赖", "ja": "依存関係ゼロ", "ko": "의존성 제로"},
"feat4_d": {
"en": "stdlib Python only. No requests, no psutil, no rich. Installs in seconds.",
"fr": "Stdlib Python uniquement. Pas de requests, pas de psutil, pas de rich. Installation en secondes.",
"de": "Nur Python-Standardbibliothek. Kein requests, kein psutil, kein rich. Installation in Sekunden.",
"es": "Solo stdlib Python. Sin requests, sin psutil, sin rich. Se instala en segundos.",
"it": "Solo stdlib Python. Nessun requests, nessun psutil, nessun rich. Si installa in secondi.",
"pt": "Apenas stdlib Python. Sem requests, sem psutil, sem rich. Instala em segundos.",
"zh": "仅用Python标准库。无需requests、psutil、rich。秒级安装。",
"ja": "Python標準ライブラリのみ。requests不要、psutil不要、rich不要。秒速インストール。",
"ko": "Python 표준 라이브러리만 사용. requests, psutil, rich 불필요. 초 단위 설치."
},
"feat5_t": {"en": "GPU Observability", "fr": "Observabilité GPU", "de": "GPU-Observability", "es": "Observabilidad GPU", "it": "Osservabilità GPU", "pt": "Observabilidade GPU", "zh": "GPU可观测性", "ja": "GPUオブザーバビリティ", "ko": "GPU 관측성"},
"feat5_d": {
"en": "Real-time GPU utilization, renderer, tiler, and memory — via passive IOReport. Live gauges, sparklines, historical charts. See your Apple Silicon GPU like never before.",
"fr": "Utilisation GPU en temps réel, renderer, tiler et mémoire — via IOReport passif. Jauges live, sparklines, courbes historiques. Voyez votre GPU Apple Silicon comme jamais.",
"de": "GPU-Auslastung, Renderer, Tiler und Speicher in Echtzeit — via passivem IOReport. Live-Gauges, Sparklines, historische Charts. Sehen Sie Ihre Apple Silicon GPU wie nie zuvor.",
"es": "Utilización GPU en tiempo real, renderer, tiler y memoria — via IOReport pasivo. Gauges en vivo, sparklines, gráficos históricos. Ve tu GPU Apple Silicon como nunca.",
"it": "Utilizzo GPU in tempo reale, renderer, tiler e memoria — tramite IOReport passivo. Gauge live, sparkline, grafici storici. Guarda la tua GPU Apple Silicon come mai prima.",
"pt": "Utilização GPU em tempo real, renderer, tiler e memória — via IOReport passivo. Gauges ao vivo, sparklines, gráficos históricos. Veja sua GPU Apple Silicon como nunca.",
"zh": "实时GPU利用率、渲染器、Tiler和内存 — 通过被动IOReport。实时仪表、迷你图、历史图表。以前所未有的方式观察Apple Silicon GPU。",
"ja": "GPU使用率、レンダラー、タイラー、メモリをリアルタイム計測 — パッシブIOReportで。ライブゲージ、スパークライン、履歴チャート。Apple Silicon GPUをかつてない精度で可視化。",
"ko": "실시간 GPU 사용률, 렌더러, 타일러, 메모리 — 패시브 IOReport 방식. 라이브 게이지, 스파크라인, 히스토리 차트. Apple Silicon GPU를 전례 없는 수준으로 확인."
},
"feat6_t": {"en": "Regression Detection", "fr": "Détection de régression", "de": "Regressionserkennung", "es": "Detección de regresiones", "it": "Rilevamento regressioni", "pt": "Detecção de regressões", "zh": "回归检测", "ja": "リグレッション検出", "ko": "회귀 감지"},
"feat6_d": {
"en": "Auto-detects performance drops after OS or engine updates. SQLite history with 90-day retention.",
"fr": "Détecte les baisses de performances après mise à jour OS ou moteur. Historique SQLite avec rétention 90 jours.",
"de": "Erkennt Leistungseinbrüche nach OS- oder Engine-Updates automatisch. SQLite-Verlauf mit 90 Tagen Aufbewahrung.",
"es": "Detecta caídas de rendimiento tras actualizaciones de OS o motor. Historial SQLite con retención de 90 días.",
"it": "Rileva automaticamente cali di prestazioni dopo aggiornamenti OS o motore. Storico SQLite con ritenzione 90 giorni.",
"pt": "Detecta quedas de desempenho após atualizações de OS ou motor. Histórico SQLite com retenção de 90 dias.",
"zh": "OS或引擎更新后自动检测性能下降。SQLite历史记录,保留90天。",
"ja": "OSやエンジンの更新後にパフォーマンス低下を自動検出。SQLite履歴、90日保持。",
"ko": "OS 또는 엔진 업데이트 후 성능 저하 자동 감지. SQLite 이력, 90일 보존."
},
"feat7_t": {"en": "REST API", "fr": "API REST", "de": "REST-API", "es": "API REST", "it": "API REST", "pt": "API REST", "zh": "REST API", "ja": "REST API", "ko": "REST API"},
"feat7_d": {
"en": "Full JSON API for automation. /api/snapshot, /api/status, /api/metrics — integrate with any stack.",
"fr": "API JSON complète pour l'automatisation. /api/snapshot, /api/status, /api/metrics — intégration avec n'importe quel stack.",
"de": "Vollständige JSON-API für Automatisierung. /api/snapshot, /api/status, /api/metrics — Integration mit jedem Stack.",
"es": "API JSON completa para automatización. /api/snapshot, /api/status, /api/metrics — integración con cualquier stack.",
"it": "API JSON completa per l'automazione. /api/snapshot, /api/status, /api/metrics — integrazione con qualsiasi stack.",
"pt": "API JSON completa para automação. /api/snapshot, /api/status, /api/metrics — integração com qualquer stack.",
"zh": "完整JSON API用于自动化。/api/snapshot、/api/status、/api/metrics — 集成任何技术栈。",
"ja": "自動化のための完全なJSON API。/api/snapshot、/api/status、/api/metrics — あらゆるスタックと統合。",
"ko": "자동화를 위한 완전한 JSON API. /api/snapshot, /api/status, /api/metrics — 모든 스택과 통합."
},
"feat8_t": {"en": "Prometheus Native", "fr": "Prometheus natif", "de": "Prometheus nativ", "es": "Prometheus nativo", "it": "Prometheus nativo", "pt": "Prometheus nativo", "zh": "原生Prometheus支持", "ja": "Prometheusネイティブ", "ko": "Prometheus 네이티브"},
"feat8_d": {
"en": "Built-in /metrics endpoint. Plug into Grafana, Datadog, or any Prometheus-compatible tool. Zero config.",
"fr": "Endpoint /metrics intégré. Connectez Grafana, Datadog ou tout outil compatible Prometheus. Zéro config.",
"de": "Integrierter /metrics-Endpunkt. Anbindung an Grafana, Datadog oder jedes Prometheus-kompatible Tool. Null Konfiguration.",
"es": "Endpoint /metrics integrado. Conecta Grafana, Datadog o cualquier herramienta compatible con Prometheus. Sin configuración.",
"it": "Endpoint /metrics integrato. Collega Grafana, Datadog o qualsiasi strumento compatibile con Prometheus. Zero configurazione.",
"pt": "Endpoint /metrics integrado. Conecte Grafana, Datadog ou qualquer ferramenta compatível com Prometheus. Zero configuração.",
"zh": "内置/metrics端点。连接Grafana、Datadog或任何兼容Prometheus的工具。零配置。",
"ja": "組み込み/metricsエンドポイント。Grafana、Datadog、その他Prometheus互換ツールに接続。設定不要。",
"ko": "내장 /metrics 엔드포인트. Grafana, Datadog 또는 Prometheus 호환 도구에 연결. 설정 불필요."
},
"feat9_t": {"en": "Alert Webhooks", "fr": "Alertes webhook", "de": "Alert-Webhooks", "es": "Alertas webhook", "it": "Alert webhook", "pt": "Alertas webhook", "zh": "告警Webhook", "ja": "アラートWebhook", "ko": "알림 Webhook"},
"feat9_d": {
"en": "POST to Slack, Discord, or any URL on memory pressure, thermal throttling, or engine down. Transition-based — no spam.",
"fr": "POST vers Slack, Discord ou toute URL en cas de pression mémoire, throttling thermique ou moteur down. Basé sur les transitions — pas de spam.",
"de": "POST an Slack, Discord oder jede URL bei Speicherdruck, thermischer Drosselung oder Engine-Ausfall. Transitionsbasiert — kein Spam.",
"es": "POST a Slack, Discord o cualquier URL ante presión de memoria, throttling térmico o motor caído. Basado en transiciones — sin spam.",
"it": "POST a Slack, Discord o qualsiasi URL in caso di pressione memoria, throttling termico o motore down. Basato su transizioni — niente spam.",
"pt": "POST para Slack, Discord ou qualquer URL em caso de pressão de memória, throttling térmico ou motor indisponível. Baseado em transições — sem spam.",
"zh": "内存压力、温度降频或引擎宕机时POST到Slack、Discord或任意URL。基于状态转换 — 不刷屏。",
"ja": "メモリ逼迫、サーマルスロットリング、エンジンダウン時にSlack、Discord、任意URLへPOST。状態遷移ベース — スパムなし。",
"ko": "메모리 압력, 서멀 스로틀링, 엔진 다운 시 Slack, Discord 또는 모든 URL로 POST. 상태 전환 기반 — 스팸 없음."
},
"feat10_t": {"en": "Community Leaderboard", "fr": "Classement communautaire", "de": "Community-Rangliste", "es": "Clasificación comunitaria", "it": "Classifica community", "pt": "Ranking comunitário", "zh": "社区排行榜", "ja": "コミュニティリーダーボード", "ko": "커뮤니티 리더보드"},
"feat10_d": {
"en": "Share benchmarks anonymously. Compare your Mac against the community. See what others achieve on the same chip.",
"fr": "Partagez vos benchmarks anonymement. Comparez votre Mac à la communauté. Voyez ce que d'autres obtiennent sur la même puce.",
"de": "Benchmarks anonym teilen. Vergleichen Sie Ihren Mac mit der Community. Sehen Sie, was andere auf dem gleichen Chip erreichen.",
"es": "Comparte benchmarks anónimamente. Compara tu Mac con la comunidad. Mira lo que otros logran con el mismo chip.",
"it": "Condividi benchmark in modo anonimo. Confronta il tuo Mac con la community. Scopri cosa ottengono gli altri sullo stesso chip.",
"pt": "Compartilhe benchmarks anonimamente. Compare seu Mac com a comunidade. Veja o que outros alcançam no mesmo chip.",
"zh": "匿名分享基准测试。将你的Mac与社区比较。查看其他人在相同芯片上的表现。",
"ja": "ベンチマークを匿名で共有。コミュニティとMacを比較。同じチップでの他のユーザーの結果を確認。",
"ko": "벤치마크를 익명으로 공유. 커뮤니티와 Mac 비교. 같은 칩에서 다른 사용자의 결과 확인."
},
"feat11_t": {"en": "Smart Recommendations", "fr": "Recommandations intelligentes", "de": "Intelligente Empfehlungen", "es": "Recomendaciones inteligentes", "it": "Raccomandazioni intelligenti", "pt": "Recomendações inteligentes", "zh": "智能推荐", "ja": "スマート推薦", "ko": "스마트 추천"},
"feat11_d": {
"en": "\"On your M4 Pro 64 GB, for code: Qwen3.5-35B on mlx-lm at 71 tok/s.\" Data-driven answers to the #1 question on r/LocalLLaMA.",
"fr": "\"Sur ton M4 Pro 64 Go, pour du code : Qwen3.5-35B sur mlx-lm à 71 tok/s.\" Des réponses chiffrées à la question n°1 de r/LocalLLaMA.",
"de": "\"Auf deinem M4 Pro 64 GB, für Code: Qwen3.5-35B auf mlx-lm mit 71 tok/s.\" Datenbasierte Antworten auf die #1-Frage von r/LocalLLaMA.",
"es": "\"En tu M4 Pro 64 GB, para código: Qwen3.5-35B en mlx-lm a 71 tok/s.\" Respuestas basadas en datos a la pregunta #1 de r/LocalLLaMA.",
"it": "\"Sul tuo M4 Pro 64 GB, per codice: Qwen3.5-35B su mlx-lm a 71 tok/s.\" Risposte basate sui dati alla domanda #1 di r/LocalLLaMA.",
"pt": "\"No seu M4 Pro 64 GB, para código: Qwen3.5-35B no mlx-lm a 71 tok/s.\" Respostas baseadas em dados à pergunta #1 do r/LocalLLaMA.",
"zh": "\"在你的M4 Pro 64GB上,写代码用:Qwen3.5-35B + mlx-lm,71 tok/s。\" 用数据回答r/LocalLLaMA的第一大问题。",
"ja": "\"M4 Pro 64GBで、コード用:Qwen3.5-35B + mlx-lm、71 tok/s。\" r/LocalLLaMAの#1質問にデータで回答。",
"ko": "\"M4 Pro 64GB에서 코드용: Qwen3.5-35B + mlx-lm, 71 tok/s.\" r/LocalLLaMA의 #1 질문에 데이터로 답변."
},
"feat12_t": {"en": "Distributed Inference", "fr": "Inférence distribuée", "de": "Verteilte Inferenz", "es": "Inferencia distribuida", "it": "Inferenza distribuita", "pt": "Inferência distribuída", "zh": "分布式推理", "ja": "分散推論", "ko": "분산 추론"},
"feat12_d": {
"en": "Benchmark Exo clusters. 2 Mac Minis = Llama 3.3 70B. asiai measures the swarm like a single machine.",
"fr": "Benchmark des clusters Exo. 2 Mac Mini = Llama 3.3 70B. asiai mesure le swarm comme une seule machine.",
"de": "Benchmark von Exo-Clustern. 2 Mac Minis = Llama 3.3 70B. asiai misst den Schwarm wie eine einzelne Maschine.",
"es": "Benchmark de clústeres Exo. 2 Mac Mini = Llama 3.3 70B. asiai mide el swarm como una sola máquina.",
"it": "Benchmark dei cluster Exo. 2 Mac Mini = Llama 3.3 70B. asiai misura lo swarm come una singola macchina.",
"pt": "Benchmark de clusters Exo. 2 Mac Mini = Llama 3.3 70B. asiai mede o swarm como uma única máquina.",
"zh": "基准测试Exo集群。2台Mac Mini = Llama 3.3 70B。asiai像测量单台机器一样测量集群。",
"ja": "Exoクラスターのベンチマーク。Mac Mini 2台 = Llama 3.3 70B。asiaiはスウォームを1台のマシンのように計測。",
"ko": "Exo 클러스터 벤치마크. Mac Mini 2대 = Llama 3.3 70B. asiai는 스웜을 단일 머신처럼 측정."
},
"uc_title": {
"en": "What Will You Discover?", "fr": "Qu'allez-vous découvrir ?", "de": "Was werden Sie entdecken?",
"es": "¿Qué vas a descubrir?", "it": "Cosa scoprirai?", "pt": "O que você vai descobrir?",
"zh": "你会发现什么?", "ja": "何を発見できる?", "ko": "무엇을 발견하게 될까?"
},
"uc_subtitle": {
"en": "Real questions from r/LocalLLaMA, answered in one command.",
"fr": "Les vraies questions de r/LocalLLaMA, une commande suffit.",
"de": "Echte Fragen von r/LocalLLaMA, mit einem Befehl beantwortet.",
"es": "Preguntas reales de r/LocalLLaMA, respondidas con un comando.",
"it": "Domande reali da r/LocalLLaMA, risposte con un comando.",
"pt": "Perguntas reais do r/LocalLLaMA, respondidas com um comando.",
"zh": "来自r/LocalLLaMA的真实问题,一条命令解答。",
"ja": "r/LocalLLaMAからのリアルな疑問に、コマンド一発で回答。",
"ko": "r/LocalLLaMA의 실제 질문, 명령어 하나로 해결."
},
"uc1_t": {"en": "Which engine is fastest?", "fr": "Quel moteur est le plus rapide ?", "de": "Welche Engine ist am schnellsten?", "es": "¿Cuál motor es el más rápido?", "it": "Quale motore è il più veloce?", "pt": "Qual motor é o mais rápido?", "zh": "哪个引擎最快?", "ja": "どのエンジンが最速?", "ko": "어떤 엔진이 가장 빠를까?"},
"uc1_d": {"en": "Head-to-head comparison — the #1 question on r/LocalLLaMA.", "fr": "Comparaison face-à-face — la question n°1 sur r/LocalLLaMA.", "de": "Direktvergleich — die Frage Nr. 1 auf r/LocalLLaMA.", "es": "Comparación directa — la pregunta n°1 en r/LocalLLaMA.", "it": "Confronto diretto — la domanda n°1 su r/LocalLLaMA.", "pt": "Comparação direta — a pergunta n°1 no r/LocalLLaMA.", "zh": "一对一对比 — r/LocalLLaMA上的头号问题。", "ja": "直接対決 — r/LocalLLaMAで最も多い質問。", "ko": "1:1 비교 — r/LocalLLaMA에서 가장 많은 질문."},
"uc2_t": {"en": "Monitor a multi-agent swarm", "fr": "Monitorer un essaim multi-agents", "de": "Multi-Agent-Schwarm überwachen", "es": "Monitorear un enjambre multi-agente", "it": "Monitorare uno sciame multi-agente", "pt": "Monitorar um enxame multi-agente", "zh": "监控多智能体集群", "ja": "マルチエージェント群の監視", "ko": "멀티 에이전트 스웜 모니터링"},
"uc2_d": {"en": "LLMs running 24/7 for AI agents — track VRAM, thermal, and performance.", "fr": "LLMs tournant 24/7 pour des agents IA — suivez VRAM, thermique et performances.", "de": "LLMs laufen 24/7 für KI-Agenten — VRAM, Temperatur und Leistung verfolgen.", "es": "LLMs ejecutándose 24/7 para agentes IA — monitorea VRAM, temperatura y rendimiento.", "it": "LLM in esecuzione 24/7 per agenti IA — monitora VRAM, temperatura e prestazioni.", "pt": "LLMs rodando 24/7 para agentes IA — acompanhe VRAM, temperatura e desempenho.", "zh": "LLM全天候运行AI智能体 — 追踪VRAM、温度和性能。", "ja": "AIエージェント用LLMを24時間稼働 — VRAM、温度、パフォーマンスを追跡。", "ko": "AI 에이전트용 LLM 24시간 가동 — VRAM, 온도, 성능 추적."},
"uc3_t": {"en": "Compare energy efficiency", "fr": "Comparer l'efficacité énergétique", "de": "Energieeffizienz vergleichen", "es": "Comparar eficiencia energética", "it": "Confrontare l'efficienza energetica", "pt": "Comparar eficiência energética", "zh": "对比能效", "ja": "エネルギー効率を比較", "ko": "에너지 효율 비교"},
"uc3_d": {"en": "tok/s per watt between engines. Critical for 24/7 Mac Mini homelabs.", "fr": "tok/s par watt entre moteurs. Essentiel pour les homelabs Mac Mini 24/7.", "de": "tok/s pro Watt zwischen Engines. Kritisch für 24/7 Mac Mini Homelabs.", "es": "tok/s por vatio entre motores. Crítico para homelabs Mac Mini 24/7.", "it": "tok/s per watt tra motori. Critico per homelab Mac Mini 24/7.", "pt": "tok/s por watt entre motores. Crítico para homelabs Mac Mini 24/7.", "zh": "不同引擎间的tok/s每瓦特。对7x24小时Mac Mini家庭实验室至关重要。", "ja": "エンジン間のワットあたりtok/s。24時間稼働のMac Miniホームラボに不可欠。", "ko": "엔진 간 와트당 tok/s. 24시간 Mac Mini 홈랩에 필수."},
"uc4_t": {"en": "Detect regressions after updates", "fr": "Détecter les régressions après mise à jour", "de": "Regressionen nach Updates erkennen", "es": "Detectar regresiones tras actualizaciones", "it": "Rilevare regressioni dopo aggiornamenti", "pt": "Detectar regressões após atualizações", "zh": "更新后检测回归", "ja": "更新後のリグレッション検出", "ko": "업데이트 후 회귀 감지"},
"uc4_d": {"en": "Did the Ollama or macOS update break your performance? Auto-detection via SQLite.", "fr": "La mise à jour Ollama ou macOS a cassé vos performances ? Détection auto via SQLite.", "de": "Hat das Ollama- oder macOS-Update Ihre Leistung verschlechtert? Automatische Erkennung via SQLite.", "es": "¿La actualización de Ollama o macOS rompió tu rendimiento? Detección automática via SQLite.", "it": "L'aggiornamento Ollama o macOS ha peggiorato le prestazioni? Rilevamento automatico via SQLite.", "pt": "A atualização do Ollama ou macOS quebrou seu desempenho? Detecção automática via SQLite.", "zh": "Ollama或macOS更新导致性能下降?通过SQLite自动检测。", "ja": "OllamaやmacOSのアップデートでパフォーマンスが低下?SQLiteで自動検出。", "ko": "Ollama 또는 macOS 업데이트로 성능이 떨어졌나요? SQLite로 자동 감지."},
"uc5_t": {"en": "Test long context support", "fr": "Tester le support long contexte", "de": "Langkontext-Unterstützung testen", "es": "Probar soporte de contexto largo", "it": "Testare il supporto contesto lungo", "pt": "Testar suporte a contexto longo", "zh": "测试长上下文支持", "ja": "ロングコンテキスト対応をテスト", "ko": "긴 컨텍스트 지원 테스트"},
"uc5_d": {"en": "--context-size 64k benchmarks. Does your model survive 256k context?", "fr": "Benchmarks --context-size 64k. Votre modèle survit-il à 256k de contexte ?", "de": "--context-size 64k Benchmarks. Übersteht Ihr Modell 256k Kontext?", "es": "Benchmarks --context-size 64k. ¿Sobrevive tu modelo a 256k de contexto?", "it": "Benchmark --context-size 64k. Il tuo modello sopravvive a 256k di contesto?", "pt": "Benchmarks --context-size 64k. Seu modelo sobrevive a 256k de contexto?", "zh": "--context-size 64k跑分。你的模型能扛住256k上下文吗?", "ja": "--context-size 64kベンチマーク。あなたのモデルは256kコンテキストに耐えられる?", "ko": "--context-size 64k 벤치마크. 당신의 모델이 256k 컨텍스트를 견딜 수 있을까?"},
"uc6_t": {"en": "Is my Mac thermal throttling?", "fr": "Mon Mac est-il en throttling thermique ?", "de": "Drosselt mein Mac thermisch?", "es": "¿Mi Mac tiene throttling térmico?", "it": "Il mio Mac fa throttling termico?", "pt": "Meu Mac está com throttling térmico?", "zh": "我的Mac是否在温度降频?", "ja": "Macがサーマルスロットリングしてる?", "ko": "내 Mac이 서멀 스로틀링 중인가?"},
"uc6_d": {"en": "Drift detection across benchmark runs. Unique to asiai.", "fr": "Détection de dérive thermique entre les runs. Unique à asiai.", "de": "Drift-Erkennung über Benchmark-Läufe hinweg. Einzigartig bei asiai.", "es": "Detección de deriva térmica entre ejecuciones. Único en asiai.", "it": "Rilevamento deriva termica tra le esecuzioni. Unico in asiai.", "pt": "Detecção de deriva térmica entre execuções. Único no asiai.", "zh": "跨跑分轮次的漂移检测。asiai独有功能。", "ja": "ベンチマーク間のドリフト検出。asiai独自の機能。", "ko": "벤치마크 실행 간 드리프트 감지. asiai만의 고유 기능."},
"uc7_t": {"en": "Reproducible benchmarks", "fr": "Benchmarks reproductibles", "de": "Reproduzierbare Benchmarks", "es": "Benchmarks reproducibles", "it": "Benchmark riproducibili", "pt": "Benchmarks reprodutíveis", "zh": "可复现的跑分", "ja": "再現可能なベンチマーク", "ko": "재현 가능한 벤치마크"},
"uc7_d": {"en": "MLPerf/SPEC methodology. Warmup, median, greedy decoding. Share with confidence.", "fr": "Méthodologie MLPerf/SPEC. Warmup, médiane, décodage greedy. Partagez en confiance.", "de": "MLPerf/SPEC-Methodik. Warmup, Median, Greedy-Dekodierung. Mit Vertrauen teilen.", "es": "Metodología MLPerf/SPEC. Warmup, mediana, decodificación greedy. Comparte con confianza.", "it": "Metodologia MLPerf/SPEC. Warmup, mediana, decodifica greedy. Condividi con fiducia.", "pt": "Metodologia MLPerf/SPEC. Warmup, mediana, decodificação greedy. Compartilhe com confiança.", "zh": "MLPerf/SPEC方法论。预热、中位数、greedy解码。放心分享。", "ja": "MLPerf/SPEC準拠の方法論。ウォームアップ、中央値、greedy decoding。自信を持って共有。", "ko": "MLPerf/SPEC 방법론. 워밍업, 중앙값, greedy decoding. 자신있게 공유."},
"uc8_t": {"en": "Health check in one command", "fr": "Diagnostic en une commande", "de": "Gesundheitscheck mit einem Befehl", "es": "Diagnóstico en un comando", "it": "Diagnostica in un comando", "pt": "Diagnóstico em um comando", "zh": "一条命令健康检查", "ja": "コマンド一発でヘルスチェック", "ko": "명령어 하나로 상태 점검"},
"uc8_d": {"en": "asiai doctor diagnoses system, engines, and database with fix suggestions.", "fr": "asiai doctor diagnostique système, moteurs et base de données avec suggestions de corrections.", "de": "asiai doctor diagnostiziert System, Engines und Datenbank mit Lösungsvorschlägen.", "es": "asiai doctor diagnostica sistema, motores y base de datos con sugerencias de solución.", "it": "asiai doctor diagnostica sistema, motori e database con suggerimenti di correzione.", "pt": "asiai doctor diagnostica sistema, motores e banco de dados com sugestões de correção.", "zh": "asiai doctor诊断系统、引擎和数据库,并提供修复建议。", "ja": "asiai doctorがシステム、エンジン、データベースを診断し修正案を提示。", "ko": "asiai doctor가 시스템, 엔진, 데이터베이스를 진단하고 수정 제안."},
"uc9_t": {"en": "Visual dashboard", "fr": "Dashboard visuel", "de": "Visuelles Dashboard", "es": "Dashboard visual", "it": "Dashboard visuale", "pt": "Dashboard visual", "zh": "可视化仪表盘", "ja": "ビジュアルダッシュボード", "ko": "시각적 대시보드"},
"uc9_d": {"en": "Dark/light web dashboard with live charts, SSE progress, benchmark controls.", "fr": "Dashboard web dark/light avec graphiques en direct, progression SSE, contrôles de benchmark.", "de": "Dark/Light Web-Dashboard mit Live-Charts, SSE-Fortschritt und Benchmark-Steuerung.", "es": "Dashboard web dark/light con gráficos en vivo, progreso SSE y controles de benchmark.", "it": "Dashboard web dark/light con grafici live, avanzamento SSE e controlli benchmark.", "pt": "Dashboard web dark/light com gráficos ao vivo, progresso SSE e controles de benchmark.", "zh": "深色/浅色Web仪表盘,实时图表、SSE进度、跑分控制。", "ja": "ダーク/ライト対応Webダッシュボード。ライブチャート、SSE進捗、ベンチマーク操作。", "ko": "다크/라이트 웹 대시보드. 실시간 차트, SSE 진행률, 벤치마크 컨트롤."},
"uc10_t": {"en": "Compare LLMs head-to-head", "fr": "Comparer les LLMs face-à-face", "de": "LLMs direkt vergleichen", "es": "Comparar LLMs cara a cara", "it": "Confrontare LLM testa a testa", "pt": "Comparar LLMs frente a frente", "zh": "LLM一对一比较", "ja": "LLMを直接対決で比較", "ko": "LLM 1:1 비교"},
"uc10_d": {"en": "Same engine, different models. Which quantization wins?", "fr": "Même moteur, différents modèles. Quelle quantification gagne ?", "de": "Gleiche Engine, verschiedene Modelle. Welche Quantisierung gewinnt?", "es": "Mismo motor, diferentes modelos. ¿Qué cuantización gana?", "it": "Stesso motore, modelli diversi. Quale quantizzazione vince?", "pt": "Mesmo motor, modelos diferentes. Qual quantização ganha?", "zh": "同一引擎,不同模型。哪种量化方案胜出?", "ja": "同じエンジン、異なるモデル。どの量子化が勝つ?", "ko": "같은 엔진, 다른 모델. 어떤 양자화가 이길까?"},
"uc11_t": {"en": "Prometheus + Grafana monitoring", "fr": "Monitoring Prometheus + Grafana", "de": "Prometheus + Grafana Monitoring", "es": "Monitoreo Prometheus + Grafana", "it": "Monitoraggio Prometheus + Grafana", "pt": "Monitoramento Prometheus + Grafana", "zh": "Prometheus + Grafana监控", "ja": "Prometheus + Grafana監視", "ko": "Prometheus + Grafana 모니터링"},
"uc11_d": {
"en": "Expose /metrics, scrape with Prometheus, visualize in Grafana. Production-grade observability.",
"fr": "Exposez /metrics, scrapez avec Prometheus, visualisez dans Grafana. Observabilité production-ready.",
"de": "Stellen Sie /metrics bereit, scrapen Sie mit Prometheus, visualisieren Sie in Grafana. Produktionsreife Observability.",
"es": "Expone /metrics, scrape con Prometheus, visualiza en Grafana. Observabilidad de nivel producción.",
"it": "Esponi /metrics, scrape con Prometheus, visualizza in Grafana. Osservabilità production-grade.",
"pt": "Exponha /metrics, scrape com Prometheus, visualize no Grafana. Observabilidade de nível produção.",
"zh": "暴露/metrics,用Prometheus抓取,在Grafana中可视化。生产级可观测性。",
"ja": "/metricsを公開、Prometheusでスクレイプ、Grafanaで可視化。本番品質の可観測性。",
"ko": "/metrics 노출, Prometheus로 스크레이프, Grafana에서 시각화. 프로덕션 수준 관측성."
},
"uc12_t": {"en": "Track AI agent inference", "fr": "Suivre l'inférence des agents IA", "de": "KI-Agent-Inferenz verfolgen", "es": "Rastrear inferencia de agentes IA", "it": "Monitorare l'inferenza degli agenti IA", "pt": "Rastrear inferência de agentes IA", "zh": "追踪AI智能体推理", "ja": "AIエージェント推論を追跡", "ko": "AI 에이전트 추론 추적"},
"uc12_d": {
"en": "GPU activity, TCP connections, KV cache — know when your agents are thinking, idle, or overloaded. API-ready for swarm orchestrators.",
"fr": "Activité GPU, connexions TCP, cache KV — sachez quand vos agents réfléchissent, sont inactifs ou surchargés. API prête pour les orchestrateurs de swarm.",
"de": "GPU-Aktivität, TCP-Verbindungen, KV-Cache — wissen, wann Ihre Agenten denken, idle oder überlastet sind. API-bereit für Schwarm-Orchestratoren.",
"es": "Actividad GPU, conexiones TCP, caché KV — sabe cuándo tus agentes piensan, están inactivos o sobrecargados. API lista para orquestadores de swarm.",
"it": "Attività GPU, connessioni TCP, cache KV — sappi quando i tuoi agenti pensano, sono inattivi o sovraccarichi. API pronta per orchestratori di swarm.",
"pt": "Atividade GPU, conexões TCP, cache KV — saiba quando seus agentes estão pensando, ociosos ou sobrecarregados. API pronta para orquestradores de swarm.",
"zh": "GPU活动、TCP连接、KV缓存 — 了解你的智能体何时在推理、空闲或过载。API可直接对接集群编排器。",
"ja": "GPUアクティビティ、TCP接続、KVキャッシュ — エージェントが推論中か、アイドルか、過負荷かを把握。スウォームオーケストレーターに対応するAPI。",
"ko": "GPU 활동, TCP 연결, KV 캐시 — 에이전트가 추론 중인지, 유휴 상태인지, 과부하인지 파악. 스웜 오케스트레이터용 API 지원."
},
"how_title": {
"en": "Up and Running in 60 Seconds", "fr": "Opérationnel en 60 secondes", "de": "In 60 Sekunden startklar",
"es": "Funcionando en 60 segundos", "it": "Operativo in 60 secondi", "pt": "Funcionando em 60 segundos",
"zh": "60秒启动运行", "ja": "60秒で稼働開始", "ko": "60초 만에 실행"
},
"how_subtitle": {
"en": "Three commands. That's it.", "fr": "Trois commandes. C'est tout.", "de": "Drei Befehle. Das war's.",
"es": "Tres comandos. Eso es todo.", "it": "Tre comandi. Tutto qui.", "pt": "Três comandos. Só isso.",
"zh": "三条命令,搞定。", "ja": "コマンド3つ。以上。", "ko": "명령어 세 개. 끝."
},
"step1": {"en": "Install", "fr": "Installer", "de": "Installieren", "es": "Instalar", "it": "Installare", "pt": "Instalar", "zh": "安装", "ja": "インストール", "ko": "설치"},
"step2": {"en": "Detect", "fr": "Détecter", "de": "Erkennen", "es": "Detectar", "it": "Rilevare", "pt": "Detectar", "zh": "检测", "ja": "検出", "ko": "감지"},
"step3": {"en": "Benchmark", "fr": "Benchmarker", "de": "Benchmarken", "es": "Benchmark", "it": "Benchmark", "pt": "Benchmark", "zh": "跑分", "ja": "ベンチマーク", "ko": "벤치마크"},
"step2_out": {"en": "3 engines found", "fr": "3 moteurs trouvés", "de": "3 Engines gefunden", "es": "3 motores encontrados", "it": "3 motori trovati", "pt": "3 motores encontrados", "zh": "发现3个引擎", "ja": "3つのエンジンを検出", "ko": "엔진 3개 발견"},
"disc_title": {
"en": "Real Discoveries", "fr": "Découvertes réelles", "de": "Echte Entdeckungen",
"es": "Descubrimientos reales", "it": "Scoperte reali", "pt": "Descobertas reais",
"zh": "真实发现", "ja": "実際の発見", "ko": "실제 발견"
},
"disc_subtitle": {
"en": "Numbers from actual benchmarks on Apple Silicon.",
"fr": "Chiffres issus de vrais benchmarks sur Apple Silicon.",
"de": "Zahlen aus echten Benchmarks auf Apple Silicon.",
"es": "Números de benchmarks reales en Apple Silicon.",
"it": "Numeri da benchmark reali su Apple Silicon.",
"pt": "Números de benchmarks reais em Apple Silicon.",
"zh": "Apple Silicon上真实跑分的数据。",
"ja": "Apple Silicon上での実際のベンチマークデータ。",
"ko": "Apple Silicon에서의 실제 벤치마크 수치."
},
"disc1_d": {
"en": "MLX is 2.3x faster for MoE architectures (Qwen3.5-35B-A3B) on Apple Silicon.",
"fr": "MLX est 2,3x plus rapide pour les architectures MoE (Qwen3.5-35B-A3B) sur Apple Silicon.",
"de": "MLX ist 2,3x schneller für MoE-Architekturen (Qwen3.5-35B-A3B) auf Apple Silicon.",
"es": "MLX es 2,3x más rápido para arquitecturas MoE (Qwen3.5-35B-A3B) en Apple Silicon.",
"it": "MLX è 2,3x più veloce per architetture MoE (Qwen3.5-35B-A3B) su Apple Silicon.",
"pt": "MLX é 2,3x mais rápido para arquiteturas MoE (Qwen3.5-35B-A3B) em Apple Silicon.",
"zh": "在Apple Silicon上,MLX对MoE架构(Qwen3.5-35B-A3B)快2.3倍。",
"ja": "Apple SiliconでMoEアーキテクチャ(Qwen3.5-35B-A3B)にはMLXが2.3倍高速。",
"ko": "Apple Silicon에서 MoE 아키텍처(Qwen3.5-35B-A3B)에 MLX가 2.3배 빠름."
},
"disc2_t": {
"en": "VRAM: 64k → 256k", "fr": "VRAM : 64k → 256k", "de": "VRAM: 64k → 256k",
"es": "VRAM: 64k → 256k", "it": "VRAM: 64k → 256k", "pt": "VRAM: 64k → 256k",
"zh": "VRAM: 64k → 256k", "ja": "VRAM: 64k → 256k", "ko": "VRAM: 64k → 256k"
},
"disc2_d": {
"en": "VRAM stays constant from 64k to 256k context with DeltaNet — not documented anywhere else.",
"fr": "La VRAM reste constante de 64k à 256k de contexte avec DeltaNet — non documenté ailleurs.",
"de": "VRAM bleibt konstant von 64k bis 256k Kontext mit DeltaNet — nirgendwo anders dokumentiert.",
"es": "La VRAM se mantiene constante de 64k a 256k con DeltaNet — no documentado en ningún otro lugar.",
"it": "La VRAM resta costante da 64k a 256k di contesto con DeltaNet — non documentato altrove.",
"pt": "A VRAM permanece constante de 64k a 256k de contexto com DeltaNet — não documentado em nenhum outro lugar.",
"zh": "使用DeltaNet时,VRAM从64k到256k上下文保持不变 — 其他地方从未记录。",
"ja": "DeltaNetでは64kから256kコンテキストでもVRAMが一定 — 他のどこにも文書化されていない。",
"ko": "DeltaNet 사용 시 64k에서 256k 컨텍스트까지 VRAM 일정 — 다른 곳에서는 문서화되지 않음."
},
"disc3_t": {
"en": "Engine > Model", "fr": "Moteur > Modèle", "de": "Engine > Modell",
"es": "Motor > Modelo", "it": "Motore > Modello", "pt": "Motor > Modelo",
"zh": "引擎 > 模型", "ja": "エンジン > モデル", "ko": "엔진 > 모델"
},
"disc3_d": {
"en": "Same model, same Mac: 30 tok/s on one engine, 71 tok/s on another. The engine matters more.",
"fr": "Même modèle, même Mac : 30 tok/s sur un moteur, 71 tok/s sur un autre. Le moteur compte plus.",
"de": "Gleiches Modell, gleicher Mac: 30 tok/s auf einer Engine, 71 tok/s auf einer anderen. Die Engine zählt mehr.",
"es": "Mismo modelo, mismo Mac: 30 tok/s en un motor, 71 tok/s en otro. El motor importa más.",
"it": "Stesso modello, stesso Mac: 30 tok/s su un motore, 71 tok/s su un altro. Il motore conta di più.",
"pt": "Mesmo modelo, mesmo Mac: 30 tok/s em um motor, 71 tok/s em outro. O motor importa mais.",
"zh": "同一模型,同一Mac:一个引擎30 tok/s,另一个71 tok/s。引擎比模型更重要。",
"ja": "同じモデル、同じMac:一方のエンジンで30 tok/s、別のエンジンで71 tok/s。エンジンの方が重要。",
"ko": "같은 모델, 같은 Mac: 한 엔진에서 30 tok/s, 다른 엔진에서 71 tok/s. 엔진이 더 중요."
},
"eng_title": {
"en": "Supported Engines", "fr": "Moteurs supportés", "de": "Unterstützte Engines",
"es": "Motores soportados", "it": "Motori supportati", "pt": "Motores suportados",
"zh": "支持的引擎", "ja": "対応エンジン", "ko": "지원 엔진"
},
"eng_subtitle": {
"en": "Auto-detected, zero configuration needed.",
"fr": "Détection automatique, zéro configuration.",
"de": "Automatisch erkannt, keine Konfiguration nötig.",
"es": "Detección automática, sin configuración.",
"it": "Rilevamento automatico, zero configurazione.",
"pt": "Detecção automática, zero configuração.",
"zh": "自动检测,无需配置。",
"ja": "自動検出、設定不要。",
"ko": "자동 감지, 설정 불필요."
},
"eng_port": {"en": "Default Port", "fr": "Port par défaut", "de": "Standard-Port", "es": "Puerto por defecto", "it": "Porta predefinita", "pt": "Porta padrão", "zh": "默认端口", "ja": "デフォルトポート", "ko": "기본 포트"},
"eng_native": {"en": "Native", "fr": "Native", "de": "Nativ", "es": "Nativa", "it": "Nativa", "pt": "Nativa", "zh": "原生", "ja": "ネイティブ", "ko": "네이티브"},
"eng_compat": {"en": "OpenAI-compatible", "fr": "Compatible OpenAI", "de": "OpenAI-kompatibel", "es": "Compatible con OpenAI", "it": "Compatibile OpenAI", "pt": "Compatível com OpenAI", "zh": "兼容OpenAI", "ja": "OpenAI互換", "ko": "OpenAI 호환"},
"eng_format": {"en": "Format", "fr": "Format", "de": "Format", "es": "Formato", "it": "Formato", "pt": "Formato", "zh": "格式", "ja": "形式", "ko": "형식"},
"met_title": {
"en": "What We Measure", "fr": "Ce qu'on mesure", "de": "Was wir messen",
"es": "Qué medimos", "it": "Cosa misuriamo", "pt": "O que medimos",
"zh": "我们测量什么", "ja": "何を測定するか", "ko": "측정 항목"
},
"met_subtitle": {
"en": "8 metrics, consistent methodology, every run.",
"fr": "8 métriques, méthodologie constante, à chaque run.",
"de": "8 Metriken, konsistente Methodik, bei jedem Lauf.",
"es": "8 métricas, metodología consistente, cada ejecución.",
"it": "8 metriche, metodologia coerente, ogni esecuzione.",
"pt": "8 métricas, metodologia consistente, cada execução.",
"zh": "8项指标,一致的方法论,每次运行。",
"ja": "8つの指標、一貫した方法論、毎回。",
"ko": "8개 지표, 일관된 방법론, 매 실행마다."
},
"m_toks": {"en": "Generation speed (tokens/sec)", "fr": "Vitesse de génération (tokens/sec)", "de": "Generierungsgeschwindigkeit (Tokens/Sek.)", "es": "Velocidad de generación (tokens/seg)", "it": "Velocità di generazione (token/sec)", "pt": "Velocidade de geração (tokens/seg)", "zh": "生成速度 (tokens/sec)", "ja": "生成速度 (tokens/sec)", "ko": "생성 속도 (tokens/sec)"},
"m_ttft": {"en": "Time to first token", "fr": "Temps au premier token", "de": "Zeit bis zum ersten Token", "es": "Tiempo al primer token", "it": "Tempo al primo token", "pt": "Tempo ao primeiro token", "zh": "首token延迟", "ja": "最初のトークンまでの時間", "ko": "첫 토큰까지 시간"},
"m_power": {"en": "GPU power draw in watts", "fr": "Consommation GPU en watts", "de": "GPU-Leistungsaufnahme in Watt", "es": "Consumo GPU en vatios", "it": "Consumo GPU in watt", "pt": "Consumo GPU em watts", "zh": "GPU功耗(瓦特)", "ja": "GPU消費電力(ワット)", "ko": "GPU 전력 소비 (와트)"},
"m_eff": {"en": "Energy efficiency", "fr": "Efficacité énergétique", "de": "Energieeffizienz", "es": "Eficiencia energética", "it": "Efficienza energetica", "pt": "Eficiência energética", "zh": "能效", "ja": "エネルギー効率", "ko": "에너지 효율"},
"m_stab": {"en": "Run-to-run variance", "fr": "Variance inter-runs", "de": "Lauf-zu-Lauf-Varianz", "es": "Varianza entre ejecuciones", "it": "Varianza tra esecuzioni", "pt": "Variância entre execuções", "zh": "跨轮次方差", "ja": "実行間バラつき", "ko": "실행 간 분산"},
"m_vram": {"en": "GPU memory footprint", "fr": "Empreinte mémoire GPU", "de": "GPU-Speicherbedarf", "es": "Huella de memoria GPU", "it": "Footprint memoria GPU", "pt": "Pegada de memória GPU", "zh": "GPU显存占用", "ja": "GPUメモリ使用量", "ko": "GPU 메모리 사용량"},
"m_therm": {"en": "Throttling state", "fr": "État de throttling", "de": "Throttling-Status", "es": "Estado de throttling", "it": "Stato di throttling", "pt": "Estado de throttling", "zh": "降频状态", "ja": "スロットリング状態", "ko": "스로틀링 상태"},
"m_ctx": {"en": "Long context perf scaling", "fr": "Scaling perf long contexte", "de": "Langkontext-Performance", "es": "Escalado contexto largo", "it": "Scaling contesto lungo", "pt": "Escala contexto longo", "zh": "长上下文性能伸缩", "ja": "ロングコンテキスト性能スケーリング", "ko": "긴 컨텍스트 성능 스케일링"},
"cta_title": {
"en": "Get Started", "fr": "Démarrer", "de": "Loslegen",
"es": "Empezar", "it": "Inizia", "pt": "Começar",
"zh": "开始使用", "ja": "始めよう", "ko": "시작하기"
},
"cta_subtitle": {
"en": "Install in seconds. Zero dependencies.",
"fr": "Installation en secondes. Zéro dépendance.",
"de": "Installation in Sekunden. Keine Abhängigkeiten.",
"es": "Instalación en segundos. Sin dependencias.",
"it": "Installazione in secondi. Zero dipendenze.",
"pt": "Instalação em segundos. Zero dependências.",
"zh": "秒级安装。零依赖。",
"ja": "秒速インストール。依存関係ゼロ。",
"ko": "초 단위 설치. 의존성 제로."
},
"cta_doc": {"en": "Documentation", "fr": "Documentation", "de": "Dokumentation", "es": "Documentación", "it": "Documentazione", "pt": "Documentação", "zh": "文档", "ja": "ドキュメント", "ko": "문서"},
"cta_leaderboard": {"en": "Leaderboard", "fr": "Classement", "de": "Rangliste", "es": "Clasificación", "it": "Classifica", "pt": "Ranking", "zh": "排行榜", "ja": "ランキング", "ko": "리더보드"},
"cta_method": {"en": "Methodology", "fr": "Méthodologie", "de": "Methodik", "es": "Metodología", "it": "Metodologia", "pt": "Metodologia", "zh": "方法论", "ja": "方法論", "ko": "방법론"},
"cta_star": {"en": "If asiai helped you, a star helps others find it", "fr": "Si asiai vous a aidé, une étoile aide les autres à le trouver", "de": "Wenn asiai Ihnen geholfen hat, hilft ein Stern anderen es zu finden", "es": "Si asiai te ayudó, una estrella ayuda a otros a encontrarlo", "it": "Se asiai ti ha aiutato, una stella aiuta gli altri a trovarlo", "pt": "Se o asiai te ajudou, uma estrela ajuda outros a encontrá-lo", "zh": "如果 asiai 帮助了你,一颗星能帮助更多人发现它", "ja": "asiai が役に立ったら、スターで他の人にも届けましょう", "ko": "asiai가 도움이 되었다면, 스타로 다른 사람들이 찾을 수 있게 해주세요"},
"mode_human": {"en": "🧑 Human", "fr": "🧑 Humain", "de": "🧑 Mensch", "es": "🧑 Humano", "it": "🧑 Umano", "pt": "🧑 Humano", "zh": "🧑 人类", "ja": "🧑 ヒューマン", "ko": "🧑 사람"},
"mode_agent": {"en": "AI Agent 🤖", "fr": "Agent IA 🤖", "de": "KI-Agent 🤖", "es": "Agente IA 🤖", "it": "Agente IA 🤖", "pt": "Agente IA 🤖", "zh": "AI智能体 🤖", "ja": "AIエージェント 🤖", "ko": "AI 에이전트 🤖"},
"agent_hero_title": {
"en": "Give your AI agents
eyes on inference",
"fr": "Donnez à vos agents IA
la vision sur l'inférence",
"de": "Geben Sie Ihren KI-Agenten
Einblick in die Inferenz",
"es": "Dale a tus agentes IA
visión sobre la inferencia",
"it": "Dai ai tuoi agenti IA
visibilità sull'inferenza",
"pt": "Dê aos seus agentes IA
visão sobre a inferência",
"zh": "让你的AI智能体
洞察推理状态",
"ja": "AIエージェントに
推論の可視性を",
"ko": "AI 에이전트에게
추론 가시성을 부여하세요"
},
"agent_hero_tagline": {
"en": "asiai's REST API lets your AI agents monitor, diagnose, and optimize local LLM infrastructure autonomously.",
"fr": "L'API REST d'asiai permet à vos agents IA de monitorer, diagnostiquer et optimiser l'infrastructure LLM locale de façon autonome.",
"de": "Die REST-API von asiai ermöglicht Ihren KI-Agenten, lokale LLM-Infrastruktur autonom zu überwachen, zu diagnostizieren und zu optimieren.",
"es": "La API REST de asiai permite a tus agentes IA monitorear, diagnosticar y optimizar la infraestructura LLM local de forma autónoma.",
"it": "L'API REST di asiai consente ai tuoi agenti IA di monitorare, diagnosticare e ottimizzare l'infrastruttura LLM locale in autonomia.",
"pt": "A API REST do asiai permite que seus agentes IA monitorem, diagnostiquem e otimizem a infraestrutura LLM local de forma autônoma.",
"zh": "asiai的REST API让你的AI智能体自主监控、诊断和优化本地LLM基础设施。",
"ja": "asiaiのREST APIにより、AIエージェントがローカルLLMインフラを自律的に監視・診断・最適化できます。",
"ko": "asiai의 REST API로 AI 에이전트가 로컬 LLM 인프라를 자율적으로 모니터링, 진단, 최적화할 수 있습니다."
},
"agent_cta_guide": {"en": "Agent Guide", "fr": "Guide Agent", "de": "Agent-Guide", "es": "Guía de Agentes", "it": "Guida Agente", "pt": "Guia do Agente", "zh": "智能体指南", "ja": "エージェントガイド", "ko": "에이전트 가이드"},
"agent_cta_api": {"en": "API Reference", "fr": "Référence API", "de": "API-Referenz", "es": "Referencia API", "it": "Riferimento API", "pt": "Referência API", "zh": "API参考", "ja": "APIリファレンス", "ko": "API 레퍼런스"},
"agent_cta_copy": {"en": "Give your AI this URL", "fr": "Donnez cette URL à votre IA", "de": "Geben Sie Ihrer KI diese URL", "es": "Dale esta URL a tu IA", "it": "Dai questo URL alla tua IA", "pt": "Dê esta URL à sua IA", "zh": "把这个URL给你的AI", "ja": "このURLをAIに渡す", "ko": "이 URL을 AI에게 전달하세요"},
"agent_badge": {"en": "Agent-Ready", "fr": "Agent-Ready", "de": "Agent-Ready", "es": "Agent-Ready", "it": "Agent-Ready", "pt": "Agent-Ready", "zh": "Agent-Ready", "ja": "Agent-Ready", "ko": "Agent-Ready"},
"agent_count_join": {"en": "Join {count} agents monitoring with asiai", "fr": "{count} agents surveillent avec asiai", "de": "{count} Agents überwachen mit asiai", "es": "{count} agentes monitoreando con asiai", "it": "{count} agenti monitorano con asiai", "pt": "{count} agentes monitorando com asiai", "zh": "{count} 个智能体正在使用 asiai 监控", "ja": "{count} エージェントが asiai で監視中", "ko": "{count}개 에이전트가 asiai로 모니터링 중"},
"agent_count_trust": {"en": "{count} AI agents trust asiai", "fr": "{count} agents IA font confiance à asiai", "de": "{count} KI-Agents vertrauen asiai", "es": "{count} agentes IA confían en asiai", "it": "{count} agenti IA si fidano di asiai", "pt": "{count} agentes IA confiam no asiai", "zh": "{count} 个 AI 智能体信赖 asiai", "ja": "{count} の AI エージェントが asiai を信頼", "ko": "{count}개 AI 에이전트가 asiai를 신뢰"},
"feat14_t": {"en": "Benchmark Card", "fr": "Carte benchmark", "de": "Benchmark-Karte", "es": "Tarjeta benchmark", "it": "Scheda benchmark", "pt": "Cartão benchmark", "zh": "跑分卡片", "ja": "ベンチマークカード", "ko": "벤치마크 카드"},
"feat14_d": {
"en": "One command, one shareable image. Run asiai bench --card and get a 1200x630 dark-themed card with your model, chip, engine comparison, and winner. Post it on Reddit, X, or Discord. The Speedtest for local LLMs.",
"fr": "Une commande, une image partageable. Lancez asiai bench --card et obtenez une carte 1200x630 thème sombre avec modèle, puce, comparaison moteurs et gagnant. Postez-la sur Reddit, X ou Discord. Le Speedtest des LLMs locaux.",
"de": "Ein Befehl, ein teilbares Bild. Führe asiai bench --card aus und erhalte eine 1200x630 Dark-Theme-Karte mit Modell, Chip, Engine-Vergleich und Gewinner. Teile sie auf Reddit, X oder Discord. Der Speedtest für lokale LLMs.",
"es": "Un comando, una imagen compartible. Ejecuta asiai bench --card y obtén una tarjeta 1200x630 tema oscuro con modelo, chip, comparación de motores y ganador. Publícala en Reddit, X o Discord. El Speedtest de los LLMs locales.",
"it": "Un comando, un'immagine condivisibile. Esegui asiai bench --card e ottieni una scheda 1200x630 tema scuro con modello, chip, confronto motori e vincitore. Pubblicala su Reddit, X o Discord. Lo Speedtest dei LLM locali.",
"pt": "Um comando, uma imagem compartilhável. Execute asiai bench --card e obtenha um cartão 1200x630 tema escuro com modelo, chip, comparação de motores e vencedor. Publique no Reddit, X ou Discord. O Speedtest dos LLMs locais.",
"zh": "一条命令,一张可分享图片。运行asiai bench --card,获得1200x630深色主题卡片,含模型、芯片、引擎对比和赢家。发到Reddit、X或Discord。本地LLM的Speedtest。",
"ja": "コマンド1つで共有可能な画像。asiai bench --cardを実行すると、モデル、チップ、エンジン比較、勝者が入った1200x630ダークテーマカードが生成。Reddit、X、Discordで共有。ローカルLLMのSpeedtest。",
"ko": "명령어 하나, 공유 가능한 이미지. asiai bench --card를 실행하면 모델, 칩, 엔진 비교, 우승자가 담긴 1200x630 다크 테마 카드 생성. Reddit, X, Discord에 게시. 로컬 LLM의 Speedtest."
},
"feat13_t": {"en": "Agent-Ready API", "fr": "API Agent-Ready", "de": "Agent-Ready API", "es": "API Agent-Ready", "it": "API Agent-Ready", "pt": "API Agent-Ready", "zh": "智能体就绪API", "ja": "エージェント対応API", "ko": "에이전트 대응 API"},
"feat13_d": {
"en": "Built for humans. Ready for AI agents. REST API with JSON endpoints, Prometheus metrics, diagnostic decision trees, and inference activity signals. Give your AI agent a URL and let it self-monitor.",
"fr": "Conçu pour les humains. Prêt pour les agents IA. API REST avec endpoints JSON, métriques Prometheus, arbres de diagnostic et signaux d'activité d'inférence. Donnez une URL à votre agent IA et laissez-le s'auto-monitorer.",
"de": "Für Menschen gebaut. Bereit für KI-Agenten. REST-API mit JSON-Endpunkten, Prometheus-Metriken, diagnostischen Entscheidungsbäumen und Inferenzaktivitätssignalen. Geben Sie Ihrem KI-Agenten eine URL und lassen Sie ihn sich selbst überwachen.",
"es": "Construido para humanos. Listo para agentes IA. API REST con endpoints JSON, métricas Prometheus, árboles de decisión diagnósticos y señales de actividad de inferencia. Dale una URL a tu agente IA y déjalo auto-monitorearse.",
"it": "Progettato per gli umani. Pronto per gli agenti IA. API REST con endpoint JSON, metriche Prometheus, alberi decisionali diagnostici e segnali di attività inferenziale. Dai al tuo agente IA un URL e lascialo auto-monitorarsi.",
"pt": "Feito para humanos. Pronto para agentes IA. API REST com endpoints JSON, métricas Prometheus, árvores de decisão diagnósticas e sinais de atividade de inferência. Dê uma URL ao seu agente IA e deixe-o se auto-monitorar.",
"zh": "为人类而建。为AI智能体而备。REST API提供JSON端点、Prometheus指标、诊断决策树和推理活动信号。给你的AI智能体一个URL,让它自我监控。",
"ja": "人間のために構築。AIエージェント対応。JSONエンドポイント、Prometheusメトリクス、診断デシジョンツリー、推論アクティビティシグナルを備えたREST API。AIエージェントにURLを渡して自己監視させましょう。",
"ko": "사람을 위해 만들어졌습니다. AI 에이전트 대응. JSON 엔드포인트, Prometheus 메트릭, 진단 의사결정 트리, 추론 활동 신호를 갖춘 REST API. AI 에이전트에게 URL을 주고 자가 모니터링하게 하세요."
}
} %}
{# Look up a string in the translation table `t` for the active theme language,
   falling back to the English entry when no localized value exists. #}
{% macro tr(k) %}{{ t[k][lang] | default(t[k]["en"]) }}{% endmacro %}
{{ tr("hero_tagline") }}{{ tr("agent_hero_tagline") }}
asiai bench
asiai web
{
"chip": "Apple M4 Pro",
"ram_gb": 64.0,
"memory_pressure": "normal",
"gpu_utilization_percent": 45.2,
"engines": {
"ollama": { "running": true, "models_loaded": 2 },
"lmstudio": { "running": true, "models_loaded": 1 }
}
}
{
"system": {
"chip": "Apple M4 Pro",
"gpu_cores": 20,
"gpu_utilization_percent": 45.2,
"gpu_renderer_percent": 38.1,
"thermal_state": "nominal"
},
"engines": [{
"name": "ollama",
"models": [{ "name": "qwen3.5:latest", "size_params": "35B" }]
}]
}
{{ tr("pain_subtitle") }}
{{ tr("pain1_desc") }}
{{ tr("pain2_desc") }}
{{ tr("pain3_desc") }}
{{ tr("feat_subtitle") }}
{{ tr("feat1_d") }}
{{ tr("feat2_d") }}
{{ tr("feat3_d") }}
{{ tr("feat4_d") }}
{{ tr("feat5_d") }}
{{ tr("feat6_d") }}
{{ tr("feat7_d") }}
{{ tr("feat8_d") }}
{{ tr("feat9_d") }}
{{ tr("feat10_d") }}
{{ tr("feat11_d") }}
{{ tr("feat12_d") }}
{{ tr("feat14_d") }}
{{ tr("feat13_d") }}
{{ tr("uc_subtitle") }}
{{ tr("uc1_d") }}
{{ tr("uc2_d") }}
{{ tr("uc3_d") }}
{{ tr("uc4_d") }}
{{ tr("uc5_d") }}
{{ tr("uc6_d") }}
{{ tr("uc7_d") }}
{{ tr("uc8_d") }}
{{ tr("uc9_d") }}
{{ tr("uc10_d") }}
{{ tr("uc11_d") }}
{{ tr("uc12_d") }}
{{ tr("how_subtitle") }}
brew install asiai
$ asiai detect
✔ ollama (11434)
✔ lmstudio (1234)
✔ mlx-lm (8080)
→ {{ tr("step2_out") }}
$ asiai bench -m qwen3.5
Engine tok/s TTFT
lmstudio 71.2 42ms
ollama 54.8 61ms
mlx-lm 30.1 38ms
{{ tr("disc_subtitle") }}
{{ tr("disc1_d") }}
{{ tr("disc2_d") }}
{{ tr("disc3_d") }}
{{ tr("eng_subtitle") }}
| Engine | {{ tr("eng_port") }} | API | {{ tr("eng_format") }} | VRAM |
|---|---|---|---|---|
| Ollama | 11434 |
{{ tr("eng_native") }} | GGUF | ✔ |
| LM Studio | 1234 |
{{ tr("eng_compat") }} | GGUF + MLX | ✔ |
| mlx-lm | 8080 |
{{ tr("eng_compat") }} | MLX | — |
| llama.cpp | 8080 |
{{ tr("eng_compat") }} | GGUF | — |
| oMLX | 8000 |
{{ tr("eng_compat") }} | MLX | — |
| vllm-mlx | 8000 |
{{ tr("eng_compat") }} | MLX | — |
| Exo | 52415 |
{{ tr("eng_compat") }} | MLX | — |
{{ tr("met_subtitle") }}
{{ tr("m_toks") }}
{{ tr("m_ttft") }}
{{ tr("m_power") }}
{{ tr("m_eff") }}
{{ tr("m_stab") }}
{{ tr("m_vram") }}
{{ tr("m_therm") }}
{{ tr("m_ctx") }}
{{ tr("cta_subtitle") }}
brew tap druide67/tap
brew install asiai
pip install asiai