kiln_ai.adapters.ml_model_list

"""
Provides model configuration and management for various LLM providers and models.
This module handles the integration with different AI model providers and their respective models,
including configuration, validation, and instantiation of language models.
"""

from enum import Enum
from typing import Dict, List

from pydantic import BaseModel


class ModelProviderName(str, Enum):
    """
    Enumeration of supported AI model providers.
    """

    openai = "openai"
    groq = "groq"
    amazon_bedrock = "amazon_bedrock"
    ollama = "ollama"
    openrouter = "openrouter"
    fireworks_ai = "fireworks_ai"
    kiln_fine_tune = "kiln_fine_tune"
    kiln_custom_registry = "kiln_custom_registry"


class ModelFamily(str, Enum):
    """
    Enumeration of supported model families/architectures.
    """

    gpt = "gpt"
    llama = "llama"
    phi = "phi"
    mistral = "mistral"
    gemma = "gemma"
    gemini = "gemini"
    claude = "claude"
    mixtral = "mixtral"
    qwen = "qwen"


class ModelName(str, Enum):
    """
    Enumeration of specific model versions supported by the system.
    Where models have both instruct and raw versions, instruct is the default and raw must be specified explicitly.
    """

    llama_3_1_8b = "llama_3_1_8b"
    llama_3_1_70b = "llama_3_1_70b"
    llama_3_1_405b = "llama_3_1_405b"
    llama_3_2_1b = "llama_3_2_1b"
    llama_3_2_3b = "llama_3_2_3b"
    llama_3_2_11b = "llama_3_2_11b"
    llama_3_2_90b = "llama_3_2_90b"
    llama_3_3_70b = "llama_3_3_70b"
    gpt_4o_mini = "gpt_4o_mini"
    gpt_4o = "gpt_4o"
    phi_3_5 = "phi_3_5"
    mistral_large = "mistral_large"
    mistral_nemo = "mistral_nemo"
    gemma_2_2b = "gemma_2_2b"
    gemma_2_9b = "gemma_2_9b"
    gemma_2_27b = "gemma_2_27b"
    claude_3_5_haiku = "claude_3_5_haiku"
    claude_3_5_sonnet = "claude_3_5_sonnet"
    gemini_1_5_flash = "gemini_1_5_flash"
    gemini_1_5_flash_8b = "gemini_1_5_flash_8b"
    gemini_1_5_pro = "gemini_1_5_pro"
    nemotron_70b = "nemotron_70b"
    mixtral_8x7b = "mixtral_8x7b"
    qwen_2p5_7b = "qwen_2p5_7b"
    qwen_2p5_72b = "qwen_2p5_72b"


class KilnModelProvider(BaseModel):
    """
    Configuration for a specific model provider.

    Attributes:
        name: The provider's identifier
        supports_structured_output: Whether the provider supports structured output formats
        supports_data_gen: Whether the provider supports data generation
        untested_model: Whether the model is untested (typically user added). When true, the supports_* fields are not applicable.
        provider_finetune_id: The finetune ID for the provider, if applicable
        provider_options: Additional provider-specific configuration options
        adapter_options: Additional options specific to the adapter. The top-level key should be the adapter ID.
    """

    name: ModelProviderName
    supports_structured_output: bool = True
    supports_data_gen: bool = True
    untested_model: bool = False
    provider_finetune_id: str | None = None
    provider_options: Dict = {}
    adapter_options: Dict = {}


class KilnModel(BaseModel):
    """
    Configuration for a specific AI model.

    Attributes:
        family: The model's architecture family
        name: The model's identifier
        friendly_name: Human-readable name for the model
        providers: List of providers that offer this model
        supports_structured_output: Whether the model supports structured output formats
    """

    family: str
    name: str
    friendly_name: str
    providers: List[KilnModelProvider]
    supports_structured_output: bool = True


built_in_models: List[KilnModel] = [
    # GPT 4o Mini
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o_mini,
        friendly_name="GPT 4o Mini",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o-mini"},
                provider_finetune_id="gpt-4o-mini-2024-07-18",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o-mini"},
            ),
        ],
    ),
    # GPT 4o
    KilnModel(
        family=ModelFamily.gpt,
        name=ModelName.gpt_4o,
        friendly_name="GPT 4o",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openai,
                provider_options={"model": "gpt-4o"},
                provider_finetune_id="gpt-4o-2024-08-06",
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "openai/gpt-4o-2024-08-06"},
            ),
        ],
    ),
    # Claude 3.5 Haiku
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_haiku,
        friendly_name="Claude 3.5 Haiku",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "anthropic/claude-3-5-haiku"},
            ),
        ],
    ),
    # Claude 3.5 Sonnet
    KilnModel(
        family=ModelFamily.claude,
        name=ModelName.claude_3_5_sonnet,
        friendly_name="Claude 3.5 Sonnet",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "anthropic/claude-3.5-sonnet"},
            ),
        ],
    ),
    # Gemini 1.5 Pro
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_pro,
        friendly_name="Gemini 1.5 Pro",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,  # it should, but doesn't work on openrouter
                supports_data_gen=False,  # doesn't work on openrouter
                provider_options={"model": "google/gemini-pro-1.5"},
            ),
        ],
    ),
    # Gemini 1.5 Flash
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash,
        friendly_name="Gemini 1.5 Flash",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                provider_options={"model": "google/gemini-flash-1.5"},
            ),
        ],
    ),
    # Gemini 1.5 Flash 8B
    KilnModel(
        family=ModelFamily.gemini,
        name=ModelName.gemini_1_5_flash_8b,
        friendly_name="Gemini 1.5 Flash 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "google/gemini-flash-1.5-8b"},
            ),
        ],
    ),
    # Nemotron 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.nemotron_70b,
        friendly_name="Nemotron 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "nvidia/llama-3.1-nemotron-70b-instruct"},
            ),
        ],
    ),
    # Llama 3.1 8B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_8b,
        friendly_name="Llama 3.1 8B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.1-8b-instant"},
            ),
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-8b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "llama3.1:8b",
                    "model_aliases": ["llama3.1"],  # 8b is default
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "meta-llama/llama-3.1-8b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-8b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-8b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_70b,
        friendly_name="Llama 3.1 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.groq,
                provider_options={"model": "llama-3.1-70b-versatile"},
            ),
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                # AWS 70b not working as well as the others.
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-70b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.1-70b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "llama3.1:70b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_finetune_id="accounts/fireworks/models/llama-v3p1-70b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-70b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.1 405B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_1_405b,
        friendly_name="Llama 3.1 405B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                supports_data_gen=False,
                provider_options={
                    "model": "meta.llama3-1-405b-instruct-v1:0",
                    "region_name": "us-west-2",  # Llama 3.1 only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "llama3.1:405b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.1-405b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p1-405b-instruct"
                },
            ),
        ],
    ),
    # Mistral Nemo
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_nemo,
        friendly_name="Mistral Nemo",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mistral-nemo"},
            ),
        ],
    ),
    # Mistral Large
    KilnModel(
        family=ModelFamily.mistral,
        name=ModelName.mistral_large,
        friendly_name="Mistral Large",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.amazon_bedrock,
                provider_options={
                    "model": "mistral.mistral-large-2407-v1:0",
                    "region_name": "us-west-2",  # only in west-2
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mistral-large"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "mistral-large"},
            ),
        ],
    ),
    # Llama 3.2 1B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_1b,
        friendly_name="Llama 3.2 1B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "meta-llama/llama-3.2-1b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2:1b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_finetune_id="accounts/fireworks/models/llama-v3p2-1b-instruct",
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-1b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.2 3B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_3b,
        friendly_name="Llama 3.2 3B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "meta-llama/llama-3.2-3b-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_finetune_id="accounts/fireworks/models/llama-v3p2-3b-instruct",
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-3b-instruct"
                },
            ),
        ],
    ),
    # Llama 3.2 11B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_11b,
        friendly_name="Llama 3.2 11B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.2-11b-vision-instruct"},
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "llama3.2-vision"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-11b-vision-instruct"
                },
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
        ],
    ),
    # Llama 3.2 90B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_2_90b,
        friendly_name="Llama 3.2 90B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.2-90b-vision-instruct"},
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "llama3.2-vision:90b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p2-90b-vision-instruct"
                },
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
        ],
    ),
    # Llama 3.3 70B
    KilnModel(
        family=ModelFamily.llama,
        name=ModelName.llama_3_3_70b,
        friendly_name="Llama 3.3 70B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "meta-llama/llama-3.3-70b-instruct"},
                # OpenRouter doesn't support tools for this model yet. Once it does, this can probably be removed. JSON mode sometimes works, but not consistently.
                supports_structured_output=False,
                supports_data_gen=False,
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "llama3.3"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                # Finetuning not live yet
                # provider_finetune_id="accounts/fireworks/models/llama-v3p3-70b-instruct",
                provider_options={
                    "model": "accounts/fireworks/models/llama-v3p3-70b-instruct"
                },
            ),
        ],
    ),
    # Phi 3.5
    KilnModel(
        family=ModelFamily.phi,
        name=ModelName.phi_3_5,
        friendly_name="Phi 3.5",
        supports_structured_output=False,
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "phi3.5"},
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "microsoft/phi-3.5-mini-128k-instruct"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                supports_structured_output=False,
                supports_data_gen=False,
                # No finetune support. https://docs.fireworks.ai/fine-tuning/fine-tuning-models
                provider_options={
                    "model": "accounts/fireworks/models/phi-3-vision-128k-instruct"
                },
            ),
        ],
    ),
    # Gemma 2 2.6b
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_2b,
        friendly_name="Gemma 2 2B",
        supports_structured_output=False,
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:2b",
                },
            ),
        ],
    ),
    # Gemma 2 9b
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_9b,
        friendly_name="Gemma 2 9B",
        supports_structured_output=False,
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:9b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-9b-it"},
            ),
            # Fireworks AI errors (the system role is not allowed). Excluded until resolved.
        ],
    ),
    # Gemma 2 27b
    KilnModel(
        family=ModelFamily.gemma,
        name=ModelName.gemma_2_27b,
        friendly_name="Gemma 2 27B",
        supports_structured_output=False,
        providers=[
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_data_gen=False,
                provider_options={
                    "model": "gemma2:27b",
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                supports_data_gen=False,
                provider_options={"model": "google/gemma-2-27b-it"},
            ),
        ],
    ),
    # Mixtral 8x7B
    KilnModel(
        family=ModelFamily.mixtral,
        name=ModelName.mixtral_8x7b,
        friendly_name="Mixtral 8x7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "mistralai/mixtral-8x7b-instruct"},
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                supports_structured_output=False,
                supports_data_gen=False,
                provider_options={"model": "mixtral"},
            ),
        ],
    ),
    # Qwen 2.5 7B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_7b,
        friendly_name="Qwen 2.5 7B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-7b-instruct"},
                # Tool calls not supported. JSON doesn't error, but fails.
                supports_structured_output=False,
                supports_data_gen=False,
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5"},
            ),
        ],
    ),
    # Qwen 2.5 72B
    KilnModel(
        family=ModelFamily.qwen,
        name=ModelName.qwen_2p5_72b,
        friendly_name="Qwen 2.5 72B",
        providers=[
            KilnModelProvider(
                name=ModelProviderName.openrouter,
                provider_options={"model": "qwen/qwen-2.5-72b-instruct"},
                # Not consistent with structured data. Works sometimes, but not often.
                supports_structured_output=False,
                supports_data_gen=False,
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
            KilnModelProvider(
                name=ModelProviderName.ollama,
                provider_options={"model": "qwen2.5:72b"},
            ),
            KilnModelProvider(
                name=ModelProviderName.fireworks_ai,
                provider_options={
                    "model": "accounts/fireworks/models/qwen2p5-72b-instruct"
                },
                # Fireworks will start tuning, but it never finishes.
                # provider_finetune_id="accounts/fireworks/models/qwen2p5-72b-instruct",
                adapter_options={
                    "langchain": {
                        "with_structured_output_options": {"method": "json_mode"}
                    }
                },
            ),
        ],
    ),
]
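
The examples below sketch typical usage of this module. First, a minimal lookup of a model and one of its provider entries in built_in_models (the find_model helper is illustrative, not part of this module):

from kiln_ai.adapters.ml_model_list import (
    KilnModel,
    ModelName,
    ModelProviderName,
    built_in_models,
)


def find_model(name: ModelName) -> KilnModel | None:
    # The built-in list is small and static, so a linear scan is fine.
    for model in built_in_models:
        if model.name == name:
            return model
    return None


model = find_model(ModelName.llama_3_1_8b)
assert model is not None
# Grab this model's Ollama provider entry, if it has one.
ollama = next(
    (p for p in model.providers if p.name == ModelProviderName.ollama), None
)
if ollama is not None:
    print(ollama.provider_options["model"])  # llama3.1:8b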
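Because ModelProviderName, ModelFamily, and ModelName all mix str into Enum, members compare equal to their raw string values and serialize as plain strings, which is convenient when names arrive from config files or API payloads:

from kiln_ai.adapters.ml_model_list import ModelFamily, ModelProviderName

assert ModelProviderName.ollama == "ollama"
assert ModelFamily("llama") is ModelFamily.llama

# Validating untrusted input: constructing from an unknown value raises ValueError.
raw = "fireworks_ai"
try:
    provider = ModelProviderName(raw)
except ValueError:
    provider = None
print(provider)  # ModelProviderName.fireworks_ai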
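Capability flags exist at two levels: KilnModel.supports_structured_output for the model overall, and KilnModelProvider.supports_structured_output per hosting provider. Requiring both before requesting structured output is one reasonable reading (an assumption here, not a rule stated in this module):

from kiln_ai.adapters.ml_model_list import built_in_models

for model in built_in_models:
    capable = [
        provider.name.value
        for provider in model.providers
        if model.supports_structured_output and provider.supports_structured_output
    ]
    if capable:
        print(f"{model.friendly_name}: {', '.join(capable)}")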
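For user-added models, untested_model=True marks the entry as untested, meaning the supports_* defaults should not be trusted. A sketch describing a custom Ollama-served model (the tag my-custom-llama is made up for illustration):

from kiln_ai.adapters.ml_model_list import (
    KilnModel,
    KilnModelProvider,
    ModelFamily,
    ModelProviderName,
)

custom_model = KilnModel(
    # family is declared as a plain str, so the str-based enum value validates.
    family=ModelFamily.llama,
    name="my_custom_llama",
    friendly_name="My Custom Llama",
    providers=[
        KilnModelProvider(
            name=ModelProviderName.ollama,
            untested_model=True,  # supports_* flags are not applicable here
            provider_options={"model": "my-custom-llama"},
        )
    ],
)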

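provider_finetune_id appears to double as an availability flag: a provider entry offers fine-tuning only when the ID is not None (note the commented-out IDs above for providers where tuning is broken or not yet live). A short sketch enumerating fine-tunable pairings:

from kiln_ai.adapters.ml_model_list import built_in_models

for model in built_in_models:
    for provider in model.providers:
        if provider.provider_finetune_id is not None:
            print(model.friendly_name, provider.name.value, provider.provider_finetune_id)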

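adapter_options is keyed by adapter ID; several OpenRouter entries above pass {"method": "json_mode"} under the langchain key, presumably forwarded to LangChain's with_structured_output. Reading it back out (Mixtral's first provider entry is the OpenRouter one):

from kiln_ai.adapters.ml_model_list import ModelName, built_in_models

mixtral = next(m for m in built_in_models if m.name == ModelName.mixtral_8x7b)
openrouter_provider = mixtral.providers[0]
options = openrouter_provider.adapter_options.get("langchain", {}).get(
    "with_structured_output_options", {}
)
print(options)  # {'method': 'json_mode'}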