Base class for chat models. It extends the BaseLanguageModel class and provides methods for generating chat completions based on input messages.

Hierarchy

Constructors

Properties

ParsedCallOptions: Omit<ChatOllamaFunctionsCallOptions, never>
caller: AsyncCaller

The async caller should be used by subclasses to make any async calls, so that those calls benefit from the built-in concurrency and retry logic.

toolSystemPrompt: BasePromptTemplate<any, BasePromptValue, any> = TOOL_SYSTEM_PROMPT
verbose: boolean

Whether to print out response text.

callbacks?: Callbacks
metadata?: Record<string, unknown>
tags?: string[]
defaultResponseFunction: {
    description: string;
    name: string;
    parameters: {
        properties: {
            response: {
                description: string;
                type: string;
            };
        };
        required: string[];
        type: string;
    };
} = ...

Type declaration

  • description: string
  • name: string
  • parameters: {
        properties: {
            response: {
                description: string;
                type: string;
            };
        };
        required: string[];
        type: string;
    }
    • properties: {
          response: {
              description: string;
              type: string;
          };
      }
      • response: {
            description: string;
            type: string;
        }
        • description: string
        • type: string
    • required: string[]
    • type: string
lc_runnable: boolean = true

Accessors

Methods

  • Get the parameters used to invoke the model

    Returns {
        format: undefined | StringWithAutocomplete<"json">;
        model: string;
        options: {
            embedding_only: undefined | boolean;
            f16_kv: undefined | boolean;
            frequency_penalty: undefined | number;
            logits_all: undefined | boolean;
            low_vram: undefined | boolean;
            main_gpu: undefined | number;
            mirostat: undefined | number;
            mirostat_eta: undefined | number;
            mirostat_tau: undefined | number;
            num_batch: undefined | number;
            num_ctx: undefined | number;
            num_gpu: undefined | number;
            num_gqa: undefined | number;
            num_keep: undefined | number;
            num_thread: undefined | number;
            penalize_newline: undefined | boolean;
            presence_penalty: undefined | number;
            repeat_last_n: undefined | number;
            repeat_penalty: undefined | number;
            rope_frequency_base: undefined | number;
            rope_frequency_scale: undefined | number;
            stop: undefined | string[];
            temperature: undefined | number;
            tfs_z: undefined | number;
            top_k: undefined | number;
            top_p: undefined | number;
            typical_p: undefined | number;
            use_mlock: undefined | boolean;
            use_mmap: undefined | boolean;
            vocab_only: undefined | boolean;
        };
    }

    • format: undefined | StringWithAutocomplete<"json">
    • model: string
    • options: {
          embedding_only: undefined | boolean;
          f16_kv: undefined | boolean;
          frequency_penalty: undefined | number;
          logits_all: undefined | boolean;
          low_vram: undefined | boolean;
          main_gpu: undefined | number;
          mirostat: undefined | number;
          mirostat_eta: undefined | number;
          mirostat_tau: undefined | number;
          num_batch: undefined | number;
          num_ctx: undefined | number;
          num_gpu: undefined | number;
          num_gqa: undefined | number;
          num_keep: undefined | number;
          num_thread: undefined | number;
          penalize_newline: undefined | boolean;
          presence_penalty: undefined | number;
          repeat_last_n: undefined | number;
          repeat_penalty: undefined | number;
          rope_frequency_base: undefined | number;
          rope_frequency_scale: undefined | number;
          stop: undefined | string[];
          temperature: undefined | number;
          tfs_z: undefined | number;
          top_k: undefined | number;
          top_p: undefined | number;
          typical_p: undefined | number;
          use_mlock: undefined | boolean;
          use_mmap: undefined | boolean;
          vocab_only: undefined | boolean;
      }
      • embedding_only: undefined | boolean
      • f16_kv: undefined | boolean
      • frequency_penalty: undefined | number
      • logits_all: undefined | boolean
      • low_vram: undefined | boolean
      • main_gpu: undefined | number
      • mirostat: undefined | number
      • mirostat_eta: undefined | number
      • mirostat_tau: undefined | number
      • num_batch: undefined | number
      • num_ctx: undefined | number
      • num_gpu: undefined | number
      • num_gqa: undefined | number
      • num_keep: undefined | number
      • num_thread: undefined | number
      • penalize_newline: undefined | boolean
      • presence_penalty: undefined | number
      • repeat_last_n: undefined | number
      • repeat_penalty: undefined | number
      • rope_frequency_base: undefined | number
      • rope_frequency_scale: undefined | number
      • stop: undefined | string[]
      • temperature: undefined | number
      • tfs_z: undefined | number
      • top_k: undefined | number
      • top_p: undefined | number
      • typical_p: undefined | number
      • use_mlock: undefined | boolean
      • use_mmap: undefined | boolean
      • vocab_only: undefined | boolean
  • Stream all output from a runnable, as reported to the callback system. This includes all inner runs of LLMs, Retrievers, Tools, etc. Output is streamed as Log objects, each of which includes a list of jsonpatch ops describing how the state of the run has changed at that step, together with the final state of the run. The jsonpatch ops can be applied in order to reconstruct the run state.

    Parameters

    Returns AsyncGenerator<RunLogPatch, any, unknown>

Generated using TypeDoc