Class: ReplicateLLM
LLM implementation backed by Llama-family models hosted on Replicate.
Extends
BaseLLM
Constructors
new ReplicateLLM()
new ReplicateLLM(init?): ReplicateLLM
Parameters
• init?: Partial<ReplicateLLM> & object
Returns
ReplicateLLM
Overrides
BaseLLM.constructor
Source
packages/core/src/llm/replicate_ai.ts:115
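A minimal construction sketch. It assumes ReplicateLLM is exported from the llamaindex package and that the underlying ReplicateSession reads the API token from the REPLICATE_API_TOKEN environment variable; since init is a Partial, any omitted field falls back to its default.

```typescript
import { ReplicateLLM } from "llamaindex";

// Sketch: assumes REPLICATE_API_TOKEN is set in the environment and
// that ReplicateLLM is exported from the "llamaindex" package.
const llm = new ReplicateLLM({
  model: "llama-3-8b-instruct", // one of the union members listed below
  temperature: 0.5,
  maxTokens: 512,
});
```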
Properties
chatStrategy
chatStrategy: ReplicateChatStrategy
Source
packages/core/src/llm/replicate_ai.ts:109
maxTokens?
optional maxTokens: number
Source
packages/core/src/llm/replicate_ai.ts:112
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
Source
packages/core/src/llm/replicate_ai.ts:108
replicateSession
replicateSession: ReplicateSession
Source
packages/core/src/llm/replicate_ai.ts:113
temperature
temperature: number
Source
packages/core/src/llm/replicate_ai.ts:110
topP
topP: number
Source
packages/core/src/llm/replicate_ai.ts:111
Accessors
metadata
get metadata(): object
Returns
object
contextWindow
contextWindow: number
maxTokens
maxTokens: undefined | number
model
model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
temperature
temperature: number
tokenizer
tokenizer: undefined = undefined
topP
topP: number
Source
packages/core/src/llm/replicate_ai.ts:140
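A short sketch of reading the accessor; the destructured names follow the return object documented above.

```typescript
import { ReplicateLLM } from "llamaindex";

const llm = new ReplicateLLM({ model: "llama-3-70b-instruct" });

// metadata mirrors the configured generation settings; maxTokens may
// be undefined when it was not set at construction time.
const { model, contextWindow, temperature, topP, maxTokens } = llm.metadata;
console.log(`${model}: context=${contextWindow}, maxTokens=${maxTokens ?? "default"}`);
```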
Methods
chat()
chat(params)
chat(params): Promise<AsyncIterable<ChatResponseChunk>>
Parameters
• params: LLMChatParamsStreaming<object, object>
Returns
Promise<AsyncIterable<ChatResponseChunk>>
Overrides
BaseLLM.chat
Source
packages/core/src/llm/replicate_ai.ts:307
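A streaming usage sketch. It assumes this overload is selected by passing stream: true, and that each ChatResponseChunk exposes its incremental text as delta.

```typescript
import { ReplicateLLM } from "llamaindex";

const llm = new ReplicateLLM({ model: "llama-3-8b-instruct" });

// Passing stream: true selects the streaming overload; the promise
// resolves to an AsyncIterable of ChatResponseChunk values.
const stream = await llm.chat({
  messages: [{ role: "user", content: "Summarize Replicate in one line." }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.delta); // assumption: chunks carry a `delta` string
}
```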
chat(params)
chat(params): Promise<ChatResponse<object>>
Parameters
• params: LLMChatParamsNonStreaming<object, object>
Returns
Promise<ChatResponse<object>>
Overrides
BaseLLM.chat
Source
packages/core/src/llm/replicate_ai.ts:310
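The non-streaming counterpart; a sketch assuming the resolved ChatResponse carries the reply under message.content.

```typescript
import { ReplicateLLM } from "llamaindex";

const llm = new ReplicateLLM({ model: "llama-3-8b-instruct" });

// Without stream: true, this overload resolves to a single ChatResponse.
const response = await llm.chat({
  messages: [{ role: "user", content: "What does topP control?" }],
});
console.log(response.message.content); // assumption: reply lives on message.content
```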
complete()
complete(params)
complete(params): Promise<AsyncIterable<CompletionResponse>>
Parameters
• params: LLMCompletionParamsStreaming
Returns
Promise<AsyncIterable<CompletionResponse>>
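A streaming completion sketch. Unlike chat(), complete() takes a bare prompt; this assumes each CompletionResponse chunk exposes its text under a text field.

```typescript
import { ReplicateLLM } from "llamaindex";

const llm = new ReplicateLLM({ model: "llama-3-8b-instruct" });

// With stream: true, complete() resolves to an AsyncIterable of
// CompletionResponse chunks rather than a single response.
const stream = await llm.complete({
  prompt: "The three primary colors are",
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text); // assumption: chunks carry a `text` string
}
```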