Async::Ollama::Client

class Client

Represents a connection to the Ollama service, providing methods to generate completions, chat, and list models.
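
A minimal usage sketch, assuming Client.open is inherited from Async::REST::Resource and is called inside an event loop (the Sync block comes from the async gem):

require "async"
require "async/ollama"

Sync do
	Async::Ollama::Client.open do |client|
		# client is connected to the default ENDPOINT (http://localhost:11434).
	end
end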

Definitions

ENDPOINT = Async::HTTP::Endpoint.parse("http://localhost:11434")

The default endpoint to connect to.
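
A different Ollama instance can be targeted by parsing another endpoint and passing it when opening the client. This is a sketch, run inside an event loop as above; the hostname is illustrative:

endpoint = Async::HTTP::Endpoint.parse("http://ollama.internal:11434")

Async::Ollama::Client.open(endpoint) do |client|
	# Requests are now sent to ollama.internal rather than localhost.
end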

def generate(prompt, **options, &block)

Generates a response from the given prompt using Ollama. If a block is given, it is yielded the underlying HTTP response before the body is read.

Signature

parameter prompt String

The prompt to generate a response from.

parameter options Hash

Additional options for the request.

returns Generate

The generated response representation.

Implementation

def generate(prompt, **options, &block)
	options[:prompt] = prompt
	options[:model] ||= MODEL
	
	Generate.post(self.with(path: "/api/generate"), options) do |resource, response|
		# Yield the raw response to the caller before the body is consumed:
		if block_given?
			yield response
		end
		
		# Wrap the parsed body and response headers in a Generate representation:
		Generate.new(resource, value: response.read, metadata: response.headers)
	end
end
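
A hypothetical call, assuming Generate exposes a response accessor over the parsed body (the prompt and model name are illustrative):

generate = client.generate("Why is the sky blue?", model: "llama3")
puts generate.response # Assumed accessor for the generated text.

client.generate("Why is the sky blue?") do |response|
	# Optionally inspect the raw HTTP response (e.g. its headers) here.
end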

def chat(messages, **options, &block)

Sends a chat request with the given messages to Ollama. If a block is given, it is yielded the underlying HTTP response before the body is read.

Signature

parameter messages Array(Hash)

The chat messages to send.

parameter options Hash

Additional options for the request.

returns Chat

The chat response representation.

Implementation

def chat(messages, **options, &block)
	options[:model] ||= MODEL
	options[:messages] = messages
	
	Chat.post(self.with(path: "/api/chat"), options) do |resource, response|
		# Yield the raw response to the caller before the body is consumed:
		if block_given?
			yield response
		end
		
		# Wrap the parsed body and response headers in a Chat representation:
		Chat.new(resource, value: response.read, metadata: response.headers)
	end
end
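
A hypothetical exchange, assuming Chat exposes a message accessor over the parsed body; the role/content keys follow the Ollama chat API:

messages = [
	{role: "user", content: "Hello, who are you?"}
]

chat = client.chat(messages)
puts chat.message # Assumed accessor for the reply message.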

def models

Retrieves the list of available models from Ollama.

Signature

returns Models

The models response representation.

Implementation

def models
	Models.get(self.with(path: "/api/tags"))
end
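
A sketch of listing installed model names; the Ollama /api/tags payload lists models under a "models" key, and the value accessor and symbolized keys are assumptions about the underlying JSON representation:

models = client.models

models.value[:models].each do |model|
	puts model[:name] # Assumes the JSON body is parsed with symbolized keys.
end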