The Ollama AI model provides a lot of functionality. In addition, Ollama allows the registration of custom functions, tools, and external APIs to enhance the model's capabilities. The AI model can intelligently choose to call one or many functions. Spring AI's Ollama function calling integrates easily with these AI models.
The function needs to be registered as a Spring Bean with a name, a description, and a call signature that tells the model about the required arguments. The description helps the model decide when to call the function.
package com.example.springai.config;
import com.example.springai.service.WeatherService;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Description;
import java.util.function.Function;
@Configuration
public class FunctionConfiguration {

    /**
     * Exposes the weather lookup as a named function bean so the AI model can
     * invoke it through Spring AI function calling. The {@code @Description}
     * text is passed to the model and helps it decide when to call this function.
     *
     * @param weatherService the Spring-managed {@link WeatherService} instance.
     *                       It must be injected rather than created with {@code new},
     *                       otherwise its {@code @Value} resource and {@code @Autowired}
     *                       Gson fields are never populated and the function fails
     *                       with a NullPointerException at call time.
     * @return the function the model can call with a city name
     */
    @Bean
    @Description("Get the current weather condition for the given city")
    public Function<WeatherService.Request, WeatherService.Response> currentWeatherFunction(WeatherService weatherService) {
        return weatherService;
    }
}
package com.example.springai.controller;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.SystemMessage;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.ollama.api.OllamaOptions;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import java.util.List;
@RestController
public class SpringAiController {

    private final ChatClient chatClient;

    public SpringAiController(ChatClient.Builder chatClient) {
        this.chatClient = chatClient.build();
    }

    /**
     * Answers a weather question, allowing the model to call the registered
     * {@code currentWeatherFunction} bean when it needs weather data.
     *
     * @param question the user's question, e.g. "what is current weather in London"
     * @return the model's answer text
     * @throws IllegalStateException if the model produces no chat response
     */
    @GetMapping("/function")
    public String function(@RequestParam(value = "question") String question) {
        SystemMessage systemMessage = new SystemMessage("You are a helpful AI assistant answering questions about cities around the world and their current weather in very detail.");
        UserMessage userMessage = new UserMessage(question);
        // Enable the function for this request only; the name must match the @Bean method name.
        OllamaOptions currentWeatherFunction = OllamaOptions.builder().function("currentWeatherFunction").build();
        Prompt prompt = new Prompt(List.of(systemMessage, userMessage), currentWeatherFunction);
        ChatResponse chatResponse = chatClient.prompt(prompt).call().chatResponse();
        // chatResponse() is nullable; fail with a clear message instead of an opaque NPE.
        if (chatResponse == null) {
            throw new IllegalStateException("No chat response received from the model");
        }
        return chatResponse.getResult().getOutput().getContent();
    }
}
package com.example.springai.service;
import com.google.gson.Gson;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.Resource;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.function.Function;
// NOTE(review): @Configuration registers this as a Spring component (which is why it
// works), but @Service or @Component would express the intent more clearly — confirm.
@Configuration
public class WeatherService implements Function<WeatherService.Request, WeatherService.Response> {

    // Static weather payload bundled with the app; stands in for a real weather API.
    @Value("classpath:weather.json")
    Resource resourceFile;

    private @Autowired Gson gson;

    /**
     * Returns the "current weather" for the requested city by deserializing the
     * bundled weather.json fixture. The request is logged so function-calling
     * round trips are visible in the console.
     *
     * @param request the city the model is asking about (the fixture payload is
     *                returned regardless of which city is requested)
     * @return the parsed weather payload
     * @throws RuntimeException if the classpath resource cannot be read
     */
    @Override
    public Response apply(Request request) {
        System.out.println("WeatherService.apply request = " + request);
        try {
            return gson.fromJson(resourceFile.getContentAsString(StandardCharsets.UTF_8), Response.class);
        } catch (IOException e) {
            // Preserve the original exception as the cause instead of flattening it into the message.
            throw new RuntimeException("Unable to read weather json file", e);
        }
    }

    // Record component names intentionally mirror the JSON keys (snake_case)
    // so Gson maps them without any field annotations.

    /** Input schema the model fills in when calling the function. */
    public record Request(String city) {
    }

    record AirQuality(double co, double no2, double o3, double so2, double pm2_5, double pm10) {
    }

    record Condition(String text, String icon, int code) {
    }

    record Current(int last_updated_epoch, String last_updated, int temp_c, double temp_f, int is_day,
                   Condition condition, double wind_mph, double wind_kph, int wind_degree, String wind_dir,
                   int pressure_mb, double pressure_in, double precip_mm, int precip_in, int humidity, int cloud,
                   double feelslike_c, double feelslike_f, int vis_km, int vis_miles, int uv, double gust_mph,
                   double gust_kph, AirQuality air_quality) {
    }

    record Location(String name, String region, String country, double lat, double lon, String tz_id,
                    int localtime_epoch, String localtime) {
    }

    /** Output schema returned to the model. */
    public record Response(Location location, Current current) {
    }
}
package com.example.springai;
import com.google.gson.Gson;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
@SpringBootApplication
public class SpringAiApplication {

    /** Boots the Spring context and starts the embedded web server. */
    public static void main(String[] args) {
        SpringApplication application = new SpringApplication(SpringAiApplication.class);
        application.run(args);
    }

    /** Shared Gson instance, injected into WeatherService to parse weather.json. */
    @Bean
    public Gson gson() {
        return new Gson();
    }
}
spring.application.name=SpringAi
spring.docker.compose.lifecycle-management=start-only
spring.threads.virtual.enabled=true
# The default Ollama model in Spring AI is mistral, but it can be changed by setting the property below. Make sure to download the same model in the entrypoint.sh file.
#spring.ai.ollama.chat.options.model=llama3.1
# If running the Ollama Docker Instance separately, then set this property
spring.docker.compose.enabled=false
{
"location": {
"name": "London",
"region": "City of London, Greater London",
"country": "United Kingdom",
"lat": 51.52,
"lon": -0.11,
"tz_id": "Europe/London",
"localtime_epoch": 1613896955,
"localtime": "2024-02-21 8:42"
},
"current": {
"last_updated_epoch": 1613896210,
"last_updated": "2021-02-21 08:30",
"temp_c": 10,
"temp_f": 50.8,
"is_day": 1,
"condition": {
"text": "Partly cloudy",
"icon": "//cdn.weatherapi.com/weather/64x64/day/116.png",
"code": 1003
},
"wind_mph": 3.8,
"wind_kph": 6.1,
"wind_degree": 220,
"wind_dir": "SW",
"pressure_mb": 1009,
"pressure_in": 30.3,
"precip_mm": 0.1,
"precip_in": 0,
"humidity": 82,
"cloud": 75,
"feelslike_c": 9.5,
"feelslike_f": 49.2,
"vis_km": 10,
"vis_miles": 6,
"uv": 1,
"gust_mph": 10.5,
"gust_kph": 16.9,
"air_quality": {
"co": 230.3,
"no2": 13.5,
"o3": 54.3,
"so2": 7.9,
"pm2_5": 8.6,
"pm10": 11.3,
"us-epa-index": 1,
"gb-defra-index": 1
}
}
}
services:
  ollama-model:
    image: ollama/ollama:latest
    container_name: ollama_container
    ports:
      # Expose the Ollama HTTP API on the host.
      - "11434:11434/tcp"
    healthcheck:
      test: ollama --version || exit 1
    command: serve
    volumes:
      # Persist pulled models between container restarts.
      - ./ollama/ollama:/root/.ollama
      - ./entrypoint.sh:/entrypoint.sh
    pull_policy: missing
    tty: true
    # Quote "no" so YAML does not parse it as the boolean false.
    restart: "no"
    entrypoint: [ "/usr/bin/bash", "/entrypoint.sh" ]
  open-webui:
    image: ghcr.io/open-webui/open-webui:main
    container_name: open_webui_container
    environment:
      WEBUI_AUTH: false
    ports:
      - "8081:8080"
    extra_hosts:
      # Let the Web UI reach the Ollama server on the host.
      - "host.docker.internal:host-gateway"
    volumes:
      - open-webui:/app/backend/data
    restart: "no"
volumes:
  open-webui:
#!/bin/bash
# Start the Ollama server in the background.
/bin/ollama serve &
# Record the server's process ID so we can wait on it later.
pid=$!
# Give the server a moment to start accepting requests.
sleep 5
# The default Ollama model in Spring AI is mistral, but it can be changed in the application's property file. Make sure to download the same model here.
# Message matches the model actually pulled below (was wrongly labelled "LLAMA3").
echo "🔴 Retrieving mistral model..."
ollama pull mistral
echo "🟢 Done!"
# Block until the Ollama server process exits.
wait $pid
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://maven.apache.org/POM/4.0.0"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>3.3.2</version>
<relativePath/>
</parent>
<groupId>com.example.springai</groupId>
<artifactId>function-calling</artifactId>
<version>0.0.1-SNAPSHOT</version>
<name>Function calling</name>
<description>Demo project for Spring Boot</description>
<properties>
<java.version>21</java.version>
<!-- SNAPSHOT build of Spring AI; resolved from the spring-snapshots repository declared below. -->
<spring-ai.version>1.0.0-SNAPSHOT</spring-ai.version>
</properties>
<dependencies>
<!-- Gson is used by WeatherService to deserialize the bundled weather.json fixture. -->
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.11.0</version>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<!-- Spring AI Ollama integration, including function-calling support. -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-ollama-spring-boot-starter</artifactId>
</dependency>
<!-- Optional: lets Spring Boot manage the docker-compose.yml lifecycle during local runs. -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-docker-compose</artifactId>
<scope>runtime</scope>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-test</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
<dependencyManagement>
<dependencies>
<!-- BOM pins all spring-ai artifact versions to ${spring-ai.version}. -->
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-bom</artifactId>
<version>${spring-ai.version}</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<build>
<plugins>
<plugin>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-maven-plugin</artifactId>
<configuration>
<mainClass>com.example.springai.SpringAiApplication</mainClass>
<!-- NOTE(review): Lombok is excluded from the repackaged jar, but no Lombok
     dependency is declared above - confirm whether this exclude is still needed. -->
<excludes>
<exclude>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</exclude>
</excludes>
</configuration>
</plugin>
</plugins>
</build>
<!-- Milestone/snapshot repositories are required to resolve the Spring AI SNAPSHOT dependency. -->
<repositories>
<repository>
<id>spring-milestones</id>
<name>Spring Milestones</name>
<url>https://repo.spring.io/milestone</url>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<repository>
<id>spring-snapshots</id>
<name>Spring Snapshots</name>
<url>https://repo.spring.io/snapshot</url>
<releases>
<enabled>false</enabled>
</releases>
</repository>
</repositories>
</project>
Run the following curl command to test Spring AI Ollama function calling:
curl --location 'localhost:8080/function?question=what%20is%20current%20weather%20in%20London'