From d071505358e9ebb3a2393618efc113942c0836e6 Mon Sep 17 00:00:00 2001
From: denazi
Date: Tue, 27 Jan 2026 10:32:57 +0200
Subject: [PATCH] Documentation #26: Addition of MCP BE Service docs in deployment guides

---
 .../deployment/docker_compose.md             | 31 +++++++++++++++++
 doc/getting_started/deployment/kubernetes.md | 33 +++++++++++++++++++
 2 files changed, 64 insertions(+)

diff --git a/doc/getting_started/deployment/docker_compose.md b/doc/getting_started/deployment/docker_compose.md
index c4f609ef..e3fe4581 100644
--- a/doc/getting_started/deployment/docker_compose.md
+++ b/doc/getting_started/deployment/docker_compose.md
@@ -263,6 +263,37 @@ SPRING_APPLICATION_JSON: '{ }'

### 8. Configure oslmcpbackend (MCP Backend Service)

Adjust the respective fields:

- To configure the Ollama LLM server URL and model.
- To customize the AI assistant's system prompt and temperature.
- If you have made changes to the Keycloak credentials.
- If you want to change the logging level (TRACE / DEBUG / INFO / WARN / ERROR).

> ***If you are using a non-local domain, replace every occurrence of http://keycloak:8080 with the respective {{protocol://domain.name}}.***

In the folder `org.etsi.osl.main/compose/`, edit the file `docker-compose.yaml`:

```yaml
  # AI Configuration
  SPRING_AI_OLLAMA_BASE_URL: http://ollama:11434
  SPRING_AI_OLLAMA_CHAT_MODEL: gpt-oss:20b
  SPRING_AI_OLLAMA_CHAT_TEMPERATURE: 0.5
  SPRING_AI_CHAT_SYSTEM_PROMPT: "You are an OpenSlice AI Assistant."
  SPRING_AI_CHAT_MAX_MESSAGES: 100

  # OAuth2/Keycloak Configuration
  SPRING_SECURITY_OAUTH2_RESOURCESERVER_JWT_ISSUER_URI: http://keycloak:8080/auth/realms/openslice
  SPRING_SECURITY_OAUTH2_CLIENT_PROVIDER_KEYCLOAK_ISSUER_URI: http://keycloak:8080/auth/realms/openslice
  SPRING_SECURITY_OAUTH2_CLIENT_REGISTRATION_KEYCLOAK_CLIENT_ID: osapiWebClientId

  # Logging Configuration
  LOGGING_LEVEL_ROOT: INFO
  LOGGING_LEVEL_OSL: INFO
  LOGGING_LEVEL_SPRING_AI: INFO
```
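After saving the changes, the MCP backend container must be recreated so it picks up the new environment. The commands below are a minimal sketch, assuming the Compose service is named `oslmcpbackend` (as in the section title) and that they are run from the `org.etsi.osl.main/compose/` folder; adjust the service name and path to your setup (use `docker-compose` instead of `docker compose` if you are on Compose v1).

```bash
# Recreate only the MCP backend service with the updated environment
docker compose up -d --force-recreate oslmcpbackend

# Follow its logs and confirm it reaches Ollama and Keycloak without errors
docker compose logs -f oslmcpbackend
```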

## Configure nginx

diff --git a/doc/getting_started/deployment/kubernetes.md b/doc/getting_started/deployment/kubernetes.md
index d9ee88a3..596a886b 100644
--- a/doc/getting_started/deployment/kubernetes.md
+++ b/doc/getting_started/deployment/kubernetes.md
@@ -255,6 +255,39 @@
OpenSlice also offers management support of *multiple Kubernetes Clusters* simultaneously. For this, you will have to replicate the steps in [Standalone CRIDGE deployment](#standalone-cridge-deployment) for every Cluster. Each CRIDGE instance will be in charge of the management of one Kubernetes Cluster.

### MCP Backend Service

The MCP Backend Service provides AI-powered assistance using Ollama and connects to the OpenSlice MCP server.

To configure the MCP Backend Service, update the following fields in the `values.yaml` file:

```yaml
mcpbackend:
  enabled: true
  logLevelRoot: INFO
  logLevelOSL: INFO
  spring:
    logLevel: INFO
    ai:
      ollama:
        model: "gpt-oss:20b"           # Change the used model here
        temperature: 0.5
        apiUrl: "http://ollama:11434"  # Change the Ollama API URL here
      chat:
        systemPrompt: "You are an OpenSlice AI Assistant."  # Customize your initial Assistant prompt
        maxMessages: 100               # Maximum number of messages to keep in context
```

**Key Configuration Fields:**

- `mcpbackend.enabled`: Set to `true` to deploy the MCP Backend Service, `false` to disable it.
- `mcpbackend.spring.ai.ollama.apiUrl`: URL of your Ollama server. Update this if you are using an external Ollama instance.
- `mcpbackend.spring.ai.ollama.model`: The AI model to use (e.g., gpt-oss:20b, llama2, mistral).
- `mcpbackend.spring.ai.chat.systemPrompt`: Customize your initial Assistant prompt.
- `mcpbackend.spring.ai.ollama.temperature`: Controls randomness (0.0-1.0). Lower values are more deterministic.
- `mcpbackend.spring.ai.chat.maxMessages`: Maximum number of messages to keep in the conversation context.

> **Note:** Ensure the Ollama server is running and the specified model is available before starting the service (see the verification sketch at the end of this patch).

### External Services (optional)

--
GitLab
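For the Kubernetes deployment above, the note suggests confirming that the configured model is actually present on the Ollama server before enabling the service, and then rolling out the updated values. The commands below are a minimal sketch; the Helm release name, chart path, and namespace are assumptions and should be adjusted to your deployment.

```bash
# On the Ollama host (or inside the Ollama pod), make sure the configured model is available
ollama pull gpt-oss:20b
ollama list

# Apply the updated values.yaml to the OpenSlice release
# (release name "openslice", chart path, and namespace are placeholders)
helm upgrade --install openslice ./kubernetes/helm/openslice \
  --namespace openslice -f values.yaml
```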