diff --git a/compose.nvidia.yaml b/compose.nvidia.yaml index eae1ef4ab..6f3805fdb 100644 --- a/compose.nvidia.yaml +++ b/compose.nvidia.yaml @@ -147,7 +147,7 @@ services: capabilities: [gpu] count: "all" - ## Ollama AI Model Runner (optional) + ## Ollama Large-Language Model Runner (optional) ## Run "ollama pull [name]:[version]" to download a vision model ## listed at <https://ollama.com/search?c=vision>, for example: ## docker compose exec ollama ollama pull qwen2.5vl:3b diff --git a/compose.yaml b/compose.yaml index c3b1543f7..ad2981ff7 100644 --- a/compose.yaml +++ b/compose.yaml @@ -227,7 +227,7 @@ services: OLLAMA_ENABLED: "true" OLLAMA_HOST: "http://ollama:11434" - ## Ollama AI Model Runner (optional) + ## Ollama Large-Language Model Runner (optional) ## Run "ollama pull [name]:[version]" to download a vision model ## listed at <https://ollama.com/search?c=vision>, for example: ## docker compose exec ollama ollama pull qwen2.5vl:3b diff --git a/setup/docker/arm64/compose.yaml b/setup/docker/arm64/compose.yaml index 28f2b755c..e583cb9c4 100644 --- a/setup/docker/arm64/compose.yaml +++ b/setup/docker/arm64/compose.yaml @@ -152,7 +152,7 @@ services: MARIADB_PASSWORD: "insecure" MARIADB_ROOT_PASSWORD: "insecure" - ## Ollama AI Model Runner (optional) + ## Ollama Large-Language Model Runner (optional) ## Run "ollama pull [name]:[version]" to download a vision model ## listed at <https://ollama.com/search?c=vision>, for example: ## docker compose exec ollama ollama pull qwen2.5vl:3b diff --git a/setup/docker/compose.yaml b/setup/docker/compose.yaml index bb41ec86e..ca090e846 100644 --- a/setup/docker/compose.yaml +++ b/setup/docker/compose.yaml @@ -157,7 +157,7 @@ services: # MARIADB_REPLICATION_USER: "" # MARIADB_REPLICATION_PASSWORD: "" - ## Ollama AI Model Runner (optional) + ## Ollama Large-Language Model Runner (optional) ## Run "ollama pull [name]:[version]" to download a vision model ## listed at <https://ollama.com/search?c=vision>, for example: ## docker compose exec ollama ollama pull qwen2.5vl:3b diff --git a/setup/docker/nvidia/compose.yaml b/setup/docker/nvidia/compose.yaml index 
0002fc278..e7b8b263b 100644 --- a/setup/docker/nvidia/compose.yaml +++ b/setup/docker/nvidia/compose.yaml @@ -157,7 +157,7 @@ services: # MARIADB_REPLICATION_USER: "" # MARIADB_REPLICATION_PASSWORD: "" - ## Ollama AI Model Runner (optional) + ## Ollama Large-Language Model Runner (optional) ## Run "ollama pull [name]:[version]" to download a vision model ## listed at <https://ollama.com/search?c=vision>, for example: ## docker compose exec ollama ollama pull qwen2.5vl:3b