diff --git a/ollama/__init__.py b/ollama/__init__.py
index 92bba280..8dcb55d2 100644
--- a/ollama/__init__.py
+++ b/ollama/__init__.py
@@ -52,6 +52,7 @@
 create = _client.create
 delete = _client.delete
 list = _client.list
+exists = _client.exists
 copy = _client.copy
 show = _client.show
 ps = _client.ps
diff --git a/ollama/_client.py b/ollama/_client.py
index 18cb0fb4..4c25b136 100644
--- a/ollama/_client.py
+++ b/ollama/_client.py
@@ -629,6 +629,18 @@ def list(self) -> ListResponse:
       '/api/tags',
     )
 
+  def exists(self, model: str) -> bool:
+    """Check if a model is available locally.
+
+    Args:
+      model: The model name to check (e.g. 'llama3.1:8b').
+
+    Returns:
+      True if the model exists locally, False otherwise.
+    """
+    models = self.list().models or []
+    return any(m.model == model for m in models)
+
   def delete(self, model: str) -> StatusResponse:
     r = self._request_raw(
       'DELETE',
@@ -1270,6 +1282,19 @@ async def list(self) -> ListResponse:
       '/api/tags',
     )
 
+  async def exists(self, model: str) -> bool:
+    """Check if a model is available locally.
+
+    Args:
+      model: The model name to check (e.g. 'llama3.1:8b').
+
+    Returns:
+      True if the model exists locally, False otherwise.
+    """
+    resp = await self.list()
+    models = resp.models or []
+    return any(m.model == model for m in models)
+
   async def delete(self, model: str) -> StatusResponse:
     r = await self._request_raw(
       'DELETE',
diff --git a/tests/test_exists.py b/tests/test_exists.py
new file mode 100644
index 00000000..32c84c73
--- /dev/null
+++ b/tests/test_exists.py
@@ -0,0 +1,82 @@
+import json
+
+import pytest
+from pytest_httpserver import HTTPServer
+
+from ollama._client import AsyncClient, Client
+
+
+pytestmark = pytest.mark.anyio
+
+
+@pytest.fixture
+def anyio_backend():
+  return 'asyncio'
+
+
+def test_client_exists_true(httpserver: HTTPServer):
+  """exists() returns True when model is present."""
+  httpserver.expect_ordered_request(
+    '/api/tags',
+    method='GET',
+  ).respond_with_json({
+    'models': [
+      {'name': 'llama3.1:8b', 'model': 'llama3.1:8b', 'size': 4661224676},
+      {'name': 'qwen2.5:latest', 'model': 'qwen2.5:latest', 'size': 4430121000},
+    ]
+  })
+
+  client = Client(host=f'http://{httpserver.host}:{httpserver.port}')
+  assert client.exists('llama3.1:8b') is True
+
+
+def test_client_exists_false(httpserver: HTTPServer):
+  """exists() returns False when model is not present."""
+  httpserver.expect_ordered_request(
+    '/api/tags',
+    method='GET',
+  ).respond_with_json({
+    'models': [
+      {'name': 'llama3.1:8b', 'model': 'llama3.1:8b', 'size': 4661224676},
+    ]
+  })
+
+  client = Client(host=f'http://{httpserver.host}:{httpserver.port}')
+  assert client.exists('gemma2:2b') is False
+
+
+def test_client_exists_empty_list(httpserver: HTTPServer):
+  """exists() returns False when no models are available."""
+  httpserver.expect_ordered_request(
+    '/api/tags',
+    method='GET',
+  ).respond_with_json({'models': []})
+
+  client = Client(host=f'http://{httpserver.host}:{httpserver.port}')
+  assert client.exists('llama3.1:8b') is False
+
+
+async def test_async_client_exists_true(httpserver: HTTPServer):
+  """Async exists() returns True when model is present."""
+  httpserver.expect_ordered_request(
+    '/api/tags',
+    method='GET',
+  ).respond_with_json({
+    'models': [
+      {'name': 'llama3.1:8b', 'model': 'llama3.1:8b', 'size': 4661224676},
+    ]
+  })
+
+  client = AsyncClient(host=f'http://{httpserver.host}:{httpserver.port}')
+  assert await client.exists('llama3.1:8b') is True
+
+
+async def test_async_client_exists_false(httpserver: HTTPServer):
+  """Async exists() returns False when model is not present."""
+  httpserver.expect_ordered_request(
+    '/api/tags',
+    method='GET',
+  ).respond_with_json({'models': []})
+
+  client = AsyncClient(host=f'http://{httpserver.host}:{httpserver.port}')
+  assert await client.exists('nonexistent') is False