From e572d2372f171a3f0434a2e03c44389113017e78 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Sahl?=
Date: Mon, 17 Nov 2025 15:45:19 +0100
Subject: [PATCH 01/11] wip: use oai endpoint by default (structured output
 guide)

---
 .../guide.de-de.md   | 17 ++++++++++-------
 .../guide.en-asia.md | 17 ++++++++++-------
 .../guide.en-au.md   | 17 ++++++++++-------
 .../guide.en-ca.md   | 17 ++++++++++-------
 .../guide.en-gb.md   | 17 ++++++++++-------
 .../guide.en-ie.md   | 17 ++++++++++-------
 .../guide.en-sg.md   | 17 ++++++++++-------
 .../guide.en-us.md   | 17 ++++++++++-------
 .../guide.es-es.md   | 17 ++++++++++-------
 .../guide.es-us.md   | 17 ++++++++++-------
 .../guide.fr-ca.md   | 17 ++++++++++-------
 .../guide.fr-fr.md   | 17 ++++++++++-------
 .../guide.it-it.md   | 17 ++++++++++-------
 .../guide.pl-pl.md   | 17 ++++++++++-------
 .../guide.pt-pt.md   | 17 ++++++++++-------
 15 files changed, 150 insertions(+), 105 deletions(-)
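All of the structured-output guides now default to the generic OpenAI-compatible endpoint, which serves several models behind a single URL, so each request has to name its target model explicitly; the former per-model URL (`llama-3-3-70b-instruct...`) implied the model and needed no such field. For reference, below is a minimal sketch of the call shape the updated samples produce, written with the `openai` Python SDK used throughout the guides; the variable names are illustrative and not part of the diff:

```python
import os

import openai

# Generic multi-model endpoint adopted by this patch; the model is now
# selected per request rather than implied by a per-model URL.
client = openai.OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["AI_ENDPOINT_API_KEY"],  # export AI_ENDPOINT_API_KEY='your_api_key'
)

response = client.chat.completions.create(
    model="Meta-Llama-3_3-70B-Instruct",  # explicit model name, as added in every guide
    messages=[
        {"role": "system", "content": "You always answer in JSON format."},
        {"role": "user", "content": "What are the top 3 most popular programming languages?"},
    ],
    response_format={"type": "json_object"},  # legacy JSON mode shown in the guides
)
print(response.choices[0].message.content)
```
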
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.de-de.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-asia.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-au.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ca.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md
index cb1f84d378b..25443538138 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-gb.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-ie.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-sg.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.en-us.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-es.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md
index a49a160a73c..538c225531f 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.es-us.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md
index 72029ee5249..1c20cf40c63 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-ca.md
@@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json'\
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>     { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" },
 >>     { "content": "What are the top 3 most popular programming languages ?", "role": "user" }
 >>   ],
+>>   "model": "Meta-Llama-3_3-70B-Instruct",
 >>   "response_format": {
 >>     "type":"json_schema",
 >>     "json_schema": {
@@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_schema',
 >>       json_schema: jsonSchema
@@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> # Initialise the client
 >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> openai_client = openai.OpenAI(
->>     base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+>>     base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
 >>     api_key=api_key
 >> )
 >>
@@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> Input query:
 >>
 >> ```sh
->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \
+>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \
 >>   -H 'accept: application/json' \
 >>   -H 'content-type: application/json' \
 >>   -d '{
@@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >> // Initialise the client
 >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 >> const options = {
->>   url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions',
+>>   url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions',
 >>   headers: {
 >>     'Content-Type': 'application/json',
 >>     'Authorization': `Bearer ${apiKey}`
@@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO
 >>   json: true,
 >>   body: {
 >>     messages: messages,
+>>     model: 'Meta-Llama-3_3-70B-Instruct',
 >>     response_format: {
 >>       type: 'json_object',
 >>     },
@@ -487,7 +490,7 @@ class LanguageRankings(BaseModel):
 # Initialise the client
 api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key')
 openai_client = openai.OpenAI(
-    base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1',
+    base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1',
     api_key=api_key
 )
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md
index 72029ee5249..1c20cf40c63 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.fr-fr.md @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> 
const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md index a49a160a73c..538c225531f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.it-it.md @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md 
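The legacy-mode hunks follow the same pattern with a simpler `response_format`: `{"type": "json_object"}` and no schema, plus the newly required `model` field. A minimal sketch under the same assumptions (shared base URL, exported `AI_ENDPOINT_API_KEY`); note that in this mode the prompt itself must ask for JSON output, which the guides' system message already does:

```python
import os

import openai

# Same unified endpoint as above; only response_format differs.
client = openai.OpenAI(
    base_url="https://oai.endpoints.kepler.ai.cloud.ovh.net/v1",
    api_key=os.environ["AI_ENDPOINT_API_KEY"],
)

response = client.chat.completions.create(
    model="Meta-Llama-3_3-70B-Instruct",
    messages=[
        # Legacy JSON mode relies on the prompt requesting JSON explicitly.
        {"role": "system", "content": "You always answer in JSON format."},
        {"role": "user", "content": "What are the top 3 most popular programming languages?"},
    ],
    response_format={"type": "json_object"},
)

# Valid JSON is guaranteed; its shape is whatever the model chose.
print(response.choices[0].message.content)
```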
index a49a160a73c..538c225531f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pl-pl.md @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> 
const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md index a49a160a73c..538c225531f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_05_structured_output/guide.pt-pt.md @@ -114,7 +114,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -151,7 +151,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json'\ >> -H 'content-type: application/json' \ >> -d '{ @@ -160,6 +160,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> { "content": "You are a helpful assistant that help users rank different things. 
You always answer in JSON format.", "role": "system" }, >> { "content": "What are the top 3 most popular programming languages ?", "role": "user" } >> ], +>> "model": "Meta-Llama-3_3-70B-Instruct", >> "response_format": { >> "type":"json_schema", >> "json_schema": { @@ -258,7 +259,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -266,6 +267,7 @@ The following code samples provide a simple example on how to specify a JSON sch >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_schema', >> json_schema: jsonSchema @@ -327,7 +329,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> # Initialise the client >> api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> openai_client = openai.OpenAI( ->> base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', +>> base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', >> api_key=api_key >> ) >> @@ -375,7 +377,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> Input query: >> >> ```sh ->> curl -X POST "https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions" \ +>> curl -X POST "https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions" \ >> -H 'accept: application/json' \ >> -H 'content-type: application/json' \ >> -d '{ @@ -413,7 +415,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> // Initialise the client >> const apiKey = process.env.AI_ENDPOINT_API_KEY; // Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') >> const options = { ->> url: 'https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions', +>> url: 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions', >> headers: { >> 'Content-Type': 'application/json', >> 'Authorization': `Bearer ${apiKey}` @@ -421,6 +423,7 @@ The following code samples provide a simple example on how to use the legacy JSO >> json: true, >> body: { >> messages: messages, +>> model: 'Meta-Llama-3_3-70B-Instruct', >> response_format: { >> type: 'json_object', >> }, @@ -487,7 +490,7 @@ class LanguageRankings(BaseModel): # Initialise the client api_key = os.environ['AI_ENDPOINT_API_KEY'] # Assuming your API key is available in this environment variable (export AI_ENDPOINT_API_KEY='your_api_key') openai_client = openai.OpenAI( - base_url='https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1', + base_url='https://oai.endpoints.kepler.ai.cloud.ovh.net/v1', api_key=api_key ) From ff644bc3d545483ab11622678e4aaf3cd8ca050b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 15:50:43 +0100 Subject: [PATCH 02/11] wip: use oai endpoint by default 
(function calling guide) --- .../guide.de-de.md | 102 +++++++++--------- .../guide.en-asia.md | 102 +++++++++--------- .../guide.en-au.md | 102 +++++++++--------- .../guide.en-ca.md | 102 +++++++++--------- .../guide.en-gb.md | 102 +++++++++--------- .../guide.en-ie.md | 102 +++++++++--------- .../guide.en-sg.md | 102 +++++++++--------- .../guide.en-us.md | 102 +++++++++--------- .../guide.es-es.md | 102 +++++++++--------- .../guide.es-us.md | 102 +++++++++--------- .../guide.fr-ca.md | 102 +++++++++--------- .../guide.fr-fr.md | 102 +++++++++--------- .../guide.it-it.md | 102 +++++++++--------- .../guide.pl-pl.md | 102 +++++++++--------- .../guide.pt-pt.md | 102 +++++++++--------- 15 files changed, 765 insertions(+), 765 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.de-de.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md index 429e28589a9..97cf9080100 100644 --- 
a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-asia.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-au.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", 
- "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ca.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": 
[ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md index 544d261dc42..3505e4553d8 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-gb.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-ie.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 
'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-sg.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + 
], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.en-us.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-es.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.es-us.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", 
-"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md index 217fd3f89bb..71acdf3e37d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-ca.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + 
"description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md index 217fd3f89bb..71acdf3e37d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.fr-fr.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.it-it.md @@ -617,58 +617,58 @@ It is 
possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pl-pl.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 
'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + "top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md index 429e28589a9..97cf9080100 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_guide_06_function_calling/guide.pt-pt.md @@ -617,58 +617,58 @@ It is possible to use Function Calling in streaming mode, by setting `stream` to Let's see an example with cURL and the LLaMa 3.1 8B model: ```bash -curl -X 'POST' - 'https://llama-3-1-8b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1/chat/completions' - -H 'accept: application/json' - -H 'Content-Type: application/json' - -d '{ -"max_tokens": 100, -"messages": [ - { - "content": "What is the current weather in Paris?", - "role": "user" - } -], -"model": null, -"seed": null, -"stream": true, -"temperature": 0.1, -"tool_choice": "auto", -"tools": [ - { - "function": { - "description": "Get the current weather in a given location", - "name": "get_current_weather", - "parameters": { - "properties": { - "country": { - "description": "The two-letters country code", - "type": "string" - }, - "location": { - "description": "The city", - "type": "string" +curl -X 'POST' \ + 'https://oai.endpoints.kepler.ai.cloud.ovh.net/v1/chat/completions' \ + -H 'accept: application/json' \ + -H 'Content-Type: application/json' \ + -d '{ + "max_tokens": 100, + "messages": [ + { + "content": "What is the current weather in Paris?", + "role": "user" + } + ], + "model": "Llama-3.1-8B-Instruct", + "seed": null, + "stream": true, + "temperature": 0.1, + "tool_choice": "auto", + "tools": [ + { + "function": { + "description": "Get the current weather in a given location", + "name": "get_current_weather", + "parameters": { + "properties": { + "country": { + "description": "The two-letters country code", + "type": "string" + }, + "location": { + "description": "The city", + "type": "string" + }, + "unit": { + "enum": [ + "celsius", + "fahrenheit" + ], + "type": "string" + } + }, + "required": [ + "location", + "country" + ], + "type": "object" + } }, - "unit": { - "enum": [ - "celsius", - "fahrenheit" - ], - "type": "string" - } - }, - "required": [ - "location", - "country" - ], - "type": "object" - } - }, - "type": "function" - } -], -"top_p": 1 -}' + "type": "function" + } + ], + 
"top_p": 1 + }' ``` You will get tool call deltas in the server-side events chunks, with this format: From bef9641499c3bc2a03ced13ffb6d60a5d17f295b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:26:39 +0100 Subject: [PATCH 03/11] wip: use oai endpoint by default (tuto_01_audio_summarizer) --- .../guide.de-de.md | 29 +++++++------------ .../guide.en-asia.md | 29 +++++++------------ .../guide.en-au.md | 29 +++++++------------ .../guide.en-ca.md | 29 +++++++------------ .../guide.en-gb.md | 29 +++++++------------ .../guide.en-ie.md | 29 +++++++------------ .../guide.en-sg.md | 29 +++++++------------ .../guide.en-us.md | 29 +++++++------------ .../guide.es-es.md | 29 +++++++------------ .../guide.es-us.md | 29 +++++++------------ .../guide.fr-ca.md | 29 +++++++------------ .../guide.fr-fr.md | 29 +++++++------------ .../guide.it-it.md | 29 +++++++------------ .../guide.pl-pl.md | 29 +++++++------------ .../guide.pt-pt.md | 29 +++++++------------ 15 files changed, 165 insertions(+), 270 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.de-de.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def 
chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-asia.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio 
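Since every model is now served behind the single unified endpoint, a quick sanity check before wiring the functions together is to list what that endpoint exposes. This is a minimal sketch, assuming the unified endpoint implements the standard OpenAI-compatible `GET /v1/models` route and that your `.env` file is set up as shown above:

```python
import os

from dotenv import load_dotenv
from openai import OpenAI

# Load OVH_AI_ENDPOINTS_URL and OVH_AI_ENDPOINTS_ACCESS_TOKEN from .env
load_dotenv()

oai_client = OpenAI(
    base_url=os.getenv("OVH_AI_ENDPOINTS_URL"),
    api_key=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
)

# The model names used in this tutorial (whisper-large-v3 and
# Mixtral-8x7B-Instruct-v0.1) should appear in this listing.
for model in oai_client.models.list():
    print(model.id)
```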
summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-au.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - 
Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ca.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use 
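Before adding the web interface, you can smoke-test the two functions from the command line. A minimal sketch, assuming `asr_transcription` and `chat_completion` are defined as above and return the transcription and summary text; `sample.wav` is a placeholder for any short audio file you have locally:

```python
if __name__ == "__main__":
    # Transcribe a local file, then summarize the transcript
    transcript = asr_transcription(oai_client, "sample.wav")  # placeholder file
    print("Transcript:", transcript)

    summary = chat_completion(oai_client, transcript)
    print("Summary:", summary)
```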
`Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-gb.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` 
function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-ie.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the 
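Note that the transcription call asks for `response_format="verbose_json"`. Assuming the endpoint mirrors the OpenAI verbose transcription schema, the response then carries more than the plain text, for example:

```python
with open("sample.wav", "rb") as audio_file:  # placeholder file name
    response = oai_client.audio.transcriptions.create(
        model="whisper-large-v3",
        file=audio_file,
        response_format="verbose_json",
    )

print(response.text)      # full transcription
print(response.language)  # detected language
print(response.duration)  # audio duration, in seconds
for segment in response.segments:
    # per-segment timestamps and text
    print(segment.start, segment.end, segment.text)
```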
`chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-sg.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, 
create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.en-us.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this 
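The exact Gradio layout is up to you; the following is a minimal sketch of the wiring described above, with illustrative component names rather than the tutorial's exact interface. Depending on your Gradio version, the audio input parameter may be `sources=[...]` (Gradio 4) or `source=...` (Gradio 3):

```python
import functools

import gradio as gr

# Bind the shared client to both functions so Gradio only passes user inputs
asr_transcribe_fn = functools.partial(asr_transcription, oai_client)
chat_completion_fn = functools.partial(chat_completion, oai_client)

with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo:
    audio_in = gr.Audio(sources=["upload", "microphone"], type="filepath", label="Audio input")
    transcript = gr.Textbox(label="Transcription")
    summary = gr.Textbox(label="Summary")

    # Chain the endpoints: ASR first, then the LLM summary on its output
    audio_in.change(asr_transcribe_fn, inputs=audio_in, outputs=transcript).then(
        chat_completion_fn, inputs=transcript, outputs=summary
    )
    gr.ClearButton([audio_in, transcript, summary])

demo.launch()
```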
second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-es.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 
@@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.es-us.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ 
-159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md index b1df5f59661..fe796e9b32d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-ca.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, 
response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md index b1df5f59661..fe796e9b32d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.fr-fr.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( model="whisper-large-v3", 
file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.it-it.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = oai_client.audio.transcriptions.create( 
model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pl-pl.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + response = 
oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md index 9351da605b4..158b2f8d765 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_01_audio_summarizer/guide.pt-pt.md @@ -40,8 +40,7 @@ This tutorial will explore how AI APIs can be connected to create an advanced vi In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -85,21 +84,15 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -asr_ai_endpoint_url = os.environ.get("ASR_AI_ENDPOINT") -llm_ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("OVH_AI_ENDPOINTS_URL") ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") ``` -Then define the clients that communicate with your APIs and authenticate your requests: +Then define the client that communicates with the APIs and authenticates your requests: ```python -asr_client = OpenAI( - base_url=asr_ai_endpoint_url, - api_key=ai_endpoint_token -) - -llm_client = OpenAI( - base_url=llm_ai_endpoint_url, +oai_client = OpenAI( + base_url=ai_endpoint_url, api_key=ai_endpoint_token ) ``` @@ -111,7 +104,7 @@ llm_client = OpenAI( First, create the **Automatic Speech Recognition** function in order to transcribe audio files into text: ```python -def asr_transcription(asr_client, audio): +def asr_transcription(oai_client, audio): if audio is None: return " " @@ -125,7 +118,7 @@ def asr_transcription(asr_client, audio): process_audio_to_wav.export(processed_audio, format="wav") with open(processed_audio, "rb") as audio_file: - response = asr_client.audio.transcriptions.create( + 
response = oai_client.audio.transcriptions.create( model="whisper-large-v3", file=audio_file, response_format="verbose_json", @@ -159,7 +152,7 @@ In this second step, create the `chat_completion` function to use `Mixtral8x7B` - Return the audio summary ```python -def chat_completion(llm_client, new_message): +def chat_completion(oai_client, new_message): if new_message==" ": return "Please, send an input audio to get its summary!" @@ -169,7 +162,7 @@ def chat_completion(llm_client, new_message): # prompt history_openai_format = [{"role": "user", "content": f"Summarize the following text in a few words: {new_message}"}] # return summary - return llm_client.chat.completions.create( + return oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=history_openai_format, temperature=0, @@ -195,8 +188,8 @@ Inside a Gradio Block, you can: - Add a clear button with gr.ClearButton() to reset the page of the web app ```python -asr_transcribe_fn = functools.partial(asr_transcription, asr_client) -chat_completion_fn = functools.partial(chat_completion, llm_client) +asr_transcribe_fn = functools.partial(asr_transcription, oai_client) +chat_completion_fn = functools.partial(chat_completion, oai_client) # gradio with gr.Blocks(theme=gr.themes.Default(primary_hue="blue"), fill_height=True) as demo: From c9f4fa0d3be90cf1d6452b71a69d8defa9714aa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:33:44 +0100 Subject: [PATCH 04/11] wip: use oai endpoint by default (tuto_02_voice_virtual_assistant) --- .../guide.de-de.md | 29 +++++++------------ .../guide.en-asia.md | 29 +++++++------------ .../guide.en-au.md | 29 +++++++------------ .../guide.en-ca.md | 29 +++++++------------ .../guide.en-gb.md | 29 +++++++------------ .../guide.en-ie.md | 29 +++++++------------ .../guide.en-sg.md | 29 +++++++------------ .../guide.en-us.md | 29 +++++++------------ .../guide.es-es.md | 29 +++++++------------ .../guide.es-us.md | 29 +++++++------------ .../guide.fr-ca.md | 29 +++++++------------ .../guide.fr-fr.md | 29 +++++++------------ .../guide.it-it.md | 29 +++++++------------ .../guide.pl-pl.md | 29 +++++++------------ .../guide.pt-pt.md | 29 +++++++------------ 15 files changed, 165 insertions(+), 270 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.de-de.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') 
TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-asia.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') 
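To hear the assistant's replies, the Riva client synthesizes speech over gRPC. This is a hedged sketch using the `tts_client` defined above; the voice name and sample rate are illustrative values, so check which voices the `nvr-tts-en-us` endpoint actually serves:

```python
import wave

resp = tts_client.synthesize(
    text="Hello! How can I help you today?",
    voice_name="English-US.Female-1",  # illustrative voice name
    language_code="en-US",
    sample_rate_hz=16000,
)

# resp.audio holds raw 16-bit PCM samples; wrap them in a WAV container
with wave.open("answer.wav", "wb") as wav_file:
    wav_file.setnchannels(1)   # mono
    wav_file.setsampwidth(2)   # 16-bit samples
    wav_file.setframerate(16000)
    wav_file.writeframes(resp.audio)
```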
TTS_GRPC_ENDPOINT = os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-au.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ca.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md index 91e268d8fec..9d5641094ec 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-gb.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-ie.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-sg.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.en-us.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-es.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.es-us.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md index 439e53f413b..562ecf1b38d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-ca.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md index 439e53f413b..562ecf1b38d 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.fr-fr.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.it-it.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pl-pl.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md index 3612f61c772..85b42fc5cab 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_02_voice_virtual_assistant/guide.pt-pt.md @@ -44,9 +44,8 @@ All of this is done by connecting **AI Endpoints** like puzzle pieces—allowing In order to use AI Endpoints APIs easily, create a `.env` file to store environment variables: ```bash -ASR_AI_ENDPOINT=https://whisper-large-v3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 TTS_GRPC_ENDPOINT=nvr-tts-en-us.endpoints-grpc.kepler.ai.cloud.ovh.net:443 -LLM_AI_ENDPOINT=https://mixtral-8x7b-instruct-v01.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -92,17 +91,16 @@ After these lines, load and access the environnement variables of your `.env` fi # access the environment variables from the .env file load_dotenv() -ASR_AI_ENDPOINT = os.environ.get('ASR_AI_ENDPOINT') TTS_GRPC_ENDPOINT = 
os.environ.get('TTS_GRPC_ENDPOINT') -LLM_AI_ENDPOINT = os.environ.get('LLM_AI_ENDPOINT') +OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') ``` - Next, define the clients that will be used to interact with the models: +Next, define the clients that will be used to interact with the models: ```python -llm_client = OpenAI( - base_url=LLM_AI_ENDPOINT, +oai_client = OpenAI( + base_url=OVH_AI_ENDPOINTS_URL, api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN ) @@ -113,11 +111,6 @@ tts_client = riva.client.SpeechSynthesisService( metadata_args=[["authorization", f"bearer {OVH_AI_ENDPOINTS_ACCESS_TOKEN}"]] ) ) - -asr_client = OpenAI( - base_url=ASR_AI_ENDPOINT, - api_key=OVH_AI_ENDPOINTS_ACCESS_TOKEN -) ``` 💡 You are now ready to start coding your web app! @@ -127,8 +120,8 @@ asr_client = OpenAI( First, create the **Automatic Speech Recognition (ASR)** function in order to transcribe microphone input into text: ```python -def asr_transcription(question, asr_client): - return asr_client.audio.transcriptions.create( +def asr_transcription(question, oai_client): + return oai_client.audio.transcriptions.create( model="whisper-large-v3", file=question ).text @@ -147,8 +140,8 @@ def asr_transcription(question, asr_client): Now, create a function that calls the LLM client to provide responses to questions: ```python -def llm_answer(input, llm_client): - response = llm_client.chat.completions.create( +def llm_answer(input, oai_client): + response = oai_client.chat.completions.create( model="Mixtral-8x7B-Instruct-v0.1", messages=input, temperature=0, @@ -228,12 +221,12 @@ with st.container(): ) if recording: - user_question = asr_transcription(recording['bytes'], asr_client) + user_question = asr_transcription(recording['bytes'], oai_client) if prompt := user_question: st.session_state.messages.append({"role": "user", "content": prompt, "avatar":"👤"}) messages.chat_message("user", avatar="👤").write(prompt) - msg = llm_answer(st.session_state.messages, llm_client) + msg = llm_answer(st.session_state.messages, oai_client) st.session_state.messages.append({"role": "assistant", "content": msg, "avatar": "🤖"}) messages.chat_message("system", avatar="🤖").write(msg) From 3325212a14f30d5a0ea5767aa9fdd1b3420e3689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:36:29 +0100 Subject: [PATCH 05/11] wip: use oai endpoint by default (tuto_03) --- .../endpoints_tuto_03_code_assistant_continue/guide.de-de.md | 4 ++-- .../guide.en-asia.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-au.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-ca.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-gb.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-ie.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-sg.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.en-us.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.es-es.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.es-us.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.it-it.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md | 4 ++-- .../endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md | 4 ++-- 15 files changed, 30 insertions(+), 30 deletions(-) diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.de-de.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-asia.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-asia.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-au.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md index 1b78d52a568..ab4ea702fc6 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ca.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md index 1b78d52a568..ab4ea702fc6 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-gb.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-ie.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md index 633711d1480..f57611f4c2c 
100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-sg.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.en-us.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-es.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.es-us.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md index 9331eae8541..2cfd234630e 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-ca.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md index 9331eae8541..2cfd234630e 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.fr-fr.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.it-it.md @@ -51,13 +51,13 @@ models: - 
name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pl-pl.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md index 633711d1480..f57611f4c2c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_03_code_assistant_continue/guide.pt-pt.md @@ -51,13 +51,13 @@ models: - name: Meta-Llama-3_3-70B-Instruct provider: openai model: Meta-Llama-3_3-70B-Instruct - apiBase: https://llama-3-3-70b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize] - name: Qwen3-Coder-30B-A3B-Instruct provider: openai model: Qwen3-Coder-30B-A3B-Instruct - apiBase: https://qwen-3-coder-30b-a3b-instruct.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 + apiBase: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 apiKey: # replace with your API key roles: [chat, edit, apply, summarize, autocomplete] ``` From a9e0a6cf0d08d8d43e706f171b6726f8c7c40a20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:41:59 +0100 Subject: [PATCH 06/11] wip: use oai endpoint by default (tuto_05) --- .../guide.de-de.md | 17 ++++++----------- .../guide.en-asia.md | 17 ++++++----------- .../guide.en-au.md | 17 ++++++----------- .../guide.en-ca.md | 17 ++++++----------- .../guide.en-gb.md | 17 ++++++----------- .../guide.en-ie.md | 17 ++++++----------- .../guide.en-sg.md | 17 ++++++----------- .../guide.en-us.md | 17 ++++++----------- .../guide.es-es.md | 17 ++++++----------- 
.../guide.es-us.md | 17 ++++++----------- .../guide.fr-ca.md | 17 ++++++----------- .../guide.fr-fr.md | 17 ++++++----------- .../guide.it-it.md | 17 ++++++----------- .../guide.pl-pl.md | 17 ++++++----------- .../guide.pt-pt.md | 17 ++++++----------- 15 files changed, 90 insertions(+), 165 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.de-de.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
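
Note: every locale hunk in this patch converges on the same pattern — one shared `OVH_AI_ENDPOINTS_URL` pointing at the unified endpoint, with the model chosen per request via `OVH_AI_ENDPOINTS_MODEL_NAME`. For reference, a minimal sketch of that pattern, not part of the patch itself; it assumes `langchain-mistralai`, `langchain-core` and `python-dotenv` are installed and the `.env` file from the hunk above is in place:

```python
# Sketch of the pattern these hunks converge on: one shared unified
# endpoint URL plus a per-request model name, both read from .env.
# Assumes: pip install langchain-mistralai langchain-core python-dotenv
import os

from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_mistralai import ChatMistralAI

load_dotenv()

model = ChatMistralAI(
    model=os.environ["OVH_AI_ENDPOINTS_MODEL_NAME"],      # e.g. Mistral-7B-Instruct-v0.3
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
    endpoint=os.environ["OVH_AI_ENDPOINTS_URL"],          # https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
    max_tokens=1500,
)

prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful assistant."),
        ("human", "{question}"),
    ]
)

# Build the chain and run one completion.
chain = prompt | model
print(chain.invoke({"question": "What is OVHcloud AI Endpoints?"}).content)
```
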
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-asia.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-au.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ca.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-gb.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-ie.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-sg.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.en-us.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
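
Note: the `streaming=True` hunks only change how the model is constructed; how the stream is consumed is left to the surrounding tutorial code. A sketch of one typical way to consume it, under the same assumptions as the sketch above — the chunk-handling style here is an illustration, not taken from the guides:

```python
# Sketch: the streaming variant. Same assumptions as above
# (langchain-mistralai, python-dotenv, the .env file from the hunks);
# streaming=True makes the chain yield incremental AIMessageChunk objects.
import os

from dotenv import load_dotenv
from langchain_core.prompts import ChatPromptTemplate
from langchain_mistralai import ChatMistralAI

load_dotenv()

model = ChatMistralAI(
    model=os.environ["OVH_AI_ENDPOINTS_MODEL_NAME"],
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
    endpoint=os.environ["OVH_AI_ENDPOINTS_URL"],
    max_tokens=1500,
    streaming=True,
)

prompt = ChatPromptTemplate.from_messages(
    [("system", "You are a helpful assistant."), ("human", "{question}")]
)

# Print tokens as they arrive instead of waiting for the full answer.
for chunk in (prompt | model).stream({"question": "Tell me a short joke."}):
    print(chunk.content, end="", flush=True)
print()
```
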
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-es.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.es-us.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md index c666ab460c6..0c8cac0de10 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-ca.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md index c666ab460c6..0c8cac0de10 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.fr-fr.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.it-it.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pl-pl.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md index 20e9ddd9abc..ee41492df95 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_05_chatbot_langchain_python/guide.pt-pt.md @@ -45,17 +45,12 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - Then, create a `requirements.txt` file with the following libraries: ```bash @@ -87,7 +82,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. # Question parameter is the user's question. @@ -96,7 +91,7 @@ def chat_completion(question: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500) prompt = ChatPromptTemplate.from_messages([ @@ -158,7 +153,7 @@ load_dotenv() ## Set the OVHcloud AI Endpoints token to use models _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') # Function in charge to call the LLM model. 
@@ -168,7 +163,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) From b8333bf23a767d30187ba1cdb6d920612d7f76ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:45:45 +0100 Subject: [PATCH 07/11] wip: use oai endpoint by default (tuto_06) --- .../guide.de-de.md | 14 +++++--------- .../guide.en-asia.md | 14 +++++--------- .../guide.en-au.md | 14 +++++--------- .../guide.en-ca.md | 14 +++++--------- .../guide.en-gb.md | 14 +++++--------- .../guide.en-ie.md | 14 +++++--------- .../guide.en-sg.md | 14 +++++--------- .../guide.en-us.md | 14 +++++--------- .../guide.es-es.md | 14 +++++--------- .../guide.es-us.md | 14 +++++--------- .../guide.fr-ca.md | 14 +++++--------- .../guide.fr-fr.md | 14 +++++--------- .../guide.it-it.md | 14 +++++--------- .../guide.pl-pl.md | 14 +++++--------- .../guide.pt-pt.md | 14 +++++--------- 15 files changed, 75 insertions(+), 135 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.de-de.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-asia.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-au.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ca.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-gb.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-ie.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-sg.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.en-us.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-es.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.es-us.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md index 38a10f4a819..02664537326 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-ca.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md index 38a10f4a819..02664537326 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.fr-fr.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.it-it.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pl-pl.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md index e7c604c17ab..77611c20582 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_06_chatbot_langchain_javascript/guide.pt-pt.md @@ -37,17 +37,12 @@ You will need to declare the following environment variables: ```bash _OVH_AI_ENDPOINTS_ACCESS_TOKEN= -_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -_OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +_OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 # (or any other model you want to use) +_OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ``` **Make sure to replace the token value (`OVH_AI_ENDPOINTS_ACCESS_TOKEN`) by yours.** If you do not have one yet, follow the instructions in the [AI Endpoints - Getting Started](/pages/public_cloud/ai_machine_learning/endpoints_guide_01_getting_started) guide. -You will also have to set two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - -- `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 - ### Project setup The first step is to get the necessary dependencies. 
To do this, create a `package.json` with the following content: @@ -103,7 +98,8 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, maxTokens: 1500, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, + maxTokens: 1500, streaming: false, verbose: false, }); @@ -173,7 +169,7 @@ async function chatCompletion(question) { modelName: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, model: process.env.OVH_AI_ENDPOINTS_MODEL_NAME, apiKey: process.env.OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint: process.env.OVH_AI_ENDPOINTS_MODEL_URL, + endpoint: process.env.OVH_AI_ENDPOINTS_URL, maxTokens: 1500, streaming: true, verbose: false, From 70e90edf03737944a415960dfe3be70e5a3bb245 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:47:59 +0100 Subject: [PATCH 08/11] wip: use oai endpoint by default (tuto_07) --- .../guide.de-de.md | 4 ++-- .../guide.en-asia.md | 4 ++-- .../guide.en-au.md | 4 ++-- .../guide.en-ca.md | 4 ++-- .../guide.en-gb.md | 4 ++-- .../guide.en-ie.md | 4 ++-- .../guide.en-sg.md | 4 ++-- .../guide.en-us.md | 4 ++-- .../guide.es-es.md | 4 ++-- .../guide.es-us.md | 4 ++-- .../guide.fr-ca.md | 4 ++-- .../guide.fr-fr.md | 4 ++-- .../guide.it-it.md | 4 ++-- .../guide.pl-pl.md | 4 ++-- .../guide.pt-pt.md | 4 ++-- 15 files changed, 30 insertions(+), 30 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.de-de.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-asia.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-au.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ca.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-gb.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-ie.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-sg.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.en-us.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-es.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.es-us.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md index a6a66754469..c56ac5326d6 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-ca.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md index a6a66754469..c56ac5326d6 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.fr-fr.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.it-it.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pl-pl.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md index e133eeec674..be179f2f74f 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_07_chatbot_langchain4j_quarkus/guide.pt-pt.md @@ -135,7 +135,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -162,7 +162,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot From 2f64490963b419fcb984ac6263d4f727330fdaa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:48:56 +0100 Subject: [PATCH 09/11] wip: use oai endpoint by default (tuto_08) --- .../guide.de-de.md | 4 ++-- .../guide.en-asia.md | 4 ++-- .../guide.en-au.md | 4 ++-- .../guide.en-ca.md | 4 ++-- .../guide.en-gb.md | 4 ++-- .../guide.en-ie.md | 4 ++-- .../guide.en-sg.md | 4 ++-- .../guide.en-us.md | 4 ++-- .../guide.es-es.md | 4 ++-- .../guide.es-us.md | 4 ++-- .../guide.fr-ca.md | 4 ++-- .../guide.fr-fr.md | 4 ++-- .../guide.it-it.md | 4 ++-- .../guide.pl-pl.md | 4 ++-- .../guide.pt-pt.md | 4 ++-- 15 files changed, 30 insertions(+), 30 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.de-de.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-asia.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-au.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ca.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-gb.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-ie.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-sg.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.en-us.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-es.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.es-us.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md index 9157c20ef7a..294b84bbd87 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-ca.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md index 9157c20ef7a..294b84bbd87 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.fr-fr.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.it-it.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pl-pl.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md index 3267ea6f35e..5fbe5f1f771 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_08_streaming_chatbot_langchain4j_quarkus/guide.pt-pt.md @@ -89,7 +89,7 @@ Adapt and add the following configuration to your application.properties, to ena ```console ### Global configurations # Base URL for Mistral AI endpoints -quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_MODEL_URL} +quarkus.langchain4j.mistralai.base-url=${OVH_AI_ENDPOINTS_URL} # Activate or not the log during the request quarkus.langchain4j.mistralai.log-requests=true # Activate or not the log during the response @@ -116,7 +116,7 @@ quarkus.langchain4j.mistralai.chat-model.max-tokens=1024 You will also have to replace two other environments variables, related to the model you want to use. You can find these model-specific values in the `documentation` tab of each model. 
For example, if you want to add the `Mistral-7B-Instruct-v0.3` model, the expected environment variables will be: - `OVH_AI_ENDPOINTS_MODEL_NAME`: Mistral-7B-Instruct-v0.3 -- `OVH_AI_ENDPOINTS_MODEL_URL`: https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +- `OVH_AI_ENDPOINTS_URL`: https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 ### Build a REST API to interact with the chatbot From 787cecf806cd02d72744dac5add7e6d122ceefc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:50:28 +0100 Subject: [PATCH 10/11] wip: use oai endpoint by default (tuto_09) --- .../guide.de-de.md | 8 ++++---- .../guide.en-asia.md | 8 ++++---- .../guide.en-au.md | 8 ++++---- .../guide.en-ca.md | 8 ++++---- .../guide.en-gb.md | 8 ++++---- .../guide.en-ie.md | 8 ++++---- .../guide.en-sg.md | 8 ++++---- .../guide.en-us.md | 8 ++++---- .../guide.es-es.md | 8 ++++---- .../guide.es-us.md | 8 ++++---- .../guide.fr-ca.md | 8 ++++---- .../guide.fr-fr.md | 8 ++++---- .../guide.it-it.md | 8 ++++---- .../guide.pl-pl.md | 8 ++++---- .../guide.pt-pt.md | 8 ++++---- 15 files changed, 60 insertions(+), 60 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.de-de.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
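Since the unified `oai` endpoint serves many models behind one base URL, the `model` field in each request is what routes it. Before any memory is wired in, a one-shot call is the quickest way to confirm that the renamed `ai_endpoint_url` variable and the access token work together. A minimal sketch, assuming the `.env` values shown above and the `langchain-openai` package (the guides' exact imports are not visible in these hunks):

```python
# Minimal sanity check of the unified endpoint -- illustrative only,
# not part of the patched guides. Assumes the .env file shown above.
import os

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI

load_dotenv()

llm = ChatOpenAI(
    model_name="Mistral-7B-Instruct-v0.3",         # routes the request on the shared endpoint
    openai_api_key=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
    openai_api_base=os.getenv("LLM_AI_ENDPOINT"),  # https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
    max_tokens=64,
    temperature=0.0,
)

# One-shot call, no memory: each invocation starts from a blank context.
print(llm.invoke("Reply with the single word: pong").content)
```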
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-asia.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-au.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
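The `@@ -179` hunks above touch the same `ChatOpenAI` constructor inside the memory-enabled version of the script ("which gives the following code:"). For readers skimming the patch, here is a hypothetical sketch of how such a chain is typically assembled with the legacy LangChain memory API; the guides' actual surrounding code is not shown in these hunks:

```python
# Hypothetical memory-enabled variant, assuming the legacy
# langchain ConversationChain / ConversationBufferMemory API.
import os

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

load_dotenv()

llm = ChatOpenAI(
    model_name="Mistral-7B-Instruct-v0.3",
    openai_api_key=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
    openai_api_base=os.getenv("LLM_AI_ENDPOINT"),
    max_tokens=512,
    temperature=0.0,
)

conversation = ConversationChain(
    llm=llm,
    memory=ConversationBufferMemory(),  # replays the full chat history into each prompt
    verbose=True,
)

conversation.predict(input="Hi, my name is Sam.")
print(conversation.predict(input="What is my name?"))  # the buffer supplies "Sam"
```

The buffer memory is the simplest option: it costs nothing extra per turn but grows the prompt with every exchange.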
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ca.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-gb.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
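Because every guide in this patch series now points at the same shared base URL, a misspelled model name becomes the most likely new failure mode. As a hedged illustration, assuming the unified endpoint implements the standard OpenAI-compatible `/v1/models` listing route (which this patch does not itself confirm), one can check that the expected model is actually served:

```python
# Hypothetical availability check; assumes the unified endpoint exposes
# the standard OpenAI-compatible GET /v1/models route.
import os

import openai

client = openai.OpenAI(
    base_url=os.environ["LLM_AI_ENDPOINT"],              # the shared oai endpoint
    api_key=os.environ["OVH_AI_ENDPOINTS_ACCESS_TOKEN"],
)

served = [m.id for m in client.models.list()]
if "Mistral-7B-Instruct-v0.3" not in served:
    raise SystemExit(f"Model not found on this endpoint; available: {served}")
```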
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-ie.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-sg.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.en-us.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-es.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.es-us.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md index 3aa9a227053..fd501e165a0 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-ca.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md index 3aa9a227053..fd501e165a0 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.fr-fr.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.it-it.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pl-pl.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! @@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md index a7f3fdf47e9..9d70408ca70 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_09_chatbot_memory_langchain/guide.pt-pt.md @@ -54,7 +54,7 @@ LangChain provides several memory modules that can be used within a **Conversati In order to use **AI Endpoints APIs** easily, create a `.env` file to store environment variables: ```bash -LLM_AI_ENDPOINT=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +LLM_AI_ENDPOINT=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -97,7 +97,7 @@ After these lines, load and access the environnement variables of your `.env` fi load_dotenv() ai_endpoint_token = os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN") -ai_endpoint_mistral7b = os.getenv("LLM_AI_ENDPOINT") +ai_endpoint_url = os.getenv("LLM_AI_ENDPOINT") ``` 💡 You are now ready to test your LLM without conversational memory! 
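The truncated hunk context above ("LangChain provides several memory modules that can be used within a **Conversati…") hints at the alternatives the tutorial discusses. One such module, sketched here under the same legacy-API assumption, trades the full buffer for an LLM-written summary:

```python
# Another memory module, hypothetical sketch under the same assumptions:
# the endpoint summarises past turns instead of replaying them verbatim.
import os

from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.memory import ConversationSummaryMemory

load_dotenv()

llm = ChatOpenAI(
    model_name="Mistral-7B-Instruct-v0.3",
    openai_api_key=os.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"),
    openai_api_base=os.getenv("LLM_AI_ENDPOINT"),
    max_tokens=512,
    temperature=0.0,
)

conversation = ConversationChain(
    llm=llm,
    memory=ConversationSummaryMemory(llm=llm),  # condenses history to bound prompt size
)

print(conversation.predict(input="Why does a shared endpoint need an explicit model name?"))
```

The summary variant keeps the prompt short on long conversations, at the cost of one extra summarisation call per turn through the same endpoint.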
@@ -111,7 +111,7 @@ Test the model in a basic way and see what happens with the context: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) @@ -179,7 +179,7 @@ which gives the following code: llm = ChatOpenAI( model_name="Mistral-7B-Instruct-v0.3", openai_api_key=ai_endpoint_token, - openai_api_base=ai_endpoint_mistral7b, + openai_api_base=ai_endpoint_url, max_tokens=512, temperature=0.0 ) From da3723b6b7b3b31b741f7136356ffcd0b033dbd7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?R=C3=A9mi=20Sahl?= Date: Mon, 17 Nov 2025 16:53:30 +0100 Subject: [PATCH 11/11] wip: use oai endpoint by default (tuto_10,11,12) --- .../guide.de-de.md | 2 +- .../guide.en-asia.md | 2 +- .../guide.en-au.md | 2 +- .../guide.en-ca.md | 2 +- .../guide.en-gb.md | 2 +- .../guide.en-ie.md | 2 +- .../guide.en-sg.md | 2 +- .../guide.en-us.md | 2 +- .../guide.es-es.md | 2 +- .../guide.es-us.md | 2 +- .../guide.fr-ca.md | 2 +- .../guide.fr-fr.md | 2 +- .../guide.it-it.md | 2 +- .../guide.pl-pl.md | 2 +- .../guide.pt-pt.md | 2 +- .../guide.de-de.md | 6 +++--- .../guide.en-asia.md | 6 +++--- .../guide.en-au.md | 6 +++--- .../guide.en-ca.md | 6 +++--- .../guide.en-gb.md | 6 +++--- .../guide.en-ie.md | 6 +++--- .../guide.en-sg.md | 6 +++--- .../guide.en-us.md | 6 +++--- .../guide.es-es.md | 6 +++--- .../guide.es-us.md | 6 +++--- .../guide.fr-ca.md | 6 +++--- .../guide.fr-fr.md | 6 +++--- .../guide.it-it.md | 6 +++--- .../guide.pl-pl.md | 6 +++--- .../guide.pt-pt.md | 6 +++--- .../guide.de-de.md | 10 +++++----- .../guide.en-asia.md | 10 +++++----- .../guide.en-au.md | 10 +++++----- .../guide.en-ca.md | 10 +++++----- .../guide.en-gb.md | 10 +++++----- .../guide.en-ie.md | 10 +++++----- .../guide.en-sg.md | 10 +++++----- .../guide.en-us.md | 10 +++++----- .../guide.es-es.md | 10 +++++----- .../guide.es-us.md | 10 +++++----- .../guide.fr-ca.md | 10 +++++----- .../guide.fr-fr.md | 10 +++++----- .../guide.it-it.md | 10 +++++----- .../guide.pl-pl.md | 10 +++++----- .../guide.pt-pt.md | 10 +++++----- 45 files changed, 135 insertions(+), 135 deletions(-) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.de-de.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-asia.md @@ -133,7 +133,7 @@ Make sure your environment 
variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-au.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ca.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-gb.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-ie.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export 
OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-sg.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.en-us.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-es.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.es-us.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export 
OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md index e1aee059416..38e1d0c5750 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-ca.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md index e1aee059416..38e1d0c5750 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.fr-fr.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.it-it.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pl-pl.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git 
a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md index 5e82baac280..297ec42b511 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_10_memory_chatbot_langchain4j/guide.pt-pt.md @@ -133,7 +133,7 @@ Make sure your environment variables are set: ```bash export OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -export OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +export OVH_AI_ENDPOINTS_MODEL_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 export OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.de-de.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-asia.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = 
os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-au.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ca.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = 
os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-gb.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-ie.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - 
endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-sg.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.en-us.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-es.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.es-us.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md index 0ea2b4879d2..1dac2e9567b 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-ca.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md index 0ea2b4879d2..1dac2e9567b 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.fr-fr.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.it-it.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pl-pl.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md index 797664ced93..5ed1739c34c 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_11_rag_chatbot_langchain/guide.pt-pt.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME=bge-m3 OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ 
-96,7 +96,7 @@ load_dotenv() ## Retrieve the OVHcloud AI Endpoints configurations _OVH_AI_ENDPOINTS_ACCESS_TOKEN = os.environ.get('OVH_AI_ENDPOINTS_ACCESS_TOKEN') _OVH_AI_ENDPOINTS_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_MODEL_NAME') -_OVH_AI_ENDPOINTS_MODEL_URL = os.environ.get('OVH_AI_ENDPOINTS_MODEL_URL') +_OVH_AI_ENDPOINTS_URL = os.environ.get('OVH_AI_ENDPOINTS_URL') _OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME = os.environ.get('OVH_AI_ENDPOINTS_EMBEDDING_MODEL_NAME') ``` @@ -110,7 +110,7 @@ def chat_completion(new_message: str): # no need to use a token model = ChatMistralAI(model=_OVH_AI_ENDPOINTS_MODEL_NAME, api_key=_OVH_AI_ENDPOINTS_ACCESS_TOKEN, - endpoint=_OVH_AI_ENDPOINTS_MODEL_URL, + endpoint=_OVH_AI_ENDPOINTS_URL, max_tokens=1500, streaming=True) diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.de-de.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() 
.apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-asia.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-au.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ca.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ 
public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-gb.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = 
System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-ie.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = 
System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-sg.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = 
MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.en-us.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md +++ 
b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-es.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.es-us.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ 
     private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class);
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -148,7 +148,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
@@ -314,7 +314,7 @@ public class RAGStreamingChatbot {
     private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD");
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -363,7 +363,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md
index 7afd33f1297..9d7f50b5b78 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-ca.md
@@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm
 
 ```bash
 OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
@@ -137,7 +137,7 @@ public class RAGStreamingChatbot {
     private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class);
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md index 7afd33f1297..9d7f50b5b78 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.fr-fr.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = 
System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md index 30474a76213..3f0f2b5ac46 100644 --- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md +++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.it-it.md @@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm ```bash OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3 -OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1 +OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec OVH_AI_ENDPOINTS_ACCESS_TOKEN= ``` @@ -137,7 +137,7 @@ public class RAGStreamingChatbot { private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -148,7 +148,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder() .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN) .modelName(OVH_AI_ENDPOINTS_MODEL_NAME) - .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL) + .baseUrl(OVH_AI_ENDPOINTS_URL) .maxTokens(512) .build(); @@ -314,7 +314,7 @@ public class RAGStreamingChatbot { private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD"); private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN"); private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME"); - private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL"); + private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL"); private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL"); interface Assistant { @@ -363,7 +363,7 @@ public class RAGStreamingChatbot { MistralAiStreamingChatModel streamingChatModel = 
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md
index 30474a76213..3f0f2b5ac46 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pl-pl.md
@@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm
 
 ```bash
 OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
@@ -137,7 +137,7 @@ public class RAGStreamingChatbot {
     private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class);
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -148,7 +148,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
@@ -314,7 +314,7 @@ public class RAGStreamingChatbot {
     private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD");
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -363,7 +363,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
diff --git a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md
index 30474a76213..3f0f2b5ac46 100644
--- a/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md
+++ b/pages/public_cloud/ai_machine_learning/endpoints_tuto_12_rag_chatbot_langchain4j/guide.pt-pt.md
@@ -36,7 +36,7 @@ In order to use AI Endpoints APIs easily, create a `.env` file to store environm
 
 ```bash
 OVH_AI_ENDPOINTS_MODEL_NAME=Mistral-7B-Instruct-v0.3
-OVH_AI_ENDPOINTS_MODEL_URL=https://mistral-7b-instruct-v0-3.endpoints.kepler.ai.cloud.ovh.net/api/openai_compat/v1
+OVH_AI_ENDPOINTS_URL=https://oai.endpoints.kepler.ai.cloud.ovh.net/v1
 OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL=https://bge-m3.endpoints.kepler.ai.cloud.ovh.net/api/text2vec
 OVH_AI_ENDPOINTS_ACCESS_TOKEN=
 ```
@@ -137,7 +137,7 @@ public class RAGStreamingChatbot {
     private static final Logger _LOG = LoggerFactory.getLogger(RAGStreamingChatbot.class);
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -148,7 +148,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
 
@@ -314,7 +314,7 @@ public class RAGStreamingChatbot {
     private static final String DATABASE_PASSWORD = System.getenv("DATABASE_PASSWORD");
     private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
     private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
-    private static final String OVH_AI_ENDPOINTS_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_MODEL_URL");
+    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");
     private static final String OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL = System.getenv("OVH_AI_ENDPOINTS_EMBEDDING_MODEL_URL");
 
     interface Assistant {
@@ -363,7 +363,7 @@ public class RAGStreamingChatbot {
         MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
             .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
             .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
-            .baseUrl(OVH_AI_ENDPOINTS_MODEL_URL)
+            .baseUrl(OVH_AI_ENDPOINTS_URL)
             .maxTokens(512)
             .build();
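
With the shared `oai.endpoints.kepler.ai.cloud.ovh.net/v1` endpoint, the base URL no longer identifies the model, so the value passed to `.modelName(...)` is what routes the request to `Mistral-7B-Instruct-v0.3`. For reference, a minimal sketch of the configuration these hunks converge on — the class name and `main` wiring are illustrative only, assuming the `.env` variables above are exported:

```java
import dev.langchain4j.model.mistralai.MistralAiStreamingChatModel;

public class EndpointConfigSketch {

    // Same environment variables as the guide's .env file
    private static final String OVH_AI_ENDPOINTS_ACCESS_TOKEN = System.getenv("OVH_AI_ENDPOINTS_ACCESS_TOKEN");
    private static final String OVH_AI_ENDPOINTS_MODEL_NAME = System.getenv("OVH_AI_ENDPOINTS_MODEL_NAME");
    private static final String OVH_AI_ENDPOINTS_URL = System.getenv("OVH_AI_ENDPOINTS_URL");

    public static void main(String[] args) {
        // The unified OpenAI-compatible base URL is model-agnostic;
        // modelName() selects the model instead of the hostname.
        MistralAiStreamingChatModel streamingChatModel = MistralAiStreamingChatModel.builder()
            .apiKey(OVH_AI_ENDPOINTS_ACCESS_TOKEN)
            .modelName(OVH_AI_ENDPOINTS_MODEL_NAME)
            .baseUrl(OVH_AI_ENDPOINTS_URL)
            .maxTokens(512)
            .build();
        // streamingChatModel is then wired into the Assistant / RAG pipeline
        // exactly as in the tutorial code above.
    }
}
```

A side effect of this renaming is that switching models becomes a one-variable change (`OVH_AI_ENDPOINTS_MODEL_NAME`), since `OVH_AI_ENDPOINTS_URL` now stays the same for every chat model.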