All URIs are relative to https://dashboard.quantcdn.io
| Method | HTTP request | Description |
|---|---|---|
| ChatInference | Post /api/v3/organizations/{organisation}/ai/chat | Chat inference via API Gateway (buffered responses) with multimodal support |
| ChatInferenceStream | Post /api/v3/organizations/{organisation}/ai/chat/stream | Chat inference via streaming endpoint (true HTTP streaming) with multimodal support |
| Embeddings | Post /api/v3/organizations/{organisation}/ai/embeddings | Generate text embeddings for semantic search and RAG applications |
| GetDurableExecutionStatus | Get /api/v3/organizations/{organisation}/ai/chat/executions/{identifier} | Get Durable Execution Status |
| ImageGeneration | Post /api/v3/organizations/{organisation}/ai/image-generation | Generate images with Amazon Nova Canvas |
| SubmitToolCallback | Post /api/v3/organizations/{organisation}/ai/chat/callback | Submit Client Tool Results (Callback) |
ChatInference200Response ChatInference(ctx, organisation).ChatInferenceRequest(chatInferenceRequest).Execute()
Chat inference via API Gateway (buffered responses) with multimodal support
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
chatInferenceRequest := *openapiclient.NewChatInferenceRequest([]openapiclient.ChatInferenceRequestMessagesInner{*openapiclient.NewChatInferenceRequestMessagesInner("Role_example", openapiclient.chatInference_request_messages_inner_content{ArrayOfChatInferenceRequestMessagesInnerContentOneOfInner: new([]ChatInferenceRequestMessagesInnerContentOneOfInner)})}, "amazon.nova-lite-v1:0") // ChatInferenceRequest | Chat request with optional multimodal content blocks
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.ChatInference(context.Background(), organisation).ChatInferenceRequest(chatInferenceRequest).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.ChatInference``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `ChatInference`: ChatInference200Response
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.ChatInference`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID |
Other parameters are passed through a pointer to a apiChatInferenceRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
| chatInferenceRequest | ChatInferenceRequest | Chat request with optional multimodal content blocks | |
- Content-Type: application/json
- Accept: application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]
string ChatInferenceStream(ctx, organisation).ChatInferenceStreamRequest(chatInferenceStreamRequest).Execute()
Chat inference via streaming endpoint (true HTTP streaming) with multimodal support
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
chatInferenceStreamRequest := *openapiclient.NewChatInferenceStreamRequest([]openapiclient.ChatInferenceStreamRequestMessagesInner{*openapiclient.NewChatInferenceStreamRequestMessagesInner("Role_example", openapiclient.chatInferenceStream_request_messages_inner_content{ArrayOfMapmapOfStringAny: new([]map[string]interface{})})}, "amazon.nova-lite-v1:0") // ChatInferenceStreamRequest | Chat request with optional multimodal content blocks
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.ChatInferenceStream(context.Background(), organisation).ChatInferenceStreamRequest(chatInferenceStreamRequest).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.ChatInferenceStream``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `ChatInferenceStream`: string
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.ChatInferenceStream`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID |
Other parameters are passed through a pointer to a apiChatInferenceStreamRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
| chatInferenceStreamRequest | ChatInferenceStreamRequest | Chat request with optional multimodal content blocks | |
string
- Content-Type: application/json
- Accept: text/event-stream, application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]
Embeddings200Response Embeddings(ctx, organisation).EmbeddingsRequest(embeddingsRequest).Execute()
Generate text embeddings for semantic search and RAG applications
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
embeddingsRequest := *openapiclient.NewEmbeddingsRequest(openapiclient.embeddings_request_input{ArrayOfString: new([]string)}) // EmbeddingsRequest | Embedding request with single or multiple texts
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.Embeddings(context.Background(), organisation).EmbeddingsRequest(embeddingsRequest).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.Embeddings``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `Embeddings`: Embeddings200Response
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.Embeddings`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID |
Other parameters are passed through a pointer to a apiEmbeddingsRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
| embeddingsRequest | EmbeddingsRequest | Embedding request with single or multiple texts | |
- Content-Type: application/json
- Accept: application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]
GetDurableExecutionStatus200Response GetDurableExecutionStatus(ctx, organisation, identifier).Execute()
Get Durable Execution Status
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
identifier := "XkdVWiEfSwMEPrw=" // string | Either the requestId from async response, or full executionArn (URL-encoded)
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.GetDurableExecutionStatus(context.Background(), organisation, identifier).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.GetDurableExecutionStatus``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `GetDurableExecutionStatus`: GetDurableExecutionStatus200Response
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.GetDurableExecutionStatus`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID | |
| identifier | string | Either the requestId from async response, or full executionArn (URL-encoded) |
Other parameters are passed through a pointer to a apiGetDurableExecutionStatusRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
GetDurableExecutionStatus200Response
- Content-Type: Not defined
- Accept: application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]
ImageGeneration200Response ImageGeneration(ctx, organisation).ImageGenerationRequest(imageGenerationRequest).Execute()
Generate images with Amazon Nova Canvas
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
imageGenerationRequest := *openapiclient.NewImageGenerationRequest("TaskType_example") // ImageGenerationRequest | Image generation request
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.ImageGeneration(context.Background(), organisation).ImageGenerationRequest(imageGenerationRequest).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.ImageGeneration``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `ImageGeneration`: ImageGeneration200Response
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.ImageGeneration`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID |
Other parameters are passed through a pointer to a apiImageGenerationRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
| imageGenerationRequest | ImageGenerationRequest | Image generation request | |
- Content-Type: application/json
- Accept: application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]
SubmitToolCallback200Response SubmitToolCallback(ctx, organisation).SubmitToolCallbackRequest(submitToolCallbackRequest).Execute()
Submit Client Tool Results (Callback)
package main
import (
"context"
"fmt"
"os"
openapiclient "github.com/quantcdn/quant-admin-go"
)
func main() {
organisation := "organisation_example" // string | The organisation ID
submitToolCallbackRequest := *openapiclient.NewSubmitToolCallbackRequest("Ab9hZXi/YXJuOmF3czpsYW1iZGE...", []openapiclient.SubmitToolCallbackRequestToolResultsInner{*openapiclient.NewSubmitToolCallbackRequestToolResultsInner("toolu_bdrk_012KTC8NCG...", map[string]interface{}({"temperature":"24C","conditions":"Sunny"}))}) // SubmitToolCallbackRequest |
configuration := openapiclient.NewConfiguration()
apiClient := openapiclient.NewAPIClient(configuration)
resp, r, err := apiClient.AIInferenceAPI.SubmitToolCallback(context.Background(), organisation).SubmitToolCallbackRequest(submitToolCallbackRequest).Execute()
if err != nil {
fmt.Fprintf(os.Stderr, "Error when calling `AIInferenceAPI.SubmitToolCallback``: %v\n", err)
fmt.Fprintf(os.Stderr, "Full HTTP response: %v\n", r)
}
// response from `SubmitToolCallback`: SubmitToolCallback200Response
fmt.Fprintf(os.Stdout, "Response from `AIInferenceAPI.SubmitToolCallback`: %v\n", resp)
}| Name | Type | Description | Notes |
|---|---|---|---|
| ctx | context.Context | context for authentication, logging, cancellation, deadlines, tracing, etc. | |
| organisation | string | The organisation ID |
Other parameters are passed through a pointer to a apiSubmitToolCallbackRequest struct via the builder pattern
| Name | Type | Description | Notes |
|---|---|---|---|
| submitToolCallbackRequest | SubmitToolCallbackRequest | | |
- Content-Type: application/json
- Accept: application/json
[Back to top] [Back to API list] [Back to Model list] [Back to README]