From d3636718830e595dcee4fc71beeb53cee3524f93 Mon Sep 17 00:00:00 2001 From: Michal Harakal Date: Tue, 14 Apr 2026 15:58:03 +0200 Subject: [PATCH] Scale ops docs: TensorOps surface scan + human-prose partials (#511) KSP processor now walks the TensorOps interface and every @Backend-tagged implementor instead of relying solely on hand-placed @InProgress / @DslOp annotations, so coverage scales from 4 hand-annotated symbols to the full ~50-method surface. Generated pages fuse the auto-derived signature and backend matrix with optional human-authored partials at partials/ops//.adoc sliced by tag (math/intuition/examples/ references), giving LaTeX-ready prose a place to live without blocking the generator on missing content. matmul migrated as the reference example; the Explanation theory page now includes the same partial as the Reference page so they share one source of truth. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../main/kotlin/GenerateDocumentationTask.kt | 47 +- .../ROOT/pages/explanation/theory/matmul.adoc | 39 +- .../reference/operators/generated/index.adoc | 3 +- .../operators/generated/similarity.adoc | 15 + .../operators/generated/tensorops.adoc | 2347 +++++++++++++++++ .../operators/generated/voidtensorops.adoc | 30 + .../pages/reference/ops-status-matrix.adoc | 69 +- .../ROOT/partials/ops/tensorops/matmul.adoc | 41 + .../ainet/exec/tensor/ops/AccelerateCpuOps.kt | 2 + .../sk/ainet/exec/tensor/ops/DefaultCpuOps.kt | 2 + .../sk/ainet/lang/tensor/ops/VoidTensorOps.kt | 2 + .../kotlin/sk/ainet/lang/ops/TensorOp.kt | 19 + .../lang/ops/ksp/OperatorDocProcessor.kt | 95 +- 13 files changed, 2660 insertions(+), 51 deletions(-) create mode 100644 docs/modules/ROOT/pages/reference/operators/generated/tensorops.adoc create mode 100644 docs/modules/ROOT/partials/ops/tensorops/matmul.adoc diff --git a/build-logic/convention/src/main/kotlin/GenerateDocumentationTask.kt b/build-logic/convention/src/main/kotlin/GenerateDocumentationTask.kt index 
845c8e2f..6129ca8d 100644 --- a/build-logic/convention/src/main/kotlin/GenerateDocumentationTask.kt +++ b/build-logic/convention/src/main/kotlin/GenerateDocumentationTask.kt @@ -268,14 +268,24 @@ abstract class GenerateDocumentationTask : DefaultTask() { appendLine("") appendLine("Modality: ${operator.modality.capitalize()}") appendLine("") - + operator.functions.forEach { function -> - generateFunctionSection(function, this) + generateFunctionSection(operator, function, this) } }) } - - private fun generateFunctionSection(function: FunctionDoc, builder: StringBuilder) { + + /** + * Per-function section layout fuses auto-derived facts (signature, + * parameters, return type, backend matrix) with optional hand-written + * prose pulled from a partial at + * `partials/ops//.adoc`. The partial is sliced by + * AsciiDoc tags — `math`, `intuition`, `examples`, `references` — so a + * single file per function carries all the human content, and missing + * tags render as empty via `optional`, keeping un-prosed ops valid. + */ + private fun generateFunctionSection(operator: OperatorDoc, function: FunctionDoc, builder: StringBuilder) { + val partialBase = "ops/${operator.name.lowercase()}/${function.name.lowercase()}.adoc" builder.apply { appendLine("== ${function.name}") appendLine("") @@ -286,7 +296,7 @@ abstract class GenerateDocumentationTask : DefaultTask() { appendLine(function.signature) appendLine("----") appendLine("") - + if (function.parameters.isNotEmpty()) { appendLine("=== Parameters") appendLine("") @@ -298,16 +308,32 @@ abstract class GenerateDocumentationTask : DefaultTask() { } appendLine("") } - + appendLine("=== Return Type") appendLine("") appendLine("`${function.returnType}`") appendLine("") - + + // Human prose: math first so LaTeX sits right under the signature, + // then intuition and examples before the backend table, references + // last. All optional — ops with no partial still render cleanly. 
+ appendLine("=== Definition") + appendLine("") + appendLine("include::partial\$$partialBase[tag=math,opts=optional]") + appendLine("") + appendLine("=== Intuition") + appendLine("") + appendLine("include::partial\$$partialBase[tag=intuition,opts=optional]") + appendLine("") + appendLine("=== Examples") + appendLine("") + appendLine("include::partial\$$partialBase[tag=examples,opts=optional]") + appendLine("") + if (includeBackendStatus.getOrElse(true) && function.statusByBackend.isNotEmpty()) { generateBackendStatusTable(function, this) } - + if (function.notes.isNotEmpty()) { appendLine("=== Notes") appendLine("") @@ -316,7 +342,10 @@ abstract class GenerateDocumentationTask : DefaultTask() { appendLine("") } } - + + appendLine("=== References") + appendLine("") + appendLine("include::partial\$$partialBase[tag=references,opts=optional]") appendLine("") } } diff --git a/docs/modules/ROOT/pages/explanation/theory/matmul.adoc b/docs/modules/ROOT/pages/explanation/theory/matmul.adoc index c40ff179..cd3061f9 100644 --- a/docs/modules/ROOT/pages/explanation/theory/matmul.adoc +++ b/docs/modules/ROOT/pages/explanation/theory/matmul.adoc @@ -1,36 +1,18 @@ = Matrix Multiplication Theory +The theory and intuition below are the single source of truth for `matmul` +and are also embedded into the generated operator reference at +xref:reference/operators/generated/tensorops.adoc#matmul[TensorOps.matmul]. + [#matmul-definition] == Mathematical Definition -Matrix multiplication is a binary operation that produces a matrix from two matrices. 
-Given two matrices A ∈ ℝ^(m×k) and B ∈ ℝ^(k×n), the matrix product C = AB is defined as: - -[stem] -++++ -C_{ij} = \sum_{l=1}^{k} A_{il} \cdot B_{lj} -++++ - -Where: -- C ∈ ℝ^(m×n) is the resulting matrix -- i ranges from 1 to m (row index) -- j ranges from 1 to n (column index) -- l is the summation index over the shared dimension k +include::partial$ops/tensorops/matmul.adoc[tag=math] [#matmul-properties] -== Properties - -* **Associativity**: (AB)C = A(BC) -* **Distributivity**: A(B + C) = AB + AC and (A + B)C = AC + BC -* **Non-commutativity**: Generally AB ≠ BA -* **Identity element**: AI = IA = A where I is the identity matrix +== Intuition and Properties -[#matmul-complexity] -== Computational Complexity - -* Standard algorithm: O(mnk) operations -* Strassen's algorithm: O(n^2.807) for square matrices -* Current best known: O(n^2.373) (theoretical) +include::partial$ops/tensorops/matmul.adoc[tag=intuition] [#matmul-applications] == Applications @@ -38,4 +20,9 @@ Where: * Neural network forward pass computations * Linear transformations in computer graphics * Solving systems of linear equations -* Principal component analysis (PCA) \ No newline at end of file +* Principal component analysis (PCA) + +[#matmul-references] +== References + +include::partial$ops/tensorops/matmul.adoc[tag=references] diff --git a/docs/modules/ROOT/pages/reference/operators/generated/index.adoc b/docs/modules/ROOT/pages/reference/operators/generated/index.adoc index e64fe818..8ebeb8a3 100644 --- a/docs/modules/ROOT/pages/reference/operators/generated/index.adoc +++ b/docs/modules/ROOT/pages/reference/operators/generated/index.adoc @@ -1,11 +1,12 @@ = AI-NET Operators Reference -Generated from version `1.0.0` on 2026-04-13 +Generated from version `1.0.0` on 2026-04-14 == Operators by Modality === Core +* xref:reference/operators/generated/tensorops.adoc[TensorOps] * xref:reference/operators/generated/voidtensorops.adoc[VoidTensorOps] === Composite diff --git 
a/docs/modules/ROOT/pages/reference/operators/generated/similarity.adoc b/docs/modules/ROOT/pages/reference/operators/generated/similarity.adoc index a438c407..d93808b3 100644 --- a/docs/modules/ROOT/pages/reference/operators/generated/similarity.adoc +++ b/docs/modules/ROOT/pages/reference/operators/generated/similarity.adoc @@ -23,6 +23,18 @@ fun cosineDistance(other:Tensor, dim:Int, eps:Double): Tensor `Tensor` +=== Definition + +include::partial$ops/similarity/cosinedistance.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/similarity/cosinedistance.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/similarity/cosinedistance.adoc[tag=examples,optional] + === Backend Support [cols="1,1,3", options="header"] @@ -37,4 +49,7 @@ fun cosineDistance(other:Tensor, dim:Int, eps:Double): Tensor TIP: *all*: +=== References + +include::partial$ops/similarity/cosinedistance.adoc[tag=references,optional] diff --git a/docs/modules/ROOT/pages/reference/operators/generated/tensorops.adoc b/docs/modules/ROOT/pages/reference/operators/generated/tensorops.adoc new file mode 100644 index 00000000..83af1cbd --- /dev/null +++ b/docs/modules/ROOT/pages/reference/operators/generated/tensorops.adoc @@ -0,0 +1,2347 @@ += TensorOps + +Package: `sk.ainet.lang.tensor.ops` + +Modality: Core + +== add + +=== Signature + +[source,kotlin] +---- +fun add(a:Tensor, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/add.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/add.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/add.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/add.adoc[tag=references,optional] + +== subtract + +=== Signature + 
+[source,kotlin] +---- +fun subtract(a:Tensor, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/subtract.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/subtract.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/subtract.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/subtract.adoc[tag=references,optional] + +== multiply + +=== Signature + +[source,kotlin] +---- +fun multiply(a:Tensor, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/multiply.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/multiply.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/multiply.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/multiply.adoc[tag=references,optional] + +== divide + +=== Signature + +[source,kotlin] +---- +fun divide(a:Tensor, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/divide.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/divide.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/divide.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/divide.adoc[tag=references,optional] + +== addScalar + +=== Signature + +[source,kotlin] 
+---- +fun addScalar(a:Tensor, b:Number): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Number` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/addscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/addscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/addscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/addscalar.adoc[tag=references,optional] + +== subScalar + +=== Signature + +[source,kotlin] +---- +fun subScalar(a:Tensor, b:Number): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Number` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/subscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/subscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/subscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/subscalar.adoc[tag=references,optional] + +== mulScalar + +=== Signature + +[source,kotlin] +---- +fun mulScalar(a:Tensor, b:Number): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Number` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/mulscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/mulscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/mulscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/mulscalar.adoc[tag=references,optional] + +== divScalar + +=== Signature + 
+[source,kotlin] +---- +fun divScalar(a:Tensor, b:Number): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Number` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/divscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/divscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/divscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/divscalar.adoc[tag=references,optional] + +== rsubScalar + +=== Signature + +[source,kotlin] +---- +fun rsubScalar(a:Number, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Number` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/rsubscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/rsubscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/rsubscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/rsubscalar.adoc[tag=references,optional] + +== rdivScalar + +=== Signature + +[source,kotlin] +---- +fun rdivScalar(a:Number, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Number` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/rdivscalar.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/rdivscalar.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/rdivscalar.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/rdivscalar.adoc[tag=references,optional] + +== matmul + 
+=== Signature + +[source,kotlin] +---- +fun matmul(a:Tensor, b:Tensor): Tensor +---- + +=== Parameters + +* `a: Tensor` +* `b: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/matmul.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/matmul.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/matmul.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/matmul.adoc[tag=references,optional] + +== transpose + +=== Signature + +[source,kotlin] +---- +fun transpose(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/transpose.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/transpose.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/transpose.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/transpose.adoc[tag=references,optional] + +== conv1d + +=== Signature + +[source,kotlin] +---- +fun conv1d(input:Tensor, weight:Tensor, bias:Tensor, stride:Int, padding:Int, dilation:Int, groups:Int): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `weight: Tensor` +* `bias: Tensor` +* `stride: Int` +* `padding: Int` +* `dilation: Int` +* `groups: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/conv1d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/conv1d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/conv1d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void 
| implemented | - +|=== + +=== References + +include::partial$ops/tensorops/conv1d.adoc[tag=references,optional] + +== conv2d + +=== Signature + +[source,kotlin] +---- +fun conv2d(input:Tensor, weight:Tensor, bias:Tensor, stride:Pair, padding:Pair, dilation:Pair, groups:Int): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `weight: Tensor` +* `bias: Tensor` +* `stride: Pair` +* `padding: Pair` +* `dilation: Pair` +* `groups: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/conv2d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/conv2d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/conv2d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/conv2d.adoc[tag=references,optional] + +== conv3d + +=== Signature + +[source,kotlin] +---- +fun conv3d(input:Tensor, weight:Tensor, bias:Tensor, stride:Triple, padding:Triple, dilation:Triple, groups:Int): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `weight: Tensor` +* `bias: Tensor` +* `stride: Triple` +* `padding: Triple` +* `dilation: Triple` +* `groups: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/conv3d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/conv3d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/conv3d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/conv3d.adoc[tag=references,optional] + +== convTranspose1d + +=== Signature + +[source,kotlin] +---- +fun convTranspose1d(input:Tensor, weight:Tensor, bias:Tensor, stride:Int, padding:Int, outputPadding:Int, dilation:Int, groups:Int): Tensor +---- + +=== 
Parameters + +* `input: Tensor` +* `weight: Tensor` +* `bias: Tensor` +* `stride: Int` +* `padding: Int` +* `outputPadding: Int` +* `dilation: Int` +* `groups: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/convtranspose1d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/convtranspose1d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/convtranspose1d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/convtranspose1d.adoc[tag=references,optional] + +== maxPool2d + +=== Signature + +[source,kotlin] +---- +fun maxPool2d(input:Tensor, kernelSize:Pair, stride:Pair, padding:Pair): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `kernelSize: Pair` +* `stride: Pair` +* `padding: Pair` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/maxpool2d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/maxpool2d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/maxpool2d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/maxpool2d.adoc[tag=references,optional] + +== avgPool2d + +=== Signature + +[source,kotlin] +---- +fun avgPool2d(input:Tensor, kernelSize:Pair, stride:Pair, padding:Pair, countIncludePad:Boolean): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `kernelSize: Pair` +* `stride: Pair` +* `padding: Pair` +* `countIncludePad: Boolean` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/avgpool2d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/avgpool2d.adoc[tag=intuition,optional] + +=== Examples + 
+include::partial$ops/tensorops/avgpool2d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/avgpool2d.adoc[tag=references,optional] + +== upsample2d + +=== Signature + +[source,kotlin] +---- +fun upsample2d(input:Tensor, scale:Pair, mode:UpsampleMode, alignCorners:Boolean): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `scale: Pair` +* `mode: UpsampleMode` +* `alignCorners: Boolean` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/upsample2d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/upsample2d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/upsample2d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/upsample2d.adoc[tag=references,optional] + +== reshape + +=== Signature + +[source,kotlin] +---- +fun reshape(tensor:Tensor, newShape:Shape): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `newShape: Shape` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/reshape.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/reshape.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/reshape.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/reshape.adoc[tag=references,optional] + +== flatten + +=== Signature + +[source,kotlin] +---- +fun flatten(tensor:Tensor, startDim:Int, endDim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `startDim: Int` +* `endDim: Int` + +=== Return Type + +`Tensor` + +=== Definition + 
+include::partial$ops/tensorops/flatten.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/flatten.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/flatten.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/flatten.adoc[tag=references,optional] + +== concat + +=== Signature + +[source,kotlin] +---- +fun concat(tensors:List, dim:Int): Tensor +---- + +=== Parameters + +* `tensors: List` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/concat.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/concat.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/concat.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/concat.adoc[tag=references,optional] + +== split + +=== Signature + +[source,kotlin] +---- +fun split(tensor:Tensor, splitSize:Int, dim:Int): List +---- + +=== Parameters + +* `tensor: Tensor` +* `splitSize: Int` +* `dim: Int` + +=== Return Type + +`List` + +=== Definition + +include::partial$ops/tensorops/split.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/split.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/split.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/split.adoc[tag=references,optional] + +== squeeze + +=== Signature + +[source,kotlin] +---- +fun squeeze(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== 
Definition + +include::partial$ops/tensorops/squeeze.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/squeeze.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/squeeze.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/squeeze.adoc[tag=references,optional] + +== unsqueeze + +=== Signature + +[source,kotlin] +---- +fun unsqueeze(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/unsqueeze.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/unsqueeze.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/unsqueeze.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/unsqueeze.adoc[tag=references,optional] + +== relu + +=== Signature + +[source,kotlin] +---- +fun relu(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/relu.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/relu.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/relu.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/relu.adoc[tag=references,optional] + +== leakyRelu + +=== Signature + +[source,kotlin] +---- +fun leakyRelu(tensor:Tensor, negativeSlope:Float): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `negativeSlope: Float` + +=== Return Type + +`Tensor` + +=== 
Definition + +include::partial$ops/tensorops/leakyrelu.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/leakyrelu.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/leakyrelu.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/leakyrelu.adoc[tag=references,optional] + +== elu + +=== Signature + +[source,kotlin] +---- +fun elu(tensor:Tensor, alpha:Float): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `alpha: Float` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/elu.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/elu.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/elu.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/elu.adoc[tag=references,optional] + +== softmax + +=== Signature + +[source,kotlin] +---- +fun softmax(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/softmax.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/softmax.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/softmax.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/softmax.adoc[tag=references,optional] + +== logSoftmax + +=== Signature + +[source,kotlin] +---- +fun logSoftmax(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== 
Definition + +include::partial$ops/tensorops/logsoftmax.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/logsoftmax.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/logsoftmax.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/logsoftmax.adoc[tag=references,optional] + +== sigmoid + +=== Signature + +[source,kotlin] +---- +fun sigmoid(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/sigmoid.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/sigmoid.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/sigmoid.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/sigmoid.adoc[tag=references,optional] + +== silu + +=== Signature + +[source,kotlin] +---- +fun silu(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/silu.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/silu.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/silu.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/silu.adoc[tag=references,optional] + +== gelu + +=== Signature + +[source,kotlin] +---- +fun gelu(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/gelu.adoc[tag=math,optional] + +=== 
Intuition + +include::partial$ops/tensorops/gelu.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/gelu.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/gelu.adoc[tag=references,optional] + +== sum + +=== Signature + +[source,kotlin] +---- +fun sum(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/sum.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/sum.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/sum.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/sum.adoc[tag=references,optional] + +== mean + +=== Signature + +[source,kotlin] +---- +fun mean(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/mean.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/mean.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/mean.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/mean.adoc[tag=references,optional] + +== variance + +=== Signature + +[source,kotlin] +---- +fun variance(tensor:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/variance.adoc[tag=math,optional] + +=== Intuition + 
+include::partial$ops/tensorops/variance.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/variance.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/variance.adoc[tag=references,optional] + +== sqrt + +=== Signature + +[source,kotlin] +---- +fun sqrt(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/sqrt.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/sqrt.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/sqrt.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/sqrt.adoc[tag=references,optional] + +== abs + +=== Signature + +[source,kotlin] +---- +fun abs(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/abs.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/abs.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/abs.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/abs.adoc[tag=references,optional] + +== sign + +=== Signature + +[source,kotlin] +---- +fun sign(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/sign.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/sign.adoc[tag=intuition,optional] + +=== Examples + 
+include::partial$ops/tensorops/sign.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/sign.adoc[tag=references,optional] + +== clamp + +=== Signature + +[source,kotlin] +---- +fun clamp(tensor:Tensor, minVal:Float, maxVal:Float): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `minVal: Float` +* `maxVal: Float` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/clamp.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/clamp.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/clamp.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/clamp.adoc[tag=references,optional] + +== narrow + +=== Signature + +[source,kotlin] +---- +fun narrow(tensor:Tensor, dim:Int, start:Int, length:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` +* `start: Int` +* `length: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/narrow.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/narrow.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/narrow.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/narrow.adoc[tag=references,optional] + +== pad2d + +=== Signature + +[source,kotlin] +---- +fun pad2d(tensor:Tensor, padLeft:Int, padRight:Int, padTop:Int, padBottom:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `padLeft: Int` +* `padRight: Int` +* `padTop: Int` +* `padBottom: Int` + +=== Return Type + +`Tensor` + +=== Definition 
+ +include::partial$ops/tensorops/pad2d.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/pad2d.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/pad2d.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/pad2d.adoc[tag=references,optional] + +== unfold + +=== Signature + +[source,kotlin] +---- +fun unfold(tensor:Tensor, dim:Int, size:Int, step:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `dim: Int` +* `size: Int` +* `step: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/unfold.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/unfold.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/unfold.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/unfold.adoc[tag=references,optional] + +== lt + +=== Signature + +[source,kotlin] +---- +fun lt(tensor:Tensor, value:Float): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `value: Float` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/lt.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/lt.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/lt.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/lt.adoc[tag=references,optional] + +== ge + +=== Signature + +[source,kotlin] +---- +fun ge(tensor:Tensor, value:Float): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `value: Float` + +=== Return Type + +`Tensor` + +=== 
Definition + +include::partial$ops/tensorops/ge.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/ge.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/ge.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/ge.adoc[tag=references,optional] + +== tril + +=== Signature + +[source,kotlin] +---- +fun tril(tensor:Tensor, k:Int): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `k: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/tril.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/tril.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/tril.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/tril.adoc[tag=references,optional] + +== convert + +=== Signature + +[source,kotlin] +---- +fun convert(tensor:Tensor, targetType:TTo): Tensor +---- + +=== Parameters + +* `tensor: Tensor` +* `targetType: TTo` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/convert.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/convert.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/convert.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/convert.adoc[tag=references,optional] + +== gather + +=== Signature + +[source,kotlin] +---- +fun gather(input:Tensor, indices:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `indices: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== 
Definition + +include::partial$ops/tensorops/gather.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/gather.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/gather.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/gather.adoc[tag=references,optional] + +== indexSelect + +=== Signature + +[source,kotlin] +---- +fun indexSelect(input:Tensor, indices:Tensor, dim:Int): Tensor +---- + +=== Parameters + +* `input: Tensor` +* `indices: Tensor` +* `dim: Int` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/indexselect.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/indexselect.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/indexselect.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/indexselect.adoc[tag=references,optional] + +== exp + +=== Signature + +[source,kotlin] +---- +fun exp(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/exp.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/exp.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/exp.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/exp.adoc[tag=references,optional] + +== expm1 + +=== Signature + +[source,kotlin] +---- +fun expm1(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + 
+include::partial$ops/tensorops/expm1.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/expm1.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/expm1.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/expm1.adoc[tag=references,optional] + +== sin + +=== Signature + +[source,kotlin] +---- +fun sin(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/sin.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/sin.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/sin.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/sin.adoc[tag=references,optional] + +== cos + +=== Signature + +[source,kotlin] +---- +fun cos(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/cos.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/cos.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/cos.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/cos.adoc[tag=references,optional] + +== tanh + +=== Signature + +[source,kotlin] +---- +fun tanh(tensor:Tensor): Tensor +---- + +=== Parameters + +* `tensor: Tensor` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/tanh.adoc[tag=math,optional] + +=== Intuition + 
+include::partial$ops/tensorops/tanh.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/tanh.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/tanh.adoc[tag=references,optional] + +== scaledDotProductAttention + +=== Signature + +[source,kotlin] +---- +fun scaledDotProductAttention(query:Tensor, key:Tensor, value:Tensor, mask:Tensor, scale:Float, causal:Boolean): Tensor +---- + +=== Parameters + +* `query: Tensor` +* `key: Tensor` +* `value: Tensor` +* `mask: Tensor` +* `scale: Float` +* `causal: Boolean` + +=== Return Type + +`Tensor` + +=== Definition + +include::partial$ops/tensorops/scaleddotproductattention.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/tensorops/scaleddotproductattention.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/tensorops/scaleddotproductattention.adoc[tag=examples,optional] + +=== Backend Support + +[cols="1,1,3", options="header"] +|=== +| Backend | Status | Notes +| void | implemented | - +|=== + +=== References + +include::partial$ops/tensorops/scaleddotproductattention.adoc[tag=references,optional] + diff --git a/docs/modules/ROOT/pages/reference/operators/generated/voidtensorops.adoc b/docs/modules/ROOT/pages/reference/operators/generated/voidtensorops.adoc index d33cf062..fdba6e90 100644 --- a/docs/modules/ROOT/pages/reference/operators/generated/voidtensorops.adoc +++ b/docs/modules/ROOT/pages/reference/operators/generated/voidtensorops.adoc @@ -22,6 +22,18 @@ fun matmul(a:Tensor, b:Tensor): Tensor `Tensor` +=== Definition + +include::partial$ops/voidtensorops/matmul.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/voidtensorops/matmul.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/voidtensorops/matmul.adoc[tag=examples,optional] + === Backend Support [cols="1,1,3", 
options="header"] @@ -36,6 +48,9 @@ TIP: *Metal*: TIP: *Metal*: +=== References + +include::partial$ops/voidtensorops/matmul.adoc[tag=references,optional] == transpose @@ -54,6 +69,18 @@ fun transpose(tensor:Tensor): Tensor `Tensor` +=== Definition + +include::partial$ops/voidtensorops/transpose.adoc[tag=math,optional] + +=== Intuition + +include::partial$ops/voidtensorops/transpose.adoc[tag=intuition,optional] + +=== Examples + +include::partial$ops/voidtensorops/transpose.adoc[tag=examples,optional] + === Backend Support [cols="1,1,3", options="header"] @@ -68,4 +95,7 @@ TIP: *Metal*: TIP: *Metal*: +=== References + +include::partial$ops/voidtensorops/transpose.adoc[tag=references,optional] diff --git a/docs/modules/ROOT/pages/reference/ops-status-matrix.adoc b/docs/modules/ROOT/pages/reference/ops-status-matrix.adoc index 6ee3957d..caf9cb73 100644 --- a/docs/modules/ROOT/pages/reference/ops-status-matrix.adoc +++ b/docs/modules/ROOT/pages/reference/ops-status-matrix.adoc @@ -1,19 +1,74 @@ = Operator Coverage Matrix :description: Cross-backend status for every operator function in SKaiNET. -Generated from `operators.json` version `1.0.0` on 2026-04-13. +Generated from `operators.json` version `1.0.0` on 2026-04-14. Rows are `Operator.function` pairs; columns are backends that appear in any function's `statusByBackend` map. A missing entry means the backend makes no claim about the function — treat it as "unknown", not "not supported". 
-[cols="2,1,1,1,1", options="header"] +[cols="2,1,1,1,1,1", options="header"] |=== -| Operator.function | Metal | apple | cpu | wasm +| Operator.function | Metal | apple | cpu | void | wasm -| `VoidTensorOps.matmul` | 🚧 | — | — | — -| `VoidTensorOps.transpose` | 🚧 | — | — | — -| `Similarity.cosineDistance` | — | ✅ | ✅ | ✅ +| `TensorOps.add` | — | — | — | ✅ | — +| `TensorOps.subtract` | — | — | — | ✅ | — +| `TensorOps.multiply` | — | — | — | ✅ | — +| `TensorOps.divide` | — | — | — | ✅ | — +| `TensorOps.addScalar` | — | — | — | ✅ | — +| `TensorOps.subScalar` | — | — | — | ✅ | — +| `TensorOps.mulScalar` | — | — | — | ✅ | — +| `TensorOps.divScalar` | — | — | — | ✅ | — +| `TensorOps.rsubScalar` | — | — | — | ✅ | — +| `TensorOps.rdivScalar` | — | — | — | ✅ | — +| `TensorOps.matmul` | — | — | — | ✅ | — +| `TensorOps.transpose` | — | — | — | ✅ | — +| `TensorOps.conv1d` | — | — | — | ✅ | — +| `TensorOps.conv2d` | — | — | — | ✅ | — +| `TensorOps.conv3d` | — | — | — | ✅ | — +| `TensorOps.convTranspose1d` | — | — | — | ✅ | — +| `TensorOps.maxPool2d` | — | — | — | ✅ | — +| `TensorOps.avgPool2d` | — | — | — | ✅ | — +| `TensorOps.upsample2d` | — | — | — | ✅ | — +| `TensorOps.reshape` | — | — | — | ✅ | — +| `TensorOps.flatten` | — | — | — | ✅ | — +| `TensorOps.concat` | — | — | — | ✅ | — +| `TensorOps.split` | — | — | — | ✅ | — +| `TensorOps.squeeze` | — | — | — | ✅ | — +| `TensorOps.unsqueeze` | — | — | — | ✅ | — +| `TensorOps.relu` | — | — | — | ✅ | — +| `TensorOps.leakyRelu` | — | — | — | ✅ | — +| `TensorOps.elu` | — | — | — | ✅ | — +| `TensorOps.softmax` | — | — | — | ✅ | — +| `TensorOps.logSoftmax` | — | — | — | ✅ | — +| `TensorOps.sigmoid` | — | — | — | ✅ | — +| `TensorOps.silu` | — | — | — | ✅ | — +| `TensorOps.gelu` | — | — | — | ✅ | — +| `TensorOps.sum` | — | — | — | ✅ | — +| `TensorOps.mean` | — | — | — | ✅ | — +| `TensorOps.variance` | — | — | — | ✅ | — +| `TensorOps.sqrt` | — | — | — | ✅ | — +| `TensorOps.abs` | — | — | — | ✅ | — +| `TensorOps.sign` | — | — | — | ✅ | — 
+| `TensorOps.clamp` | — | — | — | ✅ | — +| `TensorOps.narrow` | — | — | — | ✅ | — +| `TensorOps.pad2d` | — | — | — | ✅ | — +| `TensorOps.unfold` | — | — | — | ✅ | — +| `TensorOps.lt` | — | — | — | ✅ | — +| `TensorOps.ge` | — | — | — | ✅ | — +| `TensorOps.tril` | — | — | — | ✅ | — +| `TensorOps.convert` | — | — | — | ✅ | — +| `TensorOps.gather` | — | — | — | ✅ | — +| `TensorOps.indexSelect` | — | — | — | ✅ | — +| `TensorOps.exp` | — | — | — | ✅ | — +| `TensorOps.expm1` | — | — | — | ✅ | — +| `TensorOps.sin` | — | — | — | ✅ | — +| `TensorOps.cos` | — | — | — | ✅ | — +| `TensorOps.tanh` | — | — | — | ✅ | — +| `TensorOps.scaledDotProductAttention` | — | — | — | ✅ | — +| `VoidTensorOps.matmul` | 🚧 | — | — | — | — +| `VoidTensorOps.transpose` | 🚧 | — | — | — | — +| `Similarity.cosineDistance` | — | ✅ | ✅ | — | ✅ -| *Done* | *0 / 3* | *1 / 3* | *1 / 3* | *1 / 3* +| *Done* | *0 / 58* | *1 / 58* | *1 / 58* | *55 / 58* | *1 / 58* |=== Per-function detail including notes lives in xref:reference/operators/generated/index.adoc[Operator reference]. diff --git a/docs/modules/ROOT/partials/ops/tensorops/matmul.adoc b/docs/modules/ROOT/partials/ops/tensorops/matmul.adoc new file mode 100644 index 00000000..c741fe67 --- /dev/null +++ b/docs/modules/ROOT/partials/ops/tensorops/matmul.adoc @@ -0,0 +1,41 @@ +// tag::math[] +Given two matrices stem:[A \in \mathbb{R}^{m \times k}] and stem:[B \in \mathbb{R}^{k \times n}], the matrix product stem:[C = AB] is defined as: + +[stem] +++++ +C_{ij} = \sum_{l=1}^{k} A_{il} \cdot B_{lj} +++++ + +Where stem:[C \in \mathbb{R}^{m \times n}], stem:[i] ranges over rows stem:[1..m], stem:[j] over columns stem:[1..n], and stem:[l] is the summation index over the shared dimension stem:[k]. +// end::math[] + +// tag::intuition[] +Matrix multiplication composes two linear transformations: each output element is the dot product of a row of stem:[A] with a column of stem:[B]. 
It is the core primitive behind fully-connected layers, attention projections, and any linear map in a neural network's forward pass. + +Key properties: + +* *Associativity*: stem:[(AB)C = A(BC)] +* *Distributivity*: stem:[A(B + C) = AB + AC] +* *Non-commutativity*: in general stem:[AB \neq BA] +* *Identity*: stem:[AI = IA = A] + +Complexity: + +* Standard algorithm: stem:[O(mnk)] +* Strassen's algorithm: stem:[O(n^{2.807})] for square matrices +* Current theoretical best: stem:[O(n^{2.373})] +// end::intuition[] + +// tag::examples[] +[source,kotlin] +---- +val a: Tensor = tensor(shape(2, 3)) { ... } +val b: Tensor = tensor(shape(3, 4)) { ... } +val c = ops.matmul(a, b) // shape(2, 4) +---- +// end::examples[] + +// tag::references[] +* https://en.wikipedia.org/wiki/Matrix_multiplication[Matrix multiplication — Wikipedia] +* Strassen, V. (1969). _Gaussian elimination is not optimal_. Numerische Mathematik. +// end::references[] diff --git a/skainet-backends/skainet-backend-cpu/src/appleMain/kotlin/sk/ainet/exec/tensor/ops/AccelerateCpuOps.kt b/skainet-backends/skainet-backend-cpu/src/appleMain/kotlin/sk/ainet/exec/tensor/ops/AccelerateCpuOps.kt index e8e5540b..f0051d84 100644 --- a/skainet-backends/skainet-backend-cpu/src/appleMain/kotlin/sk/ainet/exec/tensor/ops/AccelerateCpuOps.kt +++ b/skainet-backends/skainet-backend-cpu/src/appleMain/kotlin/sk/ainet/exec/tensor/ops/AccelerateCpuOps.kt @@ -19,6 +19,7 @@ import sk.ainet.lang.tensor.Shape import sk.ainet.lang.tensor.Tensor import sk.ainet.lang.tensor.data.FloatArrayTensorData import sk.ainet.lang.tensor.data.TensorDataFactory +import sk.ainet.lang.ops.Backend import sk.ainet.lang.types.DType import sk.ainet.lang.types.FP32 @@ -30,6 +31,7 @@ import sk.ainet.lang.types.FP32 * Falls through to [DefaultCpuOpsBase] for non-FP32, non-contiguous, * or complex broadcasting cases. 
*/ +@Backend(id = "apple", displayName = "Apple Accelerate") public class AccelerateCpuOps( dataFactory: TensorDataFactory, ) : DefaultCpuOpsBase(dataFactory) { diff --git a/skainet-backends/skainet-backend-cpu/src/commonMain/kotlin/sk/ainet/exec/tensor/ops/DefaultCpuOps.kt b/skainet-backends/skainet-backend-cpu/src/commonMain/kotlin/sk/ainet/exec/tensor/ops/DefaultCpuOps.kt index 5f9c7e90..a7463462 100644 --- a/skainet-backends/skainet-backend-cpu/src/commonMain/kotlin/sk/ainet/exec/tensor/ops/DefaultCpuOps.kt +++ b/skainet-backends/skainet-backend-cpu/src/commonMain/kotlin/sk/ainet/exec/tensor/ops/DefaultCpuOps.kt @@ -5,6 +5,7 @@ import sk.ainet.lang.tensor.Shape import sk.ainet.lang.tensor.Tensor import sk.ainet.lang.tensor.ops.TensorOps import sk.ainet.lang.types.DType +import sk.ainet.lang.ops.Backend import sk.ainet.lang.ops.TensorOp import sk.ainet.lang.ops.InProgress import sk.ainet.lang.tensor.data.FloatArrayTensorData @@ -13,6 +14,7 @@ import sk.ainet.lang.tensor.ops.UpsampleMode import sk.ainet.lang.types.FP32 import kotlin.math.sqrt +@Backend(id = "cpu", displayName = "CPU") @InProgress("cpu", owner = "team:cpu", issue = "task-ops.md#defaultcpuops") public open class DefaultCpuOpsBase(protected val dataFactory: TensorDataFactory) : TensorOps { diff --git a/skainet-lang/skainet-lang-core/src/commonMain/kotlin/sk/ainet/lang/tensor/ops/VoidTensorOps.kt b/skainet-lang/skainet-lang-core/src/commonMain/kotlin/sk/ainet/lang/tensor/ops/VoidTensorOps.kt index e753ab31..4ff09b3a 100644 --- a/skainet-lang/skainet-lang-core/src/commonMain/kotlin/sk/ainet/lang/tensor/ops/VoidTensorOps.kt +++ b/skainet-lang/skainet-lang-core/src/commonMain/kotlin/sk/ainet/lang/tensor/ops/VoidTensorOps.kt @@ -1,5 +1,6 @@ package sk.ainet.lang.tensor.ops +import sk.ainet.lang.ops.Backend import sk.ainet.lang.ops.InProgress import sk.ainet.lang.tensor.Shape import sk.ainet.lang.tensor.Tensor @@ -8,6 +9,7 @@ import sk.ainet.lang.tensor.data.DenseTensorDataFactory import 
sk.ainet.lang.types.DType import sk.ainet.lang.tensor.data.views.UnsqueezedTensorData +@Backend(id = "void", displayName = "Shape-only") public class VoidTensorOps : TensorOps { private val dataFactory = DenseTensorDataFactory() diff --git a/skainet-lang/skainet-lang-ksp-annotations/src/commonMain/kotlin/sk/ainet/lang/ops/TensorOp.kt b/skainet-lang/skainet-lang-ksp-annotations/src/commonMain/kotlin/sk/ainet/lang/ops/TensorOp.kt index 97c5c062..76358d2c 100644 --- a/skainet-lang/skainet-lang-ksp-annotations/src/commonMain/kotlin/sk/ainet/lang/ops/TensorOp.kt +++ b/skainet-lang/skainet-lang-ksp-annotations/src/commonMain/kotlin/sk/ainet/lang/ops/TensorOp.kt @@ -51,3 +51,22 @@ public annotation class InProgress( val owner: String = "", val issue: String = "" ) + +/** + * Marks a class as a concrete compute backend implementation of the + * `TensorOps` interface. The docs KSP processor uses this to derive the + * `statusByBackend` map for each operator automatically, so adding a new + * backend is one annotation instead of N hand-edits to `@InProgress`. + * + * @param id Stable identifier used as a column key in the ops status + * matrix (e.g. `"cpu"`, `"apple"`, `"wasm"`, `"cuda"`). Keep it short + * and lowercase. + * @param displayName Human-readable label for rendered tables. Defaults + * to [id] if left empty. 
+ */ +@Target(AnnotationTarget.CLASS) +@Retention(AnnotationRetention.SOURCE) +public annotation class Backend( + val id: String, + val displayName: String = "" +) diff --git a/skainet-lang/skainet-lang-ksp-processor/src/main/kotlin/sk/ainet/lang/ops/ksp/OperatorDocProcessor.kt b/skainet-lang/skainet-lang-ksp-processor/src/main/kotlin/sk/ainet/lang/ops/ksp/OperatorDocProcessor.kt index 0dae9bc4..9e4e42ba 100644 --- a/skainet-lang/skainet-lang-ksp-processor/src/main/kotlin/sk/ainet/lang/ops/ksp/OperatorDocProcessor.kt +++ b/skainet-lang/skainet-lang-ksp-processor/src/main/kotlin/sk/ainet/lang/ops/ksp/OperatorDocProcessor.kt @@ -1,5 +1,7 @@ package sk.ainet.lang.ops.ksp +import com.google.devtools.ksp.getClassDeclarationByName +import com.google.devtools.ksp.getDeclaredFunctions import com.google.devtools.ksp.processing.* import com.google.devtools.ksp.symbol.* import com.google.devtools.ksp.validate @@ -53,7 +55,10 @@ class OperatorDocProcessor( private val logger: KSPLogger ) : SymbolProcessor { + private var alreadyGenerated = false + override fun process(resolver: Resolver): List<KSAnnotated> { + if (alreadyGenerated) return emptyList() logger.info("Starting OperatorDocProcessor...") val notImplementedSymbols = resolver
visible in this compilation unit. + // This scales coverage beyond the hand-annotated symbols and makes the + // backend matrix track ground truth instead of annotation drift. + val interfaceOps = discoverTensorOpsSurface(resolver) + + // Prefer interface-scan for the TensorOps operator; keep annotation-derived + // operators (like synthetic Similarity from @DslOp) untouched. + val interfaceNames = interfaceOps.map { it.name }.toSet() + val operatorDocs = interfaceOps + annotationOps.filter { it.name !in interfaceNames } + + if (operatorDocs.isEmpty()) { + logger.info("No operators discovered (no annotations and no TensorOps visible)") + return emptyList() + } // Create the module documentation val module = OperatorDocModule( @@ -99,10 +115,73 @@ class OperatorDocProcessor( // Generate JSON output generateJsonOutput(module) + alreadyGenerated = true return emptyList() // No symbols need further processing } + /** + * Walk the `TensorOps` interface and every `@Backend`-annotated class + * visible in this compilation unit to produce a single `OperatorDoc` + * covering the full op surface. Each function's `statusByBackend` maps + * every visible backend id to `implemented` when that backend's class + * declares an override of the method, or `inherited` otherwise. + * + * Returns an empty list when `TensorOps` is not on the compilation + * classpath — non-`skainet-lang-core` modules simply fall back to the + * annotation-driven path. 
+ */ + private fun discoverTensorOpsSurface(resolver: Resolver): List<OperatorDoc> { + val tensorOpsName = resolver.getKSNameFromString("sk.ainet.lang.tensor.ops.TensorOps") + val tensorOps = resolver.getClassDeclarationByName(tensorOpsName) ?: return emptyList() + + val backendClasses: List<Pair<String, KSClassDeclaration>> = resolver + .getSymbolsWithAnnotation("sk.ainet.lang.ops.Backend") + .filterIsInstance<KSClassDeclaration>() + .mapNotNull { cls -> + val ann = cls.annotations.find { it.shortName.asString() == "Backend" } ?: return@mapNotNull null + val id = ann.arguments.find { it.name?.asString() == "id" }?.value?.toString() + ?: return@mapNotNull null + id to cls + } + .toList() + + logger.info("Discovered ${backendClasses.size} @Backend classes for TensorOps surface scan") + + // Interface methods only — skip default implementations authored on + // the interface (they still show up here but their status defaults + // to "inherited" for every backend unless overridden). + val interfaceFunctions = tensorOps.getDeclaredFunctions().toList() + + val functionDocs = interfaceFunctions.map { fn -> + val statusByBackend = mutableMapOf<String, String>() + for ((backendId, backendClass) in backendClasses) { + val overrides = backendClass.getAllFunctions().any { candidate -> + candidate.simpleName.asString() == fn.simpleName.asString() && + candidate.findOverridee()?.simpleName?.asString() == fn.simpleName.asString() + } + statusByBackend[backendId] = if (overrides) "implemented" else "inherited" + } + FunctionDoc( + name = fn.simpleName.asString(), + signature = fn.toSignatureString(), + parameters = extractParameters(fn), + returnType = extractReturnType(fn), + statusByBackend = statusByBackend, + notes = emptyList() + ) + } + + return listOf( + OperatorDoc( + name = "TensorOps", + packageName = tensorOps.packageName.asString(), + modality = "core", + functions = functionDocs + ) + ) + } + private fun groupSymbolsByOperator(symbols: List<KSAnnotated>): List<OperatorDoc> { return symbols .groupBy { symbol ->
logger.info("Generated operators.json with ${module.operators.size} operators") } catch (e: Exception) { - logger.error("Failed to generate JSON output: ${e.message}") + logger.error("Failed to generate JSON output: ${e::class.simpleName}: ${e.message}") } } }