Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 11 additions & 4 deletions cmd/epp/runner/runner.go
Original file line number Diff line number Diff line change
Expand Up @@ -245,7 +245,7 @@ func (r *Runner) Run(ctx context.Context) error {
}

// --- Setup Datastore ---
epf, err := r.setupMetricsCollection(setupLog, r.featureGates[datalayer.FeatureGate])
epf, err := r.setupMetricsCollection(setupLog, r.featureGates[datalayer.ExperimentalDatalayerFeatureGate])
if err != nil {
return err
}
Expand Down Expand Up @@ -387,7 +387,7 @@ func (r *Runner) Run(ctx context.Context) error {
MetricsStalenessThreshold: *metricsStalenessThreshold,
Director: director,
SaturationDetector: saturationDetector,
UseExperimentalDatalayerV2: r.featureGates[datalayer.FeatureGate], // pluggable data layer feature flag
UseExperimentalDatalayerV2: r.featureGates[datalayer.ExperimentalDatalayerFeatureGate], // pluggable data layer feature flag
}
if err := serverRunner.SetupWithManager(ctx, mgr); err != nil {
setupLog.Error(err, "Failed to setup EPP controllers")
Expand Down Expand Up @@ -478,8 +478,9 @@ func (r *Runner) parseConfigurationPhaseOne(ctx context.Context) (*configapi.End
}
}

loader.RegisterFeatureGate(datalayer.FeatureGate)
loader.RegisterFeatureGate(datalayer.ExperimentalDatalayerFeatureGate)
loader.RegisterFeatureGate(flowcontrol.FeatureGate)
loader.RegisterFeatureGate(datalayer.PrepareDataPluginsFeatureGate)

r.registerInTreePlugins()

Expand Down Expand Up @@ -519,10 +520,16 @@ func (r *Runner) parseConfigurationPhaseTwo(ctx context.Context, rawConfig *conf

// Add requestControl plugins
r.requestControlConfig.AddPlugins(handle.GetAllPlugins()...)

// Sort prepare data plugins in DAG order (topological sort). Also check prepare data plugins for cycles.
if r.requestControlConfig.PrepareDataPluginGraph() != nil {
return nil, errors.New("failed to load the configuration - prepare data plugins have cyclic dependencies")
}
// TODO(#1970): Remove feature gate check once prepare data plugins are stable.
if !r.featureGates[datalayer.PrepareDataPluginsFeatureGate] {
// If the feature gate is disabled, clear any prepare data plugins so they are not used.
r.requestControlConfig.WithPrepareDataPlugins()
}

// Handle deprecated configuration options
r.deprecatedConfigurationHelper(cfg, logger)
Expand All @@ -544,7 +551,7 @@ func (r *Runner) deprecatedConfigurationHelper(cfg *config.Config, logger logr.L

if _, ok := os.LookupEnv(enableExperimentalDatalayerV2); ok {
logger.Info("Enabling the experimental Data Layer V2 using environment variables is deprecated and will be removed in next version")
r.featureGates[datalayer.FeatureGate] = env.GetEnvBool(enableExperimentalDatalayerV2, false, logger)
r.featureGates[datalayer.ExperimentalDatalayerFeatureGate] = env.GetEnvBool(enableExperimentalDatalayerV2, false, logger)
}
if _, ok := os.LookupEnv(enableExperimentalFlowControlLayer); ok {
logger.Info("Enabling the experimental Flow Control layer using environment variables is deprecated and will be removed in next version")
Expand Down
2 changes: 1 addition & 1 deletion pkg/epp/config/loader/configloader.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,7 @@ func InstantiateAndConfigure(
}

featureGates := loadFeatureConfig(rawConfig.FeatureGates)
dataConfig, err := buildDataLayerConfig(rawConfig.Data, featureGates[datalayer.FeatureGate], handle)
dataConfig, err := buildDataLayerConfig(rawConfig.Data, featureGates[datalayer.ExperimentalDatalayerFeatureGate], handle)
if err != nil {
return nil, fmt.Errorf("data layer config build failed: %w", err)
}
Expand Down
6 changes: 3 additions & 3 deletions pkg/epp/config/loader/configloader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func TestLoadRawConfiguration(t *testing.T) {
t.Parallel()

// Register known feature gates for validation.
RegisterFeatureGate(datalayer.FeatureGate)
RegisterFeatureGate(datalayer.ExperimentalDatalayerFeatureGate)

tests := []struct {
name string
Expand Down Expand Up @@ -90,7 +90,7 @@ func TestLoadRawConfiguration(t *testing.T) {
},
},
},
FeatureGates: configapi.FeatureGates{datalayer.FeatureGate},
FeatureGates: configapi.FeatureGates{datalayer.ExperimentalDatalayerFeatureGate},
SaturationDetector: &configapi.SaturationDetector{
QueueDepthThreshold: 10,
KVCacheUtilThreshold: 0.8,
Expand Down Expand Up @@ -150,7 +150,7 @@ func TestInstantiateAndConfigure(t *testing.T) {
// Not parallel because it modifies global plugin registry.
registerTestPlugins(t)

RegisterFeatureGate(datalayer.FeatureGate)
RegisterFeatureGate(datalayer.ExperimentalDatalayerFeatureGate)

tests := []struct {
name string
Expand Down
3 changes: 2 additions & 1 deletion pkg/epp/datalayer/factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@ import (
)

const (
FeatureGate = "dataLayer"
ExperimentalDatalayerFeatureGate = "dataLayer"
PrepareDataPluginsFeatureGate = "prepareDataPlugins"
)

// PoolInfo represents the DataStore information needed for endpoints.
Expand Down
52 changes: 52 additions & 0 deletions pkg/epp/datalayer/plugins/approximateprefix/data_types.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package approximateprefix

import (
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/datalayer"
)

const (
	// PrefixCacheMatchInfoKey is the attribute key under which per-pod
	// prefix-cache match information is stored.
	PrefixCacheMatchInfoKey = "PrefixCacheMatchInfoKey"
)

// PrefixCacheMatchInfo captures how much of a request's prefix a server is
// believed to have cached: the number of matched blocks and the total number
// of blocks derived from the request.
type PrefixCacheMatchInfo struct {
	matchLength int
	totalBlocks int
}

// NewPrefixCacheMatchInfo builds a PrefixCacheMatchInfo from the matched
// block count and the total block count.
func NewPrefixCacheMatchInfo(matchLen int, blockHashLen int) *PrefixCacheMatchInfo {
	info := PrefixCacheMatchInfo{matchLength: matchLen, totalBlocks: blockHashLen}
	return &info
}

// MatchLength returns the number of prefix blocks that matched.
func (p *PrefixCacheMatchInfo) MatchLength() int {
	return p.matchLength
}

// TotalLength returns the total number of blocks for the request.
func (p *PrefixCacheMatchInfo) TotalLength() int {
	return p.totalBlocks
}

// Clone returns an independent copy, satisfying datalayer.Cloneable.
func (p *PrefixCacheMatchInfo) Clone() datalayer.Cloneable {
	copied := *p
	return &copied
}
15 changes: 9 additions & 6 deletions pkg/epp/requestcontrol/dag.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ import (

// buildDAG builds a dependency graph among data preparation plugins based on their
// produced and consumed data keys.
func buildDAG(plugins []PrepareDataPlugin) map[string][]string {
func buildDAG(plugins []PrepareDataPlugin) (map[string][]string, error) {
dag := make(map[string][]string)
for _, plugin := range plugins {
dag[plugin.TypedName().String()] = []string{}
Expand All @@ -36,11 +36,14 @@ func buildDAG(plugins []PrepareDataPlugin) map[string][]string {
}
// Check whether plugin[i] produces something consumed by plugin[j]. In that case, j depends on i.
if plugins[i].Produces() != nil && plugins[j].Consumes() != nil {
// For all the keys produced by plugin i, check if plugin j consumes any of them.
// If yes, then j depends on i.
for producedKey := range plugins[i].Produces() {
for producedKey, producedData := range plugins[i].Produces() {
// If plugin j consumes the produced key, then j depends on i. We can break after the first match.
if _, ok := plugins[j].Consumes()[producedKey]; ok {
if consumedData, ok := plugins[j].Consumes()[producedKey]; ok {
// Check types are same. Reflection is avoided here for simplicity.
// TODO(#1985): Document this detail in IGW docs.
if producedData != consumedData {
return nil, errors.New("data type mismatch between produced and consumed data for key: " + producedKey)
}
iPluginName := plugins[i].TypedName().String()
jPluginName := plugins[j].TypedName().String()
dag[jPluginName] = append(dag[jPluginName], iPluginName)
Expand All @@ -50,7 +53,7 @@ func buildDAG(plugins []PrepareDataPlugin) map[string][]string {
}
}
}
return dag
return dag, nil
}

// sortPlugins builds the dependency graph and returns the plugins ordered in topological order.
Expand Down
18 changes: 17 additions & 1 deletion pkg/epp/requestcontrol/dag_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,10 @@ func TestPrepareDataGraph(t *testing.T) {
pluginX := &mockPrepareRequestDataP{name: "X", produces: map[string]any{"keyX": nil}, consumes: map[string]any{"keyY": nil}}
pluginY := &mockPrepareRequestDataP{name: "Y", produces: map[string]any{"keyY": nil}, consumes: map[string]any{"keyX": nil}}

// Data type mismatch plugin.
pluginZ1 := &mockPrepareRequestDataP{name: "Z1", produces: map[string]any{"keyZ": int(0)}}
pluginZ2 := &mockPrepareRequestDataP{name: "Z2", consumes: map[string]any{"keyZ": string("")}}

testCases := []struct {
name string
plugins []PrepareDataPlugin
Expand Down Expand Up @@ -109,11 +113,23 @@ func TestPrepareDataGraph(t *testing.T) {
expectedDAG: nil,
expectError: true,
},
{
name: "Data type mismatch between produced and consumed data",
plugins: []PrepareDataPlugin{pluginZ1, pluginZ2},
expectedDAG: nil,
expectError: true,
},
}

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
dag := buildDAG(tc.plugins)
dag, err := buildDAG(tc.plugins)
if err != nil {
if tc.expectError {
assert.Error(t, err)
return
}
}
orderedPlugins, err := sortPlugins(dag, tc.plugins)

if tc.expectError {
Expand Down
3 changes: 3 additions & 0 deletions pkg/epp/requestcontrol/director.go
Original file line number Diff line number Diff line change
Expand Up @@ -347,6 +347,9 @@ func (d *Director) runPreRequestPlugins(ctx context.Context, request *scheduling

// runPrepareDataPlugins executes the configured prepare-data plugins for the
// given request and candidate pods, bounded by prepareDataTimeout.
// It is a no-op when no prepare-data plugins are configured.
func (d *Director) runPrepareDataPlugins(ctx context.Context,
	request *schedulingtypes.LLMRequest, pods []schedulingtypes.Pod) error {
	// Fast path: avoid the timeout machinery entirely when there is nothing to run.
	if len(d.requestControlPlugins.prepareDataPlugins) == 0 {
		return nil
	}
	return prepareDataPluginsWithTimeout(prepareDataTimeout, d.requestControlPlugins.prepareDataPlugins, ctx, request, pods)
}

Expand Down
9 changes: 8 additions & 1 deletion pkg/epp/requestcontrol/request_control_config.go
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,14 @@ func (c *Config) AddPlugins(pluginObjects ...plugins.Plugin) {
// PrepareDataPluginGraph creates data dependency graph and sorts the plugins in topological order.
// If a cycle is detected, it returns an error.
func (c *Config) PrepareDataPluginGraph() error {
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can we create an issue that we should create a dag, even when the prepare data plugins are not in use? We want to make sure that all data consumers have a corresponding producer, and that the dependencies are not cyclical. It's not scoped to just prepare data

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I updated the code to do the validation regardless of feature flag. Its safe to do that. Thanks!

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The DAG should be built, completely agnostic of if any PrepareData plugins are in use. Will add suggestions to help clarify what I mean

Copy link
Contributor Author

@rahulgurnani rahulgurnani Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ohh, I understand. I think that's a bigger change; I want to address it in a separate PR, as a fast follow. For this PR, let's scope the DAG to only prepare data plugins. Added a TODO for now. Thanks!

dag := buildDAG(c.prepareDataPlugins)
// TODO(#1988): Add all producer and consumer plugins to the graph.
if len(c.prepareDataPlugins) == 0 {
return nil
}
dag, err := buildDAG(c.prepareDataPlugins)
if err != nil {
return err
}
plugins, err := sortPlugins(dag, c.prepareDataPlugins)
if err != nil {
return err
Expand Down
25 changes: 25 additions & 0 deletions pkg/epp/scheduling/framework/plugins/multi/prefix/plugin.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import (
k8stypes "k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/log"

"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/datalayer/plugins/approximateprefix"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/metrics"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/plugins"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/requestcontrol"
Expand Down Expand Up @@ -206,6 +207,30 @@ func (p *Plugin) WithName(name string) *Plugin {
return p
}

func (p *Plugin) Produces() map[string]any {
return map[string]any{approximateprefix.PrefixCacheMatchInfoKey: approximateprefix.PrefixCacheMatchInfo{}}
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Others Looks good, just one question for my understanding. what's the Produces() used for?

here it does not populate any data in PrefixCacheMatchInfo{}. It's only used for constructing the DAG. Are we planning to populate PrefixCacheMatchInfo in produces? And when/how should we populate it?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We do it in PrepareRequestData. Produces is used for DAG validation on EPP startup.

Refer: https://docs.google.com/document/d/1EQwXL2pCuUyM1B917FUgP_8pFS3VF8F_bUfjy8IE7gM/edit?tab=t.vmaefhinvkl5#heading=h.s9mr6kynb3ls for more context. Thanks!

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, Produces output a map[string]any. I get the idea of DAG validation of using the key of the output map. I'm wondering what's the usage of the value here. Currently you put a placeholder approximateprefix.PrefixCacheMatchInfo{}. I'm wondering if it will be populated in the future.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

if the Produces output type is meant to be a set, should we change it to a map[string]struct{} or map[string]bool which is more go idiomatic? @rahulgurnani @kfswain

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@zetxqx as far as I understand the validation is not only for key existence, but also that the producer output type correlates to the type the consumer wants.

the way it’s done here is by setting an empty struct, and then the validation code can use reflection (or alike) to validate the types match

Copy link
Collaborator

@kfswain kfswain Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@zetxqx as far as I understand the validation is not only for key existence, but also that the producer output type correlates to the type the consumer wants.

This is correct, the value is used in reflection on startup to ensure that they value of the key type is expected. Allowing for confidence in type usage, even in an environment where only out of tree plugins are used. So using a set would not allow for a fully reliable dag (a key name may be used with an unexpected type, or force reflection to happen on every plugin call)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see, thank you all. Non-blocking for this PR, just two points from my understanding of the current code path:

  1. Currently the DAG construction is only checking the Key here:
    for producedKey := range plugins[i].Produces() {
    // If plugin j consumes the produced key, then j depends on i. We can break after the first match.
    if _, ok := plugins[j].Consumes()[producedKey]; ok {
  2. if we only want to do pure type checking not information checking in the type. Should we consider Produces output map[string]reflect.Type or map[reflect.Type]struct{}

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

  1. That should be fixed, the DAG needs more work, I have other comments in this PR suggesting as such.
  2. You could , it wouldn't really make a material difference other than making the implementers call reflect.TypeOf(pluginProducingType). We will do that all under the hood in the DAG checker anyway. Just depends on what the preferred UX is

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I added the validation in this PR itself. Please take another look at the last commit. I think data check is simpler than using reflection. We could document this behavior. Thanks!

}

// Consumes reports the data keys this plugin reads from other prepare-data
// plugins; the prefix plugin consumes none, so the map is always empty.
func (p *Plugin) Consumes() map[string]any {
	return make(map[string]any)
}

// PrepareRequestData hashes the request prompt into blocks, finds the longest
// prefix match per server, and records the result on each pod as a
// PrefixCacheMatchInfo attribute.
func (p *Plugin) PrepareRequestData(ctx context.Context, request *types.LLMRequest, pods []types.Pod) error {
	blockHashes := hashPrompt(ctx, request, getBlockSize(pods, p.config), p.config.MaxPrefixBlocksToMatch)
	cacheServers := p.matchLongestPrefix(ctx, blockHashes)
	totalBlocks := len(blockHashes)

	// Attach the per-pod match length (zero for pods with no cached prefix)
	// together with the total block count.
	for _, pod := range pods {
		matched := cacheServers[ServerID(pod.GetPod().NamespacedName)]
		pod.Put(approximateprefix.PrefixCacheMatchInfoKey, approximateprefix.NewPrefixCacheMatchInfo(matched, totalBlocks))
	}
	return nil
}

// Score returns the scoring result for the given list of pods based on context.
func (p *Plugin) Score(ctx context.Context, cycleState *types.CycleState, request *types.LLMRequest, pods []types.Pod) map[types.Pod]float64 {
// pre score step, hashing prompt and find longest prefix match.
Expand Down
67 changes: 67 additions & 0 deletions pkg/epp/scheduling/framework/plugins/multi/prefix/plugin_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,10 +29,16 @@ import (

"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/backend"
backendmetrics "sigs.k8s.io/gateway-api-inference-extension/pkg/epp/backend/metrics"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/datalayer"
dplugins "sigs.k8s.io/gateway-api-inference-extension/pkg/epp/datalayer/plugins/approximateprefix"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/plugins"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/requestcontrol"
"sigs.k8s.io/gateway-api-inference-extension/pkg/epp/scheduling/types"
)

// static check to ensure Plugin implements the PrepareDataPlugin interface.
var _ requestcontrol.PrepareDataPlugin = &Plugin{}

func TestPrefixPluginCompletion(t *testing.T) {
config := Config{
BlockSize: 4,
Expand Down Expand Up @@ -571,6 +577,67 @@ func randomPrompt(n int) string {
return sb.String()
}

// TestPrepareRequestData verifies that PrepareRequestData attaches a
// PrefixCacheMatchInfo attribute to every pod, reflecting the longest prefix
// match recorded by an earlier request.
func TestPrepareRequestData(t *testing.T) {
	config := Config{
		BlockSize:              4,
		MaxPrefixBlocksToMatch: DefaultMaxPrefixBlocks,
		LRUCapacityPerServer:   DefaultLRUCapacityPerServer,
	}
	plugin := New(context.Background(), config)

	pod1 := &types.PodMetrics{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod1"}}, MetricsState: backendmetrics.NewMetricsState(), AttributeMap: datalayer.NewAttributes()}
	pod2 := &types.PodMetrics{Pod: &backend.Pod{NamespacedName: k8stypes.NamespacedName{Name: "pod2"}}, MetricsState: backendmetrics.NewMetricsState(), AttributeMap: datalayer.NewAttributes()}
	pods := []types.Pod{pod1, pod2}

	// First request to populate cache.
	req1 := &types.LLMRequest{
		RequestId:   uuid.NewString(),
		TargetModel: "test-model1",
		Body: &types.LLMRequestBody{
			Completions: &types.CompletionsRequest{
				Prompt: "aaaabbbb",
			},
		},
	}
	_ = plugin.Score(context.Background(), types.NewCycleState(), req1, pods)
	schedulingResult := &types.SchedulingResult{
		PrimaryProfileName: "default",
		ProfileResults: map[string]*types.ProfileRunResult{
			"default": {TargetPods: []types.Pod{pod1}},
		},
	}
	// PreRequest records the prefix hashes for the scheduled pod (pod1);
	// wait for the async cache update to complete before issuing req2.
	plugin.PreRequest(context.Background(), req1, schedulingResult)
	plugin.wg.Wait()

	// Second request that shares a prefix.
	req2 := &types.LLMRequest{
		RequestId:   uuid.NewString(),
		TargetModel: "test-model1",
		Body: &types.LLMRequestBody{
			Completions: &types.CompletionsRequest{
				Prompt: "aaaacccc",
			},
		},
	}

	err := plugin.PrepareRequestData(context.Background(), req2, pods)
	assert.NoError(t, err)

	// Verify pod1 has the correct prefix match info
	info1, ok := pod1.Get(dplugins.PrefixCacheMatchInfoKey)
	assert.True(t, ok)
	prefixInfo1 := info1.(*dplugins.PrefixCacheMatchInfo)
	assert.Equal(t, 1, prefixInfo1.MatchLength()) // "aaaa" matches
	assert.Equal(t, 2, prefixInfo1.TotalLength()) // "aaaacccc" -> 2 blocks

	// Verify pod2 has no match info
	info2, ok := pod2.Get(dplugins.PrefixCacheMatchInfoKey)
	assert.True(t, ok)
	prefixInfo2 := info2.(*dplugins.PrefixCacheMatchInfo)
	assert.Equal(t, 0, prefixInfo2.MatchLength()) // No match for pod2
	assert.Equal(t, 2, prefixInfo2.TotalLength())
}

// BenchmarkPrefixPluginChatCompletionsStress is a stress test for chat completions with varying message counts and lengths
func BenchmarkPrefixPluginChatCompletionsStress(b *testing.B) {
blockSize := 8
Expand Down