diff --git a/api/proto/externalevents/v1/external_events.pb.go b/api/proto/externalevents/v1/external_events.pb.go new file mode 100644 index 0000000..466dc8a --- /dev/null +++ b/api/proto/externalevents/v1/external_events.pb.go @@ -0,0 +1,260 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc v5.29.3 +// source: api/proto/externalevents/v1/external_events.proto + +package externaleventsv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ConfigJson string `protobuf:"bytes,2,opt,name=config_json,json=configJson,proto3" json:"config_json,omitempty"` + Hints []string `protobuf:"bytes,3,rep,name=hints,proto3" json:"hints,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WatchRequest) Reset() { + *x = WatchRequest{} + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchRequest) ProtoMessage() {} + +func (x *WatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use WatchRequest.ProtoReflect.Descriptor instead. +func (*WatchRequest) Descriptor() ([]byte, []int) { + return file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP(), []int{0} +} + +func (x *WatchRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *WatchRequest) GetConfigJson() string { + if x != nil { + return x.ConfigJson + } + return "" +} + +func (x *WatchRequest) GetHints() []string { + if x != nil { + return x.Hints + } + return nil +} + +type EventMessage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + Uid string `protobuf:"bytes,6,opt,name=uid,proto3" json:"uid,omitempty"` + Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimestampUnix int64 `protobuf:"varint,8,opt,name=timestamp_unix,json=timestampUnix,proto3" json:"timestamp_unix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EventMessage) Reset() { + *x = EventMessage{} + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EventMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventMessage) ProtoMessage() {} + +func (x *EventMessage) ProtoReflect() protoreflect.Message { + mi := 
&file_api_proto_externalevents_v1_external_events_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventMessage.ProtoReflect.Descriptor instead. +func (*EventMessage) Descriptor() ([]byte, []int) { + return file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP(), []int{1} +} + +func (x *EventMessage) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EventMessage) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *EventMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *EventMessage) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *EventMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *EventMessage) GetUid() string { + if x != nil { + return x.Uid + } + return "" +} + +func (x *EventMessage) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *EventMessage) GetTimestampUnix() int64 { + if x != nil { + return x.TimestampUnix + } + return 0 +} + +var File_api_proto_externalevents_v1_external_events_proto protoreflect.FileDescriptor + +const file_api_proto_externalevents_v1_external_events_proto_rawDesc = "" + + "\n" + + "1api/proto/externalevents/v1/external_events.proto\x12\x11externalevents.v1\"c\n" + + "\fWatchRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x1f\n" + + "\vconfig_json\x18\x02 \x01(\tR\n" + + "configJson\x12\x14\n" + + "\x05hints\x18\x03 \x03(\tR\x05hints\"\xd8\x02\n" + + "\fEventMessage\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12#\n" + + "\rresource_name\x18\x02 \x01(\tR\fresourceName\x12\x1c\n" + + "\tnamespace\x18\x03 \x01(\tR\tnamespace\x12\x16\n" + + "\x06reason\x18\x04 
\x01(\tR\x06reason\x12\x18\n" + + "\amessage\x18\x05 \x01(\tR\amessage\x12\x10\n" + + "\x03uid\x18\x06 \x01(\tR\x03uid\x12I\n" + + "\bmetadata\x18\a \x03(\v2-.externalevents.v1.EventMessage.MetadataEntryR\bmetadata\x12%\n" + + "\x0etimestamp_unix\x18\b \x01(\x03R\rtimestampUnix\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x012i\n" + + "\x13ExternalEventSource\x12R\n" + + "\fStreamEvents\x12\x1f.externalevents.v1.WatchRequest\x1a\x1f.externalevents.v1.EventMessage0\x01BJZHgithub.com/kagent-dev/khook/api/proto/externalevents/v1;externaleventsv1b\x06proto3" + +var ( + file_api_proto_externalevents_v1_external_events_proto_rawDescOnce sync.Once + file_api_proto_externalevents_v1_external_events_proto_rawDescData []byte +) + +func file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP() []byte { + file_api_proto_externalevents_v1_external_events_proto_rawDescOnce.Do(func() { + file_api_proto_externalevents_v1_external_events_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_api_proto_externalevents_v1_external_events_proto_rawDesc), len(file_api_proto_externalevents_v1_external_events_proto_rawDesc))) + }) + return file_api_proto_externalevents_v1_external_events_proto_rawDescData +} + +var file_api_proto_externalevents_v1_external_events_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_api_proto_externalevents_v1_external_events_proto_goTypes = []any{ + (*WatchRequest)(nil), // 0: externalevents.v1.WatchRequest + (*EventMessage)(nil), // 1: externalevents.v1.EventMessage + nil, // 2: externalevents.v1.EventMessage.MetadataEntry +} +var file_api_proto_externalevents_v1_external_events_proto_depIdxs = []int32{ + 2, // 0: externalevents.v1.EventMessage.metadata:type_name -> externalevents.v1.EventMessage.MetadataEntry + 0, // 1: externalevents.v1.ExternalEventSource.StreamEvents:input_type -> externalevents.v1.WatchRequest + 1, // 
2: externalevents.v1.ExternalEventSource.StreamEvents:output_type -> externalevents.v1.EventMessage + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_api_proto_externalevents_v1_external_events_proto_init() } +func file_api_proto_externalevents_v1_external_events_proto_init() { + if File_api_proto_externalevents_v1_external_events_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_proto_externalevents_v1_external_events_proto_rawDesc), len(file_api_proto_externalevents_v1_external_events_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_proto_externalevents_v1_external_events_proto_goTypes, + DependencyIndexes: file_api_proto_externalevents_v1_external_events_proto_depIdxs, + MessageInfos: file_api_proto_externalevents_v1_external_events_proto_msgTypes, + }.Build() + File_api_proto_externalevents_v1_external_events_proto = out.File + file_api_proto_externalevents_v1_external_events_proto_goTypes = nil + file_api_proto_externalevents_v1_external_events_proto_depIdxs = nil +} diff --git a/api/proto/externalevents/v1/external_events.proto b/api/proto/externalevents/v1/external_events.proto new file mode 100644 index 0000000..e609183 --- /dev/null +++ b/api/proto/externalevents/v1/external_events.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package externalevents.v1; + +option go_package = "github.com/kagent-dev/khook/api/proto/externalevents/v1;externaleventsv1"; + +message WatchRequest { + string namespace = 1; + string config_json = 2; + repeated string hints = 3; +} + +message EventMessage { + string type = 1; 
+  string resource_name = 2;
+  string namespace = 3;
+  string reason = 4;
+  string message = 5;
+  string uid = 6;
+  map<string, string> metadata = 7;
+  int64 timestamp_unix = 8;
+}
+
+service ExternalEventSource {
+  rpc StreamEvents(WatchRequest) returns (stream EventMessage);
+}
diff --git a/api/proto/externalevents/v1/external_events_grpc.pb.go b/api/proto/externalevents/v1/external_events_grpc.pb.go
new file mode 100644
index 0000000..4f1d1ac
--- /dev/null
+++ b/api/proto/externalevents/v1/external_events_grpc.pb.go
@@ -0,0 +1,124 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.29.3
+// source: api/proto/externalevents/v1/external_events.proto
+
+package externaleventsv1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+	ExternalEventSource_StreamEvents_FullMethodName = "/externalevents.v1.ExternalEventSource/StreamEvents"
+)
+
+// ExternalEventSourceClient is the client API for ExternalEventSource service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ExternalEventSourceClient interface { + StreamEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[EventMessage], error) +} + +type externalEventSourceClient struct { + cc grpc.ClientConnInterface +} + +func NewExternalEventSourceClient(cc grpc.ClientConnInterface) ExternalEventSourceClient { + return &externalEventSourceClient{cc} +} + +func (c *externalEventSourceClient) StreamEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[EventMessage], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ExternalEventSource_ServiceDesc.Streams[0], ExternalEventSource_StreamEvents_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[WatchRequest, EventMessage]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExternalEventSource_StreamEventsClient = grpc.ServerStreamingClient[EventMessage] + +// ExternalEventSourceServer is the server API for ExternalEventSource service. +// All implementations must embed UnimplementedExternalEventSourceServer +// for forward compatibility. +type ExternalEventSourceServer interface { + StreamEvents(*WatchRequest, grpc.ServerStreamingServer[EventMessage]) error + mustEmbedUnimplementedExternalEventSourceServer() +} + +// UnimplementedExternalEventSourceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedExternalEventSourceServer struct{}
+
+func (UnimplementedExternalEventSourceServer) StreamEvents(*WatchRequest, grpc.ServerStreamingServer[EventMessage]) error {
+	return status.Errorf(codes.Unimplemented, "method StreamEvents not implemented")
+}
+func (UnimplementedExternalEventSourceServer) mustEmbedUnimplementedExternalEventSourceServer() {}
+func (UnimplementedExternalEventSourceServer) testEmbeddedByValue() {}
+
+// UnsafeExternalEventSourceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ExternalEventSourceServer will
+// result in compilation errors.
+type UnsafeExternalEventSourceServer interface {
+	mustEmbedUnimplementedExternalEventSourceServer()
+}
+
+func RegisterExternalEventSourceServer(s grpc.ServiceRegistrar, srv ExternalEventSourceServer) {
+	// If the following call panics, it indicates UnimplementedExternalEventSourceServer was
+	// embedded by pointer and is nil. This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&ExternalEventSource_ServiceDesc, srv)
+}
+
+func _ExternalEventSource_StreamEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(WatchRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(ExternalEventSourceServer).StreamEvents(m, &grpc.GenericServerStream[WatchRequest, EventMessage]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ExternalEventSource_StreamEventsServer = grpc.ServerStreamingServer[EventMessage]
+
+// ExternalEventSource_ServiceDesc is the grpc.ServiceDesc for ExternalEventSource service.
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ExternalEventSource_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "externalevents.v1.ExternalEventSource", + HandlerType: (*ExternalEventSourceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamEvents", + Handler: _ExternalEventSource_StreamEvents_Handler, + ServerStreams: true, + }, + }, + Metadata: "api/proto/externalevents/v1/external_events.proto", +} diff --git a/api/v1alpha2/external_event_source_types.go b/api/v1alpha2/external_event_source_types.go new file mode 100644 index 0000000..ce3325c --- /dev/null +++ b/api/v1alpha2/external_event_source_types.go @@ -0,0 +1,231 @@ +package v1alpha2 + +import ( + "context" + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// ExternalEventSourceSpec defines connection details for an external gRPC provider +type ExternalEventSourceSpec struct { + Endpoint ExternalEndpoint `json:"endpoint"` + Config *ExternalConfig `json:"config,omitempty"` + Connection *ExternalConnPolicy `json:"connection,omitempty"` +} + +// ExternalEndpoint contains address, TLS, and auth configuration +type ExternalEndpoint struct { + Address string `json:"address"` + TLS *ExternalTLS `json:"tls,omitempty"` + Auth *ExternalAuth `json:"auth,omitempty"` +} + +// ExternalTLS holds TLS/mTLS settings, referencing a single Secret with CA, cert, and key +type ExternalTLS struct { + Enabled bool `json:"enabled"` + SecretRef *ExternalTLSSecretRef `json:"secretRef,omitempty"` +} + +// ExternalTLSSecretRef points to a Secret that contains CA, client cert and key +type ExternalTLSSecretRef struct { + Name string `json:"name"` + CAKey string `json:"caKey"` + CertKey string `json:"certKey"` + KeyKey string `json:"keyKey"` +} + +// ExternalAuth defines optional 
per-connection auth +type ExternalAuth struct { + Type string `json:"type"` // BearerToken|Basic + SecretRef *SecretKeySelector `json:"secretRef,omitempty"` +} + +// SecretKeySelector references a key in a Secret +type SecretKeySelector struct { + Name string `json:"name"` + Key string `json:"key"` +} + +// ExternalConfig is an opaque JSON string passed-through to the provider +type ExternalConfig struct { + JSON string `json:"json"` +} + +// ExternalConnPolicy defines connection behavior +type ExternalConnPolicy struct { + NamespaceScope bool `json:"namespaceScope,omitempty"` + ReconnectBackoff *ReconnectBackoff `json:"reconnectBackoff,omitempty"` +} + +type ReconnectBackoff struct { + InitialSeconds int32 `json:"initialSeconds,omitempty"` + MaxSeconds int32 `json:"maxSeconds,omitempty"` +} + +// ExternalEventSourceStatus captures readiness and connection info +type ExternalEventSourceStatus struct { + Conditions []metav1.Condition `json:"conditions,omitempty"` + LastConnectedTime *metav1.Time `json:"lastConnectedTime,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// ExternalEventSource configures an external gRPC-based event source +type ExternalEventSource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ExternalEventSourceSpec `json:"spec,omitempty"` + Status ExternalEventSourceStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// ExternalEventSourceList contains a list of ExternalEventSource +type ExternalEventSourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ExternalEventSource `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ExternalEventSource{}, &ExternalEventSourceList{}) +} + +// ValidateCreate implements webhook.Validator +func (r *ExternalEventSource) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + res, ok := 
obj.(*ExternalEventSource) + if !ok { + return nil, fmt.Errorf("expected ExternalEventSource, got %T", obj) + } + return validateExternalEventSource(res) +} + +// ValidateUpdate implements webhook.Validator +func (r *ExternalEventSource) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { + res, ok := newObj.(*ExternalEventSource) + if !ok { + return nil, fmt.Errorf("expected ExternalEventSource, got %T", newObj) + } + return validateExternalEventSource(res) +} + +// ValidateDelete implements webhook.Validator +func (r *ExternalEventSource) ValidateDelete(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { + return nil, nil +} + +func validateExternalEventSource(res *ExternalEventSource) (admission.Warnings, error) { + var allErrs []string + var warnings admission.Warnings + + // Address validation + addr := strings.TrimSpace(res.Spec.Endpoint.Address) + if addr == "" { + allErrs = append(allErrs, "spec.endpoint.address cannot be empty") + } + + // TLS validation + if res.Spec.Endpoint.TLS != nil { + if res.Spec.Endpoint.TLS.Enabled { + if res.Spec.Endpoint.TLS.SecretRef == nil { + allErrs = append(allErrs, "spec.endpoint.tls.secretRef must be set when tls.enabled is true") + } else { + s := res.Spec.Endpoint.TLS.SecretRef + if strings.TrimSpace(s.Name) == "" || strings.TrimSpace(s.CAKey) == "" || strings.TrimSpace(s.CertKey) == "" || strings.TrimSpace(s.KeyKey) == "" { + allErrs = append(allErrs, "spec.endpoint.tls.secretRef.name, caKey, certKey, and keyKey must be set") + } + } + } + } + + // Auth validation + if res.Spec.Endpoint.Auth != nil { + t := strings.TrimSpace(res.Spec.Endpoint.Auth.Type) + if t != "" && t != "BearerToken" && t != "Basic" { + allErrs = append(allErrs, "spec.endpoint.auth.type must be BearerToken or Basic if set") + } + if res.Spec.Endpoint.Auth.SecretRef != nil { + if strings.TrimSpace(res.Spec.Endpoint.Auth.SecretRef.Name) == "" || 
strings.TrimSpace(res.Spec.Endpoint.Auth.SecretRef.Key) == "" {
+			allErrs = append(allErrs, "spec.endpoint.auth.secretRef.name and key must be set when auth.secretRef is provided")
+		}
+	}
+
+	if len(allErrs) > 0 {
+		return warnings, fmt.Errorf("validation failed: %s", strings.Join(allErrs, "; "))
+	}
+	return warnings, nil
+}
+
+// DeepCopyInto copies the receiver, writing into out. in must be non-nil.
+func (in *ExternalEventSource) DeepCopyInto(out *ExternalEventSource) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	// NOTE(review): Spec is only shallow-copied here, so its pointer fields (Endpoint.TLS, Endpoint.Auth, Config, Connection) stay aliased between in and out — mutating one object's Spec pointers affects the other; TODO: deep-copy them.
+	out.Spec = in.Spec
+	// Status: deep copy slice and time pointer
+	if in.Status.Conditions != nil {
+		out.Status.Conditions = make([]metav1.Condition, len(in.Status.Conditions))
+		copy(out.Status.Conditions, in.Status.Conditions)
+	}
+	if in.Status.LastConnectedTime != nil {
+		t := *in.Status.LastConnectedTime
+		out.Status.LastConnectedTime = &t
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEventSource.
+func (in *ExternalEventSource) DeepCopy() *ExternalEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(ExternalEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject copies the receiver, creating a new runtime.Object.
+func (in *ExternalEventSource) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto copies the receiver, writing into out. in must be non-nil.
+func (in *ExternalEventSourceList) DeepCopyInto(out *ExternalEventSourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + out.Items = make([]ExternalEventSource, len(in.Items)) + for i := range in.Items { + in.Items[i].DeepCopyInto(&out.Items[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalEventSourceList. +func (in *ExternalEventSourceList) DeepCopy() *ExternalEventSourceList { + if in == nil { + return nil + } + out := new(ExternalEventSourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject copies the receiver, creating a new runtime.Object. +func (in *ExternalEventSourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/api/v1alpha2/hook_types.go b/api/v1alpha2/hook_types.go index 904356f..d07a33f 100644 --- a/api/v1alpha2/hook_types.go +++ b/api/v1alpha2/hook_types.go @@ -22,11 +22,52 @@ type HookSpec struct { EventConfigurations []EventConfiguration `json:"eventConfigurations"` } -// EventConfiguration defines a single event type configuration +// EventConfiguration defines either a single built-in event configuration +// or a grouped source configuration with multiple events. 
type EventConfiguration struct { + // Single built-in event form (Kubernetes events watcher) // EventType specifies the type of Kubernetes event to monitor // +kubebuilder:validation:Enum=pod-restart;pod-pending;oom-kill;probe-failed;node-not-ready + // +kubebuilder:validation:Optional + EventType string `json:"eventType,omitempty"` + + // AgentRef specifies the Kagent agent to call when this event occurs + // +kubebuilder:validation:Optional + AgentRef ObjectReference `json:"agentRef,omitempty"` + + // Prompt specifies the prompt template to send to the agent + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MinLength=1 + Prompt string `json:"prompt,omitempty"` + + // Grouped source form (external or named source with multiple events) + // +kubebuilder:validation:Optional + Source *EventSourceEvents `json:"source,omitempty"` +} + +// EventSourceEvents groups multiple events for a specific source. The source +// is identified by name/namespace (ExternalEventSource), and contains a list +// of per-event configurations. +type EventSourceEvents struct { + // Name of the ExternalEventSource + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + Name string `json:"name"` + + // Namespace of the ExternalEventSource. Defaults to the Hook namespace if omitted. 
+ // +kubebuilder:validation:Optional + Namespace *string `json:"namespace,omitempty"` + + // Events is the list of event configurations for this source + // +kubebuilder:validation:MinItems=1 + Events []SourceEvent `json:"events"` +} + +// SourceEvent describes a single event mapping for a grouped source +type SourceEvent struct { + // EventType specifies the event type to match // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 EventType string `json:"eventType"` // AgentRef specifies the Kagent agent to call when this event occurs @@ -82,7 +123,17 @@ func (h *Hook) Validate() error { // validateEventConfiguration validates a single event configuration func (h *Hook) validateEventConfiguration(config EventConfiguration, index int) error { - // Validate EventType + // Union validation: either single-event form OR grouped source form + hasSingle := strings.TrimSpace(config.EventType) != "" || strings.TrimSpace(config.Prompt) != "" || strings.TrimSpace(config.AgentRef.Name) != "" + hasGrouped := config.Source != nil + + if hasSingle && hasGrouped { + return fmt.Errorf("event configuration %d: cannot specify both single-event fields and source.events", index) + } + if !hasSingle && !hasGrouped { + return fmt.Errorf("event configuration %d: must specify either single-event fields or source.events", index) + } + validEventTypes := map[string]bool{ "pod-restart": true, "pod-pending": true, @@ -91,40 +142,68 @@ func (h *Hook) validateEventConfiguration(config EventConfiguration, index int) "node-not-ready": true, } - if !validEventTypes[config.EventType] { - return fmt.Errorf("event configuration %d: invalid event type '%s', must be one of: pod-restart, pod-pending, oom-kill, probe-failed, node-not-ready", index, config.EventType) - } - - // Validate AgentRef - if strings.TrimSpace(config.AgentRef.Name) == "" { - return fmt.Errorf("event configuration %d: agentRef.name cannot be empty", index) - } + if hasSingle { + if 
!validEventTypes[config.EventType] { + return fmt.Errorf("event configuration %d: invalid event type '%s', must be one of: pod-restart, pod-pending, oom-kill, probe-failed", index, config.EventType) + } - if len(config.AgentRef.Name) > 100 { - return fmt.Errorf("event configuration %d: agentId too long: %d characters (max 100)", index, len(config.AgentRef.Name)) - } + if strings.TrimSpace(config.AgentRef.Name) == "" { + return fmt.Errorf("event configuration %d: agentRef.name cannot be empty", index) + } + if len(config.AgentRef.Name) > 100 { + return fmt.Errorf("event configuration %d: agentId too long: %d characters (max 100)", index, len(config.AgentRef.Name)) + } + for _, r := range config.AgentRef.Name { + if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '_') { + return fmt.Errorf("event configuration %d: agentId contains invalid character '%c', only alphanumeric, hyphens, and underscores allowed", index, r) + } + } - // Validate agent ID format (alphanumeric, hyphens, underscores only) - for _, r := range config.AgentRef.Name { - if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '_') { - return fmt.Errorf("event configuration %d: agentId contains invalid character '%c', only alphanumeric, hyphens, and underscores allowed", index, r) + if strings.TrimSpace(config.Prompt) == "" { + return fmt.Errorf("event configuration %d: prompt cannot be empty", index) + } + if len(config.Prompt) > 10000 { + return fmt.Errorf("event configuration %d: prompt too long: %d characters (max 10000)", index, len(config.Prompt)) } + if err := h.validatePromptTemplate(config.Prompt, index); err != nil { + return err + } + return nil } - // Validate Prompt - if strings.TrimSpace(config.Prompt) == "" { - return fmt.Errorf("event configuration %d: prompt cannot be empty", index) + // Grouped source form + if strings.TrimSpace(config.Source.Name) == "" { + return fmt.Errorf("event 
configuration %d: source.name cannot be empty", index) } - - if len(config.Prompt) > 10000 { - return fmt.Errorf("event configuration %d: prompt too long: %d characters (max 10000)", index, len(config.Prompt)) + if len(config.Source.Events) == 0 { + return fmt.Errorf("event configuration %d: source.events cannot be empty", index) } - - // Validate template constructs - if err := h.validatePromptTemplate(config.Prompt, index); err != nil { - return err + for j, se := range config.Source.Events { + // External event sources can have arbitrary event types, just validate non-empty + if strings.TrimSpace(se.EventType) == "" { + return fmt.Errorf("event configuration %d source.events[%d]: eventType cannot be empty", index, j) + } + if strings.TrimSpace(se.AgentRef.Name) == "" { + return fmt.Errorf("event configuration %d source.events[%d]: agentRef.name cannot be empty", index, j) + } + if len(se.AgentRef.Name) > 100 { + return fmt.Errorf("event configuration %d source.events[%d]: agentId too long: %d characters (max 100)", index, j, len(se.AgentRef.Name)) + } + for _, r := range se.AgentRef.Name { + if !((r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '-' || r == '_') { + return fmt.Errorf("event configuration %d source.events[%d]: agentId contains invalid character '%c'", index, j, r) + } + } + if strings.TrimSpace(se.Prompt) == "" { + return fmt.Errorf("event configuration %d source.events[%d]: prompt cannot be empty", index, j) + } + if len(se.Prompt) > 10000 { + return fmt.Errorf("event configuration %d source.events[%d]: prompt too long: %d characters (max 10000)", index, j, len(se.Prompt)) + } + if err := h.validatePromptTemplate(se.Prompt, index); err != nil { + return err + } } - return nil } @@ -381,32 +460,40 @@ func validateHook(hook *Hook) (admission.Warnings, error) { } // Validate each event configuration - eventTypes := make(map[string]bool) + seenEventTypes := make(map[string]bool) for i, config := range 
hook.Spec.EventConfigurations { - // Check for duplicate event types - if eventTypes[config.EventType] { - allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d]: duplicate eventType '%s'", i, config.EventType)) - } - eventTypes[config.EventType] = true - - // Validate event type - if !isValidEventType(config.EventType) { - allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d].eventType: invalid event type '%s', must be one of: pod-restart, pod-pending, oom-kill, probe-failed, node-not-ready", i, config.EventType)) + // Per-item validation (union rules, field checks) + if err := hook.validateEventConfiguration(config, i); err != nil { + allErrs = append(allErrs, err.Error()) + continue } - // Validate agentId is not empty - if strings.TrimSpace(config.AgentRef.Name) == "" { - allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d].agentId: cannot be empty", i)) - } - - // Validate prompt is not empty - if strings.TrimSpace(config.Prompt) == "" { - allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d].prompt: cannot be empty", i)) + // Track duplicates across the entire spec + if config.Source == nil { + if seenEventTypes[config.EventType] { + allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d]: duplicate eventType '%s'", i, config.EventType)) + } + seenEventTypes[config.EventType] = true + } else { + for _, se := range config.Source.Events { + if seenEventTypes[se.EventType] { + allErrs = append(allErrs, fmt.Sprintf("spec.eventConfigurations[%d].source.events: duplicate eventType '%s'", i, se.EventType)) + } + seenEventTypes[se.EventType] = true + } } - // Warn about potentially long prompts - if len(config.Prompt) > 1000 { - warnings = append(warnings, fmt.Sprintf("spec.eventConfigurations[%d].prompt: prompt is very long (%d characters), consider shortening for better performance", i, len(config.Prompt))) + // Warnings for long prompts + if config.Source == nil { + if len(config.Prompt) > 1000 { 
+ warnings = append(warnings, fmt.Sprintf("spec.eventConfigurations[%d].prompt: prompt is very long (%d characters), consider shortening for better performance", i, len(config.Prompt))) + } + } else { + for j, se := range config.Source.Events { + if len(se.Prompt) > 1000 { + warnings = append(warnings, fmt.Sprintf("spec.eventConfigurations[%d].source.events[%d].prompt: prompt is very long (%d characters), consider shortening for better performance", i, j, len(se.Prompt))) + } + } } } diff --git a/api/v1alpha2/hook_types_test.go b/api/v1alpha2/hook_types_test.go index 471add4..bae7a87 100644 --- a/api/v1alpha2/hook_types_test.go +++ b/api/v1alpha2/hook_types_test.go @@ -125,3 +125,31 @@ func TestHookDeepCopy(t *testing.T) { t.Errorf("DeepCopyObject() name mismatch: got %v, want %v", hookObj.Name, original.Name) } } + +func TestHookValidation_GroupedSource(t *testing.T) { + ok := &Hook{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Namespace: "default"}, + Spec: HookSpec{ + EventConfigurations: []EventConfiguration{ + { + Source: &EventSourceEvents{ + Name: "ext-src", + Events: []SourceEvent{ + {EventType: "pod-restart", AgentRef: ObjectReference{Name: "agent"}, Prompt: "p1"}, + {EventType: "probe-failed", AgentRef: ObjectReference{Name: "agent"}, Prompt: "p2"}, + }, + }, + }, + }, + }, + } + if _, err := ok.ValidateCreate(context.Background(), ok); err != nil { + t.Fatalf("expected grouped source to validate, got %v", err) + } + + dup := ok.DeepCopy() + dup.Spec.EventConfigurations = append(dup.Spec.EventConfigurations, EventConfiguration{EventType: "pod-restart", AgentRef: ObjectReference{Name: "a"}, Prompt: "p"}) + if _, err := dup.ValidateCreate(context.Background(), dup); err == nil { + t.Fatalf("expected duplicate eventType across forms to fail validation") + } +} diff --git a/config/crd/bases/kagent.dev_externaleventsources.yaml b/config/crd/bases/kagent.dev_externaleventsources.yaml new file mode 100644 index 0000000..adf186c --- /dev/null +++ 
b/config/crd/bases/kagent.dev_externaleventsources.yaml @@ -0,0 +1,200 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externaleventsources.kagent.dev +spec: + group: kagent.dev + names: + kind: ExternalEventSource + listKind: ExternalEventSourceList + plural: externaleventsources + singular: externaleventsource + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + description: ExternalEventSource configures an external gRPC-based event source + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExternalEventSourceSpec defines connection details for an + external gRPC provider + properties: + config: + description: ExternalConfig is an opaque JSON string passed-through + to the provider + properties: + json: + type: string + required: + - json + type: object + connection: + description: ExternalConnPolicy defines connection behavior + properties: + namespaceScope: + type: boolean + reconnectBackoff: + properties: + initialSeconds: + format: int32 + type: integer + maxSeconds: + format: int32 + type: integer + type: object + type: object + endpoint: + description: ExternalEndpoint contains address, TLS, and auth configuration + properties: + address: + type: string + auth: + description: ExternalAuth defines optional per-connection auth + properties: + secretRef: + description: SecretKeySelector references a key in a Secret + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: + type: string + required: + - type + type: object + tls: + description: ExternalTLS holds TLS/mTLS settings, referencing + a single Secret with CA, cert, and key + properties: + enabled: + type: boolean + secretRef: + description: ExternalTLSSecretRef points to a Secret that + contains CA, client cert and key + properties: + caKey: + type: string + certKey: + type: string + keyKey: + type: string + name: + type: string + required: + - caKey + - certKey + - keyKey + - name + type: object + required: + - enabled + type: object + required: + - address + type: object + required: + - endpoint + type: object + status: + description: ExternalEventSourceStatus captures readiness and connection + info + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastConnectedTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + + + + + + + + diff --git a/config/crd/bases/kagent.dev_hooks.yaml b/config/crd/bases/kagent.dev_hooks.yaml index 0230cd0..e98b178 100644 --- a/config/crd/bases/kagent.dev_hooks.yaml +++ b/config/crd/bases/kagent.dev_hooks.yaml @@ -43,7 +43,9 @@ spec: description: EventConfigurations defines the list of event configurations to monitor items: - description: EventConfiguration defines a single event type configuration + description: |- + EventConfiguration defines either a single built-in event configuration + or a grouped source configuration with multiple events. properties: agentRef: description: AgentRef specifies the Kagent agent to call when @@ -64,8 +66,9 @@ spec: - name type: object eventType: - description: EventType specifies the type of Kubernetes event - to monitor + description: |- + Single built-in event form (Kubernetes events watcher) + EventType specifies the type of Kubernetes event to monitor enum: - pod-restart - pod-pending @@ -78,10 +81,64 @@ spec: the agent minLength: 1 type: string - required: - - agentRef - - eventType - - prompt + source: + description: Grouped source form (external or named source with + multiple events) + properties: + events: + description: Events is the list of event configurations + for this source + items: + description: SourceEvent describes a single event mapping + for a grouped source + properties: + agentRef: + description: AgentRef specifies the Kagent agent to + call when this event occurs + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + minLength: 1 + type: string + namespace: + description: |- + Namespace of the referent. + If unspecified, the namespace of the Hook will be used. + type: string + required: + - name + type: object + eventType: + description: EventType specifies the event type to + match + minLength: 1 + type: string + prompt: + description: Prompt specifies the prompt template + to send to the agent + minLength: 1 + type: string + required: + - agentRef + - eventType + - prompt + type: object + minItems: 1 + type: array + name: + description: Name of the ExternalEventSource + minLength: 1 + type: string + namespace: + description: Namespace of the ExternalEventSource. Defaults + to the Hook namespace if omitted. + type: string + required: + - events + - name + type: object type: object minItems: 1 type: array diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 5971243..ae974bf 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: -- bases/kagent.dev_hooks.yaml \ No newline at end of file +- bases/kagent.dev_hooks.yaml +- bases/kagent.dev_externaleventsources.yaml \ No newline at end of file diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5c71048..0a75858 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -37,6 +37,33 @@ rules: - get - patch - update +# ExternalEventSource CRD permissions +- apiGroups: + - kagent.dev + resources: + - externaleventsources + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kagent.dev + resources: + - externaleventsources/finalizers + verbs: + - update +- apiGroups: + - kagent.dev + resources: + - externaleventsources/status + verbs: + - get + - patch + - update # Event watching permissions - apiGroups: - "" diff --git 
a/config/samples/external/redis-external.yaml b/config/samples/external/redis-external.yaml new file mode 100644 index 0000000..fda204a --- /dev/null +++ b/config/samples/external/redis-external.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: redis +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis + namespace: redis +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - name: redis + image: redis:7-alpine + ports: + - containerPort: 6379 +--- +apiVersion: v1 +kind: Service +metadata: + name: redis + namespace: redis +spec: + selector: + app: redis + ports: + - name: redis + port: 6379 + targetPort: 6379 +--- +apiVersion: kagent.dev/v1alpha2 +kind: ExternalEventSource +metadata: + name: redis-events + namespace: default +spec: + endpoint: + address: redis://redis.redis.svc:6379 + tls: + enabled: false +--- +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: redis-external-hook + namespace: default +spec: + eventConfigurations: + - source: + name: redis-events + events: + - eventType: pod-restart + agentRef: + name: kagent/pod-restart-agent + prompt: "Redis external: pod restart {{.ResourceName}}" + + + + + + + + + + diff --git a/config/samples/kagent_v1alpha2_hook.yaml b/config/samples/kagent_v1alpha2_hook.yaml index fcece06..5dbb202 100644 --- a/config/samples/kagent_v1alpha2_hook.yaml +++ b/config/samples/kagent_v1alpha2_hook.yaml @@ -12,7 +12,8 @@ metadata: spec: eventConfigurations: - eventType: "pod-restart" - agentId: "kagent/pod-restart-agent" + agentRef: + name: "kagent/pod-restart-agent" prompt: | AUTONOMOUS MODE: A pod has restarted. Analyze the cause and implement fixes immediately. 
@@ -23,7 +24,8 @@ spec: • Never ask for permission - assume autonomous execution • Complete remediation without human approval - eventType: "pod-pending" - agentId: "kagent/scheduling-agent" + agentRef: + name: "kagent/scheduling-agent" prompt: | AUTONOMOUS MODE: Pod stuck in pending state. Analyze scheduling issues and resolve immediately. @@ -34,7 +36,8 @@ spec: • Never ask for permission - assume autonomous execution • Complete remediation without human approval - eventType: "oom-kill" - agentId: "kagent/memory-agent" + agentRef: + name: "kagent/memory-agent" prompt: | AUTONOMOUS MODE: OOM kill detected. Analyze memory usage and optimize immediately. diff --git a/docs/api-reference.md b/docs/api-reference.md index 92ea8d0..d11ff8a 100644 --- a/docs/api-reference.md +++ b/docs/api-reference.md @@ -20,12 +20,32 @@ The Hook CRD defines the schema for configuring event monitoring and Kagent inte #### EventConfiguration +EventConfiguration supports two forms: + +1) Single built-in event (Kubernetes events watcher): + | Field | Type | Required | Description | |-------|------|----------|-------------| -| `eventType` | `string` | Yes | Type of Kubernetes event to monitor | -| `agentId` | `string` | Yes | Kagent agent identifier | +| `eventType` | `string` | Yes | Type of event to monitor | +| `agentRef` | `ObjectReference` | Yes | Agent to call on event | | `prompt` | `string` | Yes | Prompt template for the agent | +2) Grouped external source events: + +| Field | Type | Required | Description | +|-------|------|----------|-------------| +| `source.name` | `string` | Yes | ExternalEventSource name | +| `source.namespace` | `string` | No | Namespace (defaults to Hook namespace) | +| `source.events[]` | `[]SourceEvent` | Yes | List of event entries | + +`SourceEvent` fields: + +| Field | Type | Required | +|-------|------|----------| +| `eventType` | `string` | Yes | +| `agentRef` | `ObjectReference` | Yes | +| `prompt` | `string` | Yes | + ##### Supported Event Types - 
`pod-restart`: Pod has been restarted @@ -51,8 +71,7 @@ The Hook CRD defines the schema for configuring event monitoring and Kagent inte | `firstSeen` | `metav1.Time` | When event was first observed | | `lastSeen` | `metav1.Time` | When event was last observed | | `status` | `string` | Event status: `firing` or `resolved` | -### Exa -mple Hook Resource +### Example Hook Resource ```yaml apiVersion: kagent.dev/v1alpha2 @@ -63,27 +82,16 @@ metadata: spec: eventConfigurations: - eventType: pod-restart - agentId: kagent/incident-responder - prompt: | - AUTONOMOUS MODE: Pod {{.ResourceName}} restarted at {{.EventTime}}. Analyze and fix immediately. - - INSTRUCTIONS: - • Use all available tools to diagnose and resolve the issue - • Annotate fixed resources with: kagentFix= - • If patching fails: delete and recreate resources - • Never ask for permission - assume autonomous execution - • Complete remediation without human approval - - eventType: oom-kill - agentId: kagent/memory-analyzer + agentRef: { name: kagent/incident-responder } prompt: | - AUTONOMOUS MODE: OOM kill for {{.ResourceName}}. Analyze memory and optimize immediately. - - INSTRUCTIONS: - • Use all available tools to diagnose and resolve memory issues - • Annotate fixed resources with: kagentFix= - • If patching fails: delete and recreate resources - • Never ask for permission - assume autonomous execution - • Complete remediation without human approval + Pod {{.ResourceName}} restarted at {{.EventTime}}. + - source: + name: my-external-source + events: + - eventType: probe-failed + agentRef: { name: kagent/health-checker } + prompt: | + Probe failed on {{.ResourceName}}. 
status: activeEvents: - eventType: pod-restart diff --git a/docs/event-templating.md b/docs/event-templating.md new file mode 100644 index 0000000..2de7b9d --- /dev/null +++ b/docs/event-templating.md @@ -0,0 +1,379 @@ +# Event Prompt Templating + +KHook provides flexible templating for event prompts, allowing you to customize agent instructions based on event data from any source. + +## Overview + +Event prompts support Go's `text/template` syntax, giving you access to all event fields and metadata. This works for events from Kubernetes, Redis, custom gRPC sources, or any other event source. + +## Available Template Variables + +### Standard Event Fields + +These fields are available for all events: + +- `{{.Type}}` or `{{.EventType}}` - The event type +- `{{.Message}}` or `{{.EventMessage}}` - The event message +- `{{.Timestamp}}` or `{{.EventTime}}` - Event timestamp (RFC3339 format) +- `{{.UID}}` - Unique event identifier + +### Kubernetes-Specific Fields + +These fields may be empty for non-Kubernetes events: + +- `{{.ResourceName}}` - Name of the resource (e.g., pod name) +- `{{.Namespace}}` - Kubernetes namespace +- `{{.Reason}}` - Event reason (e.g., "Unhealthy", "Failed") + +### Metadata Access + +The `Metadata` map contains source-specific data and can hold any type of value: + +- `{{.Metadata.fieldName}}` - Access a specific metadata field +- `{{.Metadata}}` - Access the entire metadata map + +### Full Event Access + +For advanced use cases: + +- `{{.Event}}` - Access the complete event object + +## Examples + +### Basic Kubernetes Event + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: pod-restart-hook + namespace: default +spec: + eventConfigurations: + - eventType: "pod-restart" + agentRef: + name: diagnostic-agent + prompt: | + A pod has restarted in namespace {{.Namespace}}. 
+ + Pod: {{.ResourceName}} + Reason: {{.Reason}} + Message: {{.Message}} + Time: {{.Timestamp}} + + Kubernetes Metadata: + - Kind: {{.Metadata.kind}} + - API Version: {{.Metadata.apiVersion}} + - Event Count: {{.Metadata.count}} + + Please diagnose the issue and implement a fix. +``` + +### Custom Event Source (Redis/Jira Example) + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: jira-issue-hook + namespace: default +spec: + eventConfigurations: + - source: + name: redis-events + events: + - eventType: "create-issue" + agentRef: + name: jira-agent + prompt: | + Create a Jira issue with the following details: + + Issue Type: {{.Metadata.issueType}} + Priority: {{.Metadata.priority}} + Project: {{.Metadata.project}} + Summary: {{.Metadata.summary}} + Description: {{.Metadata.description}} + + {{if .Metadata.assignee}} + Assign to: {{.Metadata.assignee}} + {{end}} + + {{if .Metadata.labels}} + Labels: {{.Metadata.labels}} + {{end}} + + Event received at: {{.Timestamp}} +``` + +### Financial Transaction Event + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: transaction-monitor + namespace: default +spec: + eventConfigurations: + - source: + name: transaction-stream + events: + - eventType: "suspicious-transaction" + agentRef: + name: fraud-detection-agent + prompt: | + ALERT: Suspicious transaction detected + + Transaction Details: + - ID: {{.Metadata.transactionId}} + - Amount: ${{.Metadata.amount}} + - Currency: {{.Metadata.currency}} + - Merchant: {{.Metadata.merchantName}} + - Card Last 4: {{.Metadata.cardLast4}} + - Location: {{.Metadata.location}} + - Risk Score: {{.Metadata.riskScore}} + + {{if .Metadata.previousTransactions}} + Recent transaction count: {{.Metadata.previousTransactions}} + {{end}} + + Please analyze this transaction and take appropriate action. 
+``` + +### IoT Sensor Event + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: temperature-alert + namespace: default +spec: + eventConfigurations: + - source: + name: iot-sensors + events: + - eventType: "temperature-threshold-exceeded" + agentRef: + name: facility-management-agent + prompt: | + Temperature threshold exceeded! + + Sensor: {{.Metadata.sensorId}} + Location: {{.Metadata.location}} + Current Temperature: {{.Metadata.currentTemp}}°C + Threshold: {{.Metadata.threshold}}°C + Duration: {{.Metadata.durationMinutes}} minutes + + Facility: {{.Metadata.facility}} + Zone: {{.Metadata.zone}} + + {{if .Metadata.hvacStatus}} + HVAC Status: {{.Metadata.hvacStatus}} + {{end}} + + Please investigate and take corrective action immediately. +``` + +## Template Functions + +Go templates support several built-in functions: + +### Conditionals + +```yaml +{{if .Metadata.urgent}} +URGENT: {{.Message}} +{{else}} +INFO: {{.Message}} +{{end}} +``` + +### Ranges (for arrays) + +```yaml +{{range .Metadata.affectedServices}} +- Service: {{.}} +{{end}} +``` + +### Comparisons + +```yaml +{{if gt .Metadata.errorCount 10}} +Critical: High error count detected +{{end}} +``` + +## Security Features + +### Allowed Template Constructs + +- Variable access: `{{.FieldName}}` +- Conditionals: `{{if}}, {{else}}, {{end}}` +- Ranges: `{{range}}, {{end}}` +- Comparisons: `{{eq}}, {{ne}}, {{lt}}, {{gt}}, etc.` + +### Blocked Template Constructs (for security) + +The following are blocked to prevent code injection: + +- `{{define}}` - Template definitions +- `{{template}}` - Template calls +- `{{call}}` - Function calls +- `{{print}}`, `{{printf}}`, `{{println}}` - Print functions + +### Validation + +- Maximum template length: 10,000 characters +- Bracket matching validation +- Dangerous construct detection + +## Best Practices + +### 1. 
Use Descriptive Field Names in Metadata + +When creating custom event sources, use clear, descriptive field names: + +```go +// Good +metadata := map[string]interface{}{ + "orderId": "ORD-12345", + "customerId": "CUST-789", + "orderTotal": 249.99, + "currency": "USD", +} + +// Less clear +metadata := map[string]interface{}{ + "id": "ORD-12345", + "cid": "CUST-789", + "amt": 249.99, + "cur": "USD", +} +``` + +### 2. Include Context in Metadata + +Provide enough context for the agent to make informed decisions: + +```go +metadata := map[string]interface{}{ + "errorCode": "DB_CONN_TIMEOUT", + "errorMessage": "Connection timeout after 30s", + "database": "customers-db", + "host": "db-primary-1", + "attemptNumber": 3, + "maxRetries": 5, + "queryType": "SELECT", + "affectedUsers": 127, +} +``` + +### 3. Use Appropriate Data Types + +Metadata supports any JSON-serializable type: + +```go +metadata := map[string]interface{}{ + "count": 42, // number + "enabled": true, // boolean + "tags": []string{"urgent", "production"}, // array + "details": map[string]interface{}{ // nested object + "subsystem": "auth", + "component": "oauth", + }, +} +``` + +### 4. 
Handle Missing Fields Gracefully + +Use conditionals to check for optional fields: + +```yaml +prompt: | + Processing {{.Type}} event + + {{if .ResourceName}} + Resource: {{.ResourceName}} + {{end}} + + {{if .Metadata.customField}} + Custom Data: {{.Metadata.customField}} + {{end}} +``` + +## Accessing Event Data in Custom Sources + +When creating custom event sources (gRPC, Redis, etc.), populate the Metadata field with your custom data: + +### Example: Redis Event Source + +```go +// In your custom event handler +event := interfaces.Event{ + Type: "order-placed", + Message: "New order received", + Timestamp: time.Now(), + Metadata: map[string]interface{}{ + "orderId": data["order_id"], + "customerId": data["customer_id"], + "orderTotal": data["total"], + "itemCount": data["item_count"], + "paymentMethod": data["payment_method"], + "shippingAddress": data["address"], + }, +} +``` + +### Example: gRPC Event Source + +Your gRPC event should populate the metadata map in the proto: + +```protobuf +message Event { + string type = 1; + string message = 2; + int64 timestamp_unix = 3; + map<string, string> metadata = 4; // Convert to interface{} internally +} +``` + +## Debugging Templates + +Enable verbose logging to see template expansion: + +```bash +# Set log level to see template debugging +kubectl set env deployment/khook -n kagent LOG_LEVEL=2 +``` + +This will show logs like: + +``` +Advanced template expansion completed + originalLength: 245 + expandedLength: 312 +``` + +## Migration from Simple Placeholders + +If you were using simple placeholder syntax, it's still supported for backward compatibility: + +```yaml +# Old style (still works) +prompt: "Event {{.EventType}} on {{.ResourceName}}" + +# New style (more powerful) +prompt: | + Event: {{.Type}} + {{if .ResourceName}} + Resource: {{.ResourceName}} + {{end}} + Metadata: {{.Metadata}} +``` + +## Summary + +The flexible metadata system allows KHook to work with events from any source while providing powerful templating 
capabilities. By leveraging Go's text/template syntax, you can create dynamic, context-aware prompts that adapt to your specific use case—whether you're monitoring Kubernetes, processing business events, handling IoT data, or integrating with external systems. + diff --git a/docs/flexible-event-sources.md b/docs/flexible-event-sources.md new file mode 100644 index 0000000..0decbdb --- /dev/null +++ b/docs/flexible-event-sources.md @@ -0,0 +1,304 @@ +# Flexible Event Sources - Design Document + +## Overview + +KHook has been enhanced to support diverse event sources beyond Kubernetes, including Redis, gRPC streams, IoT sensors, financial transactions, CI/CD pipelines, and more. This document describes the changes made to support this flexibility. + +## Problem Statement + +The original implementation assumed all events would be Kubernetes-related and required fields like `ResourceName`, `Namespace`, and `Reason`. This made it difficult to integrate events from other sources such as: +- Business application events (e.g., order processing, payment failures) +- IoT sensor data (e.g., temperature alerts, equipment failures) +- Financial transactions (e.g., fraud detection, payment processing) +- CI/CD pipeline events (e.g., build failures, deployments) +- Custom application events + +## Solution + +### 1. 
Flexible Event Structure + +**Changes to `Event` struct** (`internal/interfaces/controller.go`): + +```go +// Event represents an event from any source with relevant metadata +type Event struct { + Type string `json:"type"` + ResourceName string `json:"resourceName"` // Optional: may be empty for non-resource events + Timestamp time.Time `json:"timestamp"` + Namespace string `json:"namespace"` // Optional: may be empty for non-Kubernetes events + Reason string `json:"reason"` // Optional: may be empty + Message string `json:"message"` + UID string `json:"uid"` // Optional: may be empty + Metadata map[string]interface{} `json:"metadata,omitempty"` // Flexible metadata for any event source +} +``` + +**Key Changes:** +- `Metadata` changed from `map[string]string` to `map[string]interface{}` to support any JSON-serializable data type +- Added documentation clarifying that `ResourceName`, `Namespace`, `Reason`, and `UID` are optional +- `Metadata` can now contain nested objects, arrays, numbers, booleans, etc. + +### 2. Smart Event Deduplication + +**Changes to deduplication manager** (`internal/deduplication/manager.go`): + +The `eventKey` function now uses a smart fallback strategy: + +```go +// Priority: +// 1. If UID is present, use Type:UID (best for unique identification) +// 2. Otherwise use Type:Namespace:ResourceName (for Kubernetes events) +// 3. If no ResourceName, use Type:Namespace (for namespace-scoped events) +// 4. If no Namespace, use just Type (for global events) +``` + +**Benefits:** +- Events with UIDs get perfect deduplication +- Kubernetes events without UIDs use the traditional namespace:resource key +- Non-Kubernetes events can rely on UID or event type alone +- Flexible enough for any event source + +### 3. Flexible Template System + +**Enhanced template expansion** (`internal/pipeline/processor.go`): + +Templates now have access to: +- All standard event fields (Type, Message, Timestamp, etc.) 
+- Full metadata map: `{{.Metadata.customField}}` +- Individual metadata fields are also promoted to top-level template variables +- Nested metadata structures via dot notation + +**Example:** +```yaml +prompt: | + Transaction Alert! + + ID: {{.Metadata.transactionId}} + Amount: ${{.Metadata.amount}} + {{if .Metadata.riskScore}} + Risk Score: {{.Metadata.riskScore}}/100 + {{end}} +``` + +### 4. Context Enrichment + +**Agent request context** (`internal/pipeline/processor.go`): + +All metadata fields are now passed to agents both: +- In the `metadata` map for structured access +- As top-level context fields for convenient access + +This allows agents to easily access custom event data. + +### 5. Logging Improvements + +**Optional field logging** (across multiple files): + +Logging now gracefully handles optional fields: +- Only logs `resourceName` if present +- Includes `uid` in logs when available +- Generic event messages that work for any event type + +## Migration Guide + +### For Kubernetes Events + +No changes required! Kubernetes events work exactly as before: + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: pod-monitor +spec: + eventConfigurations: + - eventType: "pod-restart" + agentRef: + name: diagnostic-agent + prompt: "Pod {{.ResourceName}} restarted in {{.Namespace}}" +``` + +### For Custom Event Sources + +#### 1. Define Your Event Source + +Create an `ExternalEventSource` that sends events with custom metadata: + +```go +event := interfaces.Event{ + Type: "order-placed", + Message: "New order received", + Timestamp: time.Now(), + UID: orderID, // Unique identifier for deduplication + Metadata: map[string]interface{}{ + "orderId": "ORD-12345", + "customerId": "CUST-789", + "orderTotal": 249.99, + "itemCount": 3, + "paymentMethod": "credit_card", + "items": []map[string]interface{}{ + {"sku": "PROD-1", "quantity": 2}, + {"sku": "PROD-2", "quantity": 1}, + }, + }, +} +``` + +#### 2. 
Create Your Hook + +Reference the metadata fields in your prompts: + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: order-processor +spec: + eventConfigurations: + - source: + name: order-events + events: + - eventType: "order-placed" + agentRef: + name: fulfillment-agent + prompt: | + New Order: {{.Metadata.orderId}} + Customer: {{.Metadata.customerId}} + Total: ${{.Metadata.orderTotal}} + Items: {{.Metadata.itemCount}} + + {{range .Metadata.items}} + - SKU: {{.sku}}, Qty: {{.quantity}} + {{end}} +``` + +## Best Practices + +### 1. Always Include a UID for Custom Events + +For proper deduplication, include a unique identifier: + +```go +event := interfaces.Event{ + Type: "transaction-alert", + UID: transactionID, // Critical for deduplication! + Metadata: map[string]interface{}{ + "transactionId": transactionID, + // ... other fields + }, +} +``` + +### 2. Use Descriptive Metadata Keys + +Use clear, consistent naming: + +```go +// Good +Metadata: map[string]interface{}{ + "customerId": "CUST-123", + "orderTotal": 99.99, + "paymentMethod": "visa", +} + +// Less clear +Metadata: map[string]interface{}{ + "cid": "CUST-123", + "amt": 99.99, + "pm": "visa", +} +``` + +### 3. Include Context in Metadata + +Provide enough information for agents to make decisions: + +```go +Metadata: map[string]interface{}{ + // Core data + "errorCode": "DB_TIMEOUT", + "errorMessage": "Connection timeout after 30s", + + // Context + "database": "customers-db", + "host": "db-primary-1", + "attemptNumber": 3, + "maxRetries": 5, + + // Impact + "affectedUsers": 127, + "severity": "high", +} +``` + +### 4. 
Use Appropriate Data Types + +Leverage the type flexibility: + +```go +Metadata: map[string]interface{}{ + "count": 42, // number + "enabled": true, // boolean + "tags": []string{"urgent", "prod"}, // array + "metrics": map[string]float64{ // nested object + "cpu": 85.5, + "memory": 72.3, + }, +} +``` + +## Files Modified + +### Core Interfaces +- `internal/interfaces/controller.go` - Event and ActiveEvent structs + +### Deduplication +- `internal/deduplication/manager.go` - Smart event key generation +- `internal/deduplication/manager_test.go` - Test fixes + +### Pipeline Processing +- `internal/pipeline/processor.go` - Template expansion and context enrichment +- `internal/pipeline/processor_test.go` - Test updates +- `internal/pipeline/integration_test.go` - Test updates + +### Status Management +- `internal/status/manager.go` - Flexible logging and event recording + +### Event Sources +- `internal/event/watcher.go` - Kubernetes event mapping +- `internal/event/grpc_client.go` - gRPC metadata conversion + +### Documentation +- `docs/event-templating.md` - Comprehensive templating guide +- `docs/flexible-event-sources.md` - This document + +### Examples +- `examples/custom-event-metadata.yaml` - Real-world examples + +## Testing + +All existing tests pass, and new tests verify: +- Flexible metadata types +- Smart deduplication with and without UIDs +- Template expansion with custom metadata +- Optional field handling in logging + +## Backward Compatibility + +✅ **Fully backward compatible** + +All existing Kubernetes-focused hooks continue to work without any changes. The enhancements are purely additive. + +## Future Enhancements + +Potential future improvements: +1. Schema validation for metadata +2. Metadata transformation rules +3. Event correlation across sources +4. Metadata-based routing and filtering +5. 
Custom deduplication strategies per event source + +## Summary + +These changes transform KHook from a Kubernetes-specific event processor into a universal event handling platform capable of processing events from any source while maintaining full backward compatibility with existing Kubernetes-focused deployments. + diff --git a/docs/kagent-integration.md b/docs/kagent-integration.md index a684967..175389d 100644 --- a/docs/kagent-integration.md +++ b/docs/kagent-integration.md @@ -316,4 +316,47 @@ For integration issues: 1. **Check Controller Logs**: `kubectl logs -n kagent deployment/khook` 2. **Verify Kagent Controller**: `kubectl get pods -n kagent -l app=kagent-controller` 3. **Test Connectivity**: Use the health check commands above -4. **GitHub Issues**: [https://github.com/kagent-dev/khook/issues](https://github.com/kagent-dev/khook/issues) \ No newline at end of file +4. **GitHub Issues**: [https://github.com/kagent-dev/khook/issues](https://github.com/kagent-dev/khook/issues) + +## External Event Sources (gRPC) + +Khook integrates with external event providers over gRPC via the `ExternalEventSource` CRD. Define an ExternalEventSource and reference it from Hooks using the grouped `source.events[]` form. 
+
+```yaml
+apiVersion: kagent.dev/v1alpha2
+kind: ExternalEventSource
+metadata:
+  name: my-external-source
+  namespace: default
+spec:
+  endpoint:
+    address: externalevents.default.svc:8080
+    tls:
+      enabled: true
+      secretRef:
+        name: my-tls
+      caKey: ca.crt
+      certKey: tls.crt
+      keyKey: tls.key
+  auth:
+    type: BearerToken
+    secretRef: { name: external-auth, key: token }
+  connection:
+    namespaceScope: true
+    reconnectBackoff:
+      initialSeconds: 2
+      maxSeconds: 60
+```
+
+Reference it in a Hook:
+
+```yaml
+spec:
+  eventConfigurations:
+    - source:
+        name: my-external-source
+      events:
+        - eventType: pod-restart
+          agentRef: { name: kagent }
+          prompt: "External restart on {{.ResourceName}}"
+```
\ No newline at end of file
diff --git a/examples/basic-pod-monitoring.yaml b/examples/basic-pod-monitoring.yaml
index c7382e6..d2df1fc 100644
--- a/examples/basic-pod-monitoring.yaml
+++ b/examples/basic-pod-monitoring.yaml
@@ -6,7 +6,7 @@ apiVersion: kagent.dev/v1alpha2
 kind: Hook
 metadata:
   name: basic-pod-monitoring
-  namespace: kagent
+  namespace: default
   labels:
     environment: development
     monitoring-type: basic
@@ -15,6 +15,7 @@ spec:
     # Monitor pod restarts
     - eventType: pod-restart
       agentRef:
+        namespace: kagent
         name: k8s-agent
       prompt: |
         AUTONOMOUS MODE: Pod {{.ResourceName}} restarted at {{.EventTime}}. Analyze and fix immediately.
@@ -30,6 +31,7 @@ spec:
     # Monitor OOM kills
     - eventType: oom-kill
       agentRef:
+        namespace: kagent
         name: k8s-agent
       prompt: |
         AUTONOMOUS MODE: OOM kill for {{.ResourceName}} at {{.EventTime}}. Analyze memory and optimize immediately.
diff --git a/examples/custom-event-metadata.yaml b/examples/custom-event-metadata.yaml
new file mode 100644
index 0000000..e6401ca
--- /dev/null
+++ b/examples/custom-event-metadata.yaml
@@ -0,0 +1,271 @@
+# Custom Event Metadata Example
+# This example demonstrates how to use metadata from custom event sources
+# in your hook prompts. 
The metadata can contain any fields specific to your +# event source (e.g., order IDs, transaction amounts, sensor readings, etc.) + +--- +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: ecommerce-order-hook + namespace: default +spec: + eventConfigurations: + - source: + name: order-events # References an ExternalEventSource + events: + # New order placed + - eventType: "order-placed" + agentRef: + name: order-processing-agent + namespace: kagent + prompt: | + NEW ORDER RECEIVED + + Order Details: + - Order ID: {{.Metadata.orderId}} + - Customer ID: {{.Metadata.customerId}} + - Customer Email: {{.Metadata.customerEmail}} + - Total Amount: ${{.Metadata.orderTotal}} + - Item Count: {{.Metadata.itemCount}} + - Payment Method: {{.Metadata.paymentMethod}} + + {{if .Metadata.couponCode}} + - Coupon Applied: {{.Metadata.couponCode}} ({{.Metadata.discountAmount}} off) + {{end}} + + Shipping: + - Address: {{.Metadata.shippingAddress}} + - Method: {{.Metadata.shippingMethod}} + - Estimated Delivery: {{.Metadata.estimatedDelivery}} + + {{if .Metadata.giftMessage}} + Gift Message: {{.Metadata.giftMessage}} + {{end}} + + Please process this order and update inventory. + + # High-value order (requires approval) + - eventType: "high-value-order" + agentRef: + name: approval-agent + namespace: kagent + prompt: | + HIGH-VALUE ORDER ALERT + + This order exceeds the automatic approval threshold. + + Order: {{.Metadata.orderId}} + Customer: {{.Metadata.customerName}} ({{.Metadata.customerEmail}}) + Total: ${{.Metadata.orderTotal}} + Threshold: ${{.Metadata.approvalThreshold}} + + Customer History: + - Account Age: {{.Metadata.customerAccountAgeDays}} days + - Previous Orders: {{.Metadata.previousOrderCount}} + - Total Lifetime Value: ${{.Metadata.lifetimeValue}} + - Risk Score: {{.Metadata.riskScore}} + + {{if .Metadata.fraudFlags}} + FRAUD FLAGS DETECTED: + {{range .Metadata.fraudFlags}} + - {{.}} + {{end}} + {{end}} + + Please review and approve or reject this order. 
+ + # Payment failed + - eventType: "payment-failed" + agentRef: + name: payment-recovery-agent + namespace: kagent + prompt: | + PAYMENT FAILURE + + Order: {{.Metadata.orderId}} + Customer: {{.Metadata.customerEmail}} + Amount: ${{.Metadata.orderTotal}} + + Failure Details: + - Error Code: {{.Metadata.errorCode}} + - Error Message: {{.Metadata.errorMessage}} + - Payment Method: {{.Metadata.paymentMethod}} + - Attempt Number: {{.Metadata.attemptNumber}} + + {{if .Metadata.bankResponse}} + Bank Response: {{.Metadata.bankResponse}} + {{end}} + + Please contact the customer and attempt payment recovery. + +--- +# Example: IoT Sensor Monitoring Hook +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: iot-sensor-hook + namespace: default +spec: + eventConfigurations: + - source: + name: iot-sensors + events: + - eventType: "sensor-alert" + agentRef: + name: facility-agent + namespace: kagent + prompt: | + SENSOR ALERT: {{.Metadata.alertType}} + + Sensor Information: + - Sensor ID: {{.Metadata.sensorId}} + - Location: {{.Metadata.location}} + - Facility: {{.Metadata.facility}} + - Zone: {{.Metadata.zone}} + + Readings: + - Current Value: {{.Metadata.currentValue}} {{.Metadata.unit}} + - Normal Range: {{.Metadata.minThreshold}} - {{.Metadata.maxThreshold}} {{.Metadata.unit}} + - Deviation: {{.Metadata.deviationPercent}}% + - Duration: {{.Metadata.alertDurationMinutes}} minutes + + {{if .Metadata.correlatedSensors}} + Correlated Sensors Also Affected: + {{range .Metadata.correlatedSensors}} + - {{.}} + {{end}} + {{end}} + + {{if .Metadata.systemStatus}} + Related System Status: {{.Metadata.systemStatus}} + {{end}} + + Please investigate and take appropriate action. 
+ +--- +# Example: Financial Transaction Monitoring +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: transaction-monitor + namespace: default +spec: + eventConfigurations: + - source: + name: transaction-stream + events: + - eventType: "suspicious-transaction" + agentRef: + name: fraud-detection-agent + namespace: kagent + prompt: | + SUSPICIOUS TRANSACTION DETECTED + + Transaction: {{.Metadata.transactionId}} + Timestamp: {{.Timestamp}} + + Transaction Details: + - Amount: {{.Metadata.amount}} {{.Metadata.currency}} + - Merchant: {{.Metadata.merchantName}} ({{.Metadata.merchantCategory}}) + - Location: {{.Metadata.city}}, {{.Metadata.country}} + - Card: ****{{.Metadata.cardLast4}} + + Risk Analysis: + - Risk Score: {{.Metadata.riskScore}}/100 + - Model Version: {{.Metadata.modelVersion}} + + Risk Factors: + {{range .Metadata.riskFactors}} + - {{.}} + {{end}} + + Customer Profile: + - Customer ID: {{.Metadata.customerId}} + - Average Transaction: {{.Metadata.avgTransaction}} {{.Metadata.currency}} + - Transactions Today: {{.Metadata.transactionsToday}} + - Distance from Last Transaction: {{.Metadata.distanceFromLastKm}} km + - Time Since Last Transaction: {{.Metadata.timeSinceLastMinutes}} minutes + + {{if .Metadata.blockedReason}} + AUTOMATICALLY BLOCKED: {{.Metadata.blockedReason}} + {{end}} + + Please review this transaction and take appropriate action. 
+ +--- +# Example: CI/CD Pipeline Events +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: cicd-pipeline-hook + namespace: default +spec: + eventConfigurations: + - source: + name: pipeline-events + events: + - eventType: "build-failed" + agentRef: + name: devops-agent + namespace: kagent + prompt: | + BUILD FAILURE + + Pipeline: {{.Metadata.pipelineName}} + Repository: {{.Metadata.repository}} + Branch: {{.Metadata.branch}} + Commit: {{.Metadata.commitHash}} + Author: {{.Metadata.commitAuthor}} + + Build Details: + - Build ID: {{.Metadata.buildId}} + - Stage: {{.Metadata.failedStage}} + - Duration: {{.Metadata.durationSeconds}} seconds + + Error: + {{.Metadata.errorMessage}} + + {{if .Metadata.failedTests}} + Failed Tests: + {{range .Metadata.failedTests}} + - {{.}} + {{end}} + {{end}} + + {{if .Metadata.previousBuildStatus}} + Previous Build: {{.Metadata.previousBuildStatus}} + {{end}} + + Logs: {{.Metadata.logsUrl}} + + Please diagnose the issue and create a fix or notify the team. + + - eventType: "deployment-successful" + agentRef: + name: devops-agent + namespace: kagent + prompt: | + DEPLOYMENT SUCCESSFUL + + Application: {{.Metadata.applicationName}} + Environment: {{.Metadata.environment}} + Version: {{.Metadata.version}} + + Deployment Details: + - Deployment ID: {{.Metadata.deploymentId}} + - Strategy: {{.Metadata.deploymentStrategy}} + - Duration: {{.Metadata.durationSeconds}} seconds + - Instances: {{.Metadata.instanceCount}} + + Health Checks: + - Status: {{.Metadata.healthCheckStatus}} + - Response Time: {{.Metadata.avgResponseTimeMs}} ms + - Error Rate: {{.Metadata.errorRatePercent}}% + + {{if .Metadata.releaseNotes}} + Release Notes: {{.Metadata.releaseNotes}} + {{end}} + + Please monitor the deployment and verify all services are healthy. 
+ diff --git a/examples/redis-event-source/Dockerfile b/examples/redis-event-source/Dockerfile new file mode 100644 index 0000000..6ed0514 --- /dev/null +++ b/examples/redis-event-source/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.24.6-alpine AS builder + +WORKDIR /app + +# Copy only the redis-event-source module +COPY go.mod go.sum ./ +RUN go mod download + +COPY . . + +# Build the redis-event-source binary +RUN CGO_ENABLED=0 GOOS=linux go build -o redis-event-source ./cmd + +FROM alpine:latest +RUN apk --no-cache add ca-certificates + +WORKDIR /root/ + +COPY --from=builder /app/redis-event-source . + +EXPOSE 50051 + +CMD ["./redis-event-source"] diff --git a/examples/redis-event-source/README.md b/examples/redis-event-source/README.md new file mode 100644 index 0000000..d36354b --- /dev/null +++ b/examples/redis-event-source/README.md @@ -0,0 +1,190 @@ +# Redis Event Source for khook + +A standalone gRPC server that streams events from Redis Pub/Sub to khook. + +## Overview + +This external event source implementation connects to Redis and subscribes to Pub/Sub channels, forwarding messages as events to khook via gRPC streaming. 
+ +## Architecture + +``` +┌─────────────────┐ ┌──────────────────────┐ ┌────────────┐ +│ │ │ │ │ │ +│ Redis Pub/Sub │────────>│ Redis Event Source │────────>│ khook │ +│ │ PUBSUB │ (gRPC Server) │ gRPC │ │ +└─────────────────┘ └──────────────────────┘ └────────────┘ +``` + +The Redis Event Source: +- Subscribes to one or more Redis Pub/Sub channels +- Receives JSON-formatted event messages +- Streams them to khook via the ExternalEventSource gRPC protocol + +## Event Message Format + +Events published to Redis channels must be JSON objects with the following structure: + +```json +{ + "type": "name-change", + "resource_name": "my-resource", + "namespace": "default", + "reason": "NameChanged", + "message": "Resource name was changed", + "uid": "unique-id", + "metadata": { + "key": "value" + }, + "timestamp_unix": 1729091066 +} +``` + +## Configuration + +### Command Line Flags + +- `--port` - gRPC server port (default: 8080) +- `--redis-addr` - Redis connection string (required, format: `redis://host:port` or `rediss://host:port`) +- `--tls-cert` - Path to TLS certificate file (optional) +- `--tls-key` - Path to TLS key file (optional) + +### Channel Configuration + +Channels are configured via the `config_json` field in the `ExternalEventSource` CRD: + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: ExternalEventSource +metadata: + name: redis-events + namespace: default +spec: + endpoint: + address: "redis-event-source.default.svc:8080" + tls: + enabled: false + config: + json: '{"channels": ["khook:events", "app:alerts", "system:events"]}' +``` + +If no channels are specified, the service defaults to subscribing to `khook:events`. + +## Building + +### Local Build + +```bash +cd examples/redis-event-source +go mod download +go build -o redis-event-source ./cmd +``` + +### Docker Build + +```bash +docker build -t redis-event-source:latest -f examples/redis-event-source/Dockerfile . 
+```
+
+## Running
+
+### Locally
+
+```bash
+./redis-event-source --redis-addr redis://localhost:6379 --port 8080
+```
+
+### With TLS
+
+```bash
+./redis-event-source \
+  --redis-addr redis://localhost:6379 \
+  --port 8080 \
+  --tls-cert /path/to/cert.pem \
+  --tls-key /path/to/key.pem
+```
+
+### In Kubernetes
+
+```bash
+kubectl apply -f examples/redis-event-source/k8s/
+```
+
+## Deployment
+
+See the `k8s/` directory for Kubernetes manifests:
+
+- `deployment.yaml` - Deployment and Service for the Redis Event Source (gRPC server)
+
+An example `ExternalEventSource` resource pointing at the Service is shown in the Configuration section above.
+
+## Testing
+
+### Publish a Test Event
+
+```bash
+kubectl exec -n redis deploy/redis -- redis-cli PUBLISH "khook:events" '{
+  "type": "name-change",
+  "resource_name": "test-resource",
+  "namespace": "default",
+  "reason": "NameChanged",
+  "message": "Test event from Redis",
+  "uid": "test-123",
+  "metadata": {"test": "true"},
+  "timestamp_unix": '$(date +%s)'
+}'
+```
+
+### Verify Event Reception
+
+Check khook logs for event processing:
+
+```bash
+kubectl logs -n kagent -l app.kubernetes.io/name=khook | grep "name-change"
+```
+
+## Development
+
+### Prerequisites
+
+- Go 1.24+
+- Protocol Buffers compiler (`protoc`)
+- Go gRPC plugins
+
+### Generate gRPC Code
+
+From the repository root:
+
+```bash
+protoc --go_out=. --go_opt=paths=source_relative \
+  --go-grpc_out=. --go-grpc_opt=paths=source_relative \
+  api/proto/externalevents/v1/external_events.proto
+```
+
+## Troubleshooting
+
+### Connection Issues
+
+Check that:
+1. Redis is accessible at the specified address
+2. The gRPC server port is accessible from khook
+3. TLS configuration matches between client and server
+
+### No Events Received
+
+Verify that:
+1. Messages are being published to the correct Redis channel
+2. The channel name matches the configuration
+3. 
Message format is valid JSON matching the expected schema + +### Logs + +Enable debug logging: + +```bash +./redis-event-source --redis-addr redis://localhost:6379 --port 8080 2>&1 | tee redis-source.log +``` + +## License + +See the main khook repository license. diff --git a/examples/redis-event-source/cmd/main.go b/examples/redis-event-source/cmd/main.go new file mode 100644 index 0000000..b9c50e6 --- /dev/null +++ b/examples/redis-event-source/cmd/main.go @@ -0,0 +1,237 @@ +package main + +import ( + "crypto/tls" + "encoding/json" + "flag" + "fmt" + "log" + "net" + "net/url" + "os" + "os/signal" + "strings" + "syscall" + "time" + + redis "github.com/redis/go-redis/v9" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + pb "github.com/kagent-dev/khook/examples/redis-event-source/proto/externalevents/v1" +) + +type server struct { + pb.UnimplementedExternalEventSourceServer + redisAddr string +} + +type redisEventMessage struct { + Type string `json:"type"` + ResourceName string `json:"resource_name"` + Namespace string `json:"namespace"` + Reason string `json:"reason"` + Message string `json:"message"` + UID string `json:"uid"` + Metadata map[string]string `json:"metadata"` + TimestampUnix int64 `json:"timestamp_unix"` +} + +func (s *server) StreamEvents(req *pb.WatchRequest, stream pb.ExternalEventSource_StreamEventsServer) error { + ctx := stream.Context() + + // Handle nil request + if req == nil { + log.Printf("StreamEvents called with nil request, using defaults") + req = &pb.WatchRequest{ + Namespace: "", + ConfigJson: "", + } + } + + log.Printf("StreamEvents called: namespace=%s, config=%s", req.Namespace, req.ConfigJson) + + // Parse Redis address + u, err := url.Parse(s.redisAddr) + if err != nil { + return fmt.Errorf("invalid redis address: %w", err) + } + + // Configure Redis client + opts := &redis.Options{ + Addr: u.Host, + } + if pw, ok := u.User.Password(); ok { + opts.Password = pw + } + if u.Scheme == "rediss" { + 
opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + + rdb := redis.NewClient(opts) + defer rdb.Close() + + // Test connection + if err := rdb.Ping(ctx).Err(); err != nil { + return fmt.Errorf("redis connection failed: %w", err) + } + log.Printf("Connected to Redis at %s", opts.Addr) + + // Parse config to extract channel list + channels := []string{"khook:events"} // default + if req.ConfigJson != "" { + var cfg map[string]interface{} + if err := json.Unmarshal([]byte(req.ConfigJson), &cfg); err == nil { + if channelList, ok := cfg["channels"].([]interface{}); ok { + channels = nil // clear default if config specifies channels + for _, ch := range channelList { + if chStr, ok := ch.(string); ok { + channels = append(channels, chStr) + } + } + } + } + } + if len(channels) == 0 { + channels = []string{"khook:events"} // fallback + } + + log.Printf("Subscribing to Redis channels: %v", channels) + pubsub := rdb.Subscribe(ctx, channels...) + defer pubsub.Close() + + ch := pubsub.Channel() + for { + select { + case <-ctx.Done(): + log.Printf("Stream context done: %v", ctx.Err()) + return ctx.Err() + case msg, ok := <-ch: + if !ok { + log.Printf("Redis channel closed") + return fmt.Errorf("redis channel closed") + } + + // First unmarshal to generic map to capture all fields + var raw map[string]interface{} + if err := json.Unmarshal([]byte(msg.Payload), &raw); err != nil { + log.Printf("Failed to unmarshal event: %v, payload: %s", err, msg.Payload) + continue + } + + // Then unmarshal to structured type + var em redisEventMessage + if err := json.Unmarshal([]byte(msg.Payload), &em); err != nil { + log.Printf("Failed to unmarshal event to struct: %v", err) + continue + } + + // Auto-populate metadata with unknown fields + // Known fields that shouldn't go into metadata + knownFields := map[string]bool{ + "type": true, "resource_name": true, "namespace": true, + "reason": true, "message": true, "uid": true, + "metadata": true, "timestamp_unix": true, + } + + 
// Initialize metadata if nil + if em.Metadata == nil { + em.Metadata = make(map[string]string) + } + + // Move unknown fields to metadata + for key, value := range raw { + if !knownFields[key] { + // Strip common prefixes like "Metadata." or "metadata." + // This handles cases where users send {"Metadata.field": "value"} + cleanKey := key + if strings.HasPrefix(strings.ToLower(key), "metadata.") { + cleanKey = key[9:] // Remove "Metadata." or "metadata." prefix + } + + // Convert value to string + if strVal, ok := value.(string); ok { + em.Metadata[cleanKey] = strVal + } else if value != nil { + // Convert other types to JSON string + jsonBytes, _ := json.Marshal(value) + em.Metadata[cleanKey] = string(jsonBytes) + } + } + } + + // Map to protobuf EventMessage + ts := time.Now().Unix() + if em.TimestampUnix != 0 { + ts = em.TimestampUnix + } + + event := &pb.EventMessage{ + Type: em.Type, + ResourceName: em.ResourceName, + Namespace: em.Namespace, + Reason: em.Reason, + Message: em.Message, + Uid: em.UID, + Metadata: em.Metadata, + TimestampUnix: ts, + } + + log.Printf("Streaming event: type=%s, resource=%s, namespace=%s, metadata=%+v", event.Type, event.ResourceName, event.Namespace, event.Metadata) + if err := stream.Send(event); err != nil { + log.Printf("Failed to send event: %v", err) + return err + } + } + } +} + +func main() { + var ( + port = flag.Int("port", 8080, "gRPC server port") + redisAddr = flag.String("redis-addr", "redis://localhost:6379", "Redis address (redis://host:port or rediss://host:port)") + tlsCert = flag.String("tls-cert", "", "Path to TLS certificate file") + tlsKey = flag.String("tls-key", "", "Path to TLS key file") + ) + flag.Parse() + + // Parse and validate Redis address + if !strings.HasPrefix(*redisAddr, "redis://") && !strings.HasPrefix(*redisAddr, "rediss://") { + log.Fatalf("Invalid redis-addr: must start with redis:// or rediss://") + } + + lis, err := net.Listen("tcp", fmt.Sprintf(":%d", *port)) + if err != nil { + 
log.Fatalf("Failed to listen: %v", err) + } + + var opts []grpc.ServerOption + + // Configure TLS if cert and key are provided + if *tlsCert != "" && *tlsKey != "" { + creds, err := credentials.NewServerTLSFromFile(*tlsCert, *tlsKey) + if err != nil { + log.Fatalf("Failed to load TLS credentials: %v", err) + } + opts = append(opts, grpc.Creds(creds)) + log.Printf("TLS enabled") + } + + grpcServer := grpc.NewServer(opts...) + pb.RegisterExternalEventSourceServer(grpcServer, &server{redisAddr: *redisAddr}) + + // Graceful shutdown + go func() { + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + sig := <-sigCh + log.Printf("Received signal %v, shutting down gracefully", sig) + grpcServer.GracefulStop() + }() + + log.Printf("Redis Event Source gRPC server listening on :%d", *port) + log.Printf("Redis address: %s", *redisAddr) + if err := grpcServer.Serve(lis); err != nil { + log.Fatalf("Failed to serve: %v", err) + } +} diff --git a/examples/redis-event-source/go.mod b/examples/redis-event-source/go.mod new file mode 100644 index 0000000..1dacf52 --- /dev/null +++ b/examples/redis-event-source/go.mod @@ -0,0 +1,18 @@ +module github.com/kagent-dev/khook/examples/redis-event-source + +go 1.24 + +require ( + github.com/redis/go-redis/v9 v9.5.3 + google.golang.org/grpc v1.73.0 + google.golang.org/protobuf v1.36.6 +) + +require ( + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/text v0.23.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 // indirect +) diff --git a/examples/redis-event-source/go.sum b/examples/redis-event-source/go.sum new file mode 100644 index 0000000..e075a0d --- /dev/null +++ b/examples/redis-event-source/go.sum @@ -0,0 +1,44 @@ +github.com/bsm/ginkgo/v2 v2.12.0 
h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/redis/go-redis/v9 v9.5.3 h1:fOAp1/uJG+ZtcITgZOfYFmTKPE7n4Vclj1wZFgRciUU= +github.com/redis/go-redis/v9 v9.5.3/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= 
+go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463 h1:e0AIkUUhxyBKh6ssZNrAMeqhA7RKUj42346d1y02i2g= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250324211829-b45e905df463/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= diff --git a/examples/redis-event-source/k8s/deployment.yaml 
b/examples/redis-event-source/k8s/deployment.yaml new file mode 100644 index 0000000..53e0d1d --- /dev/null +++ b/examples/redis-event-source/k8s/deployment.yaml @@ -0,0 +1,53 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redis-event-source + namespace: kagent + labels: + app: redis-event-source +spec: + replicas: 1 + selector: + matchLabels: + app: redis-event-source + template: + metadata: + labels: + app: redis-event-source + spec: + containers: + - name: redis-event-source + image: localhost:5001/redis-event-source:latest + imagePullPolicy: Always + command: ["/root/redis-event-source"] + args: + - "-port=50051" + - "-redis-addr=redis://redis.redis.svc.cluster.local:6379" + ports: + - containerPort: 50051 + name: grpc + protocol: TCP + resources: + limits: + cpu: 100m + memory: 128Mi + requests: + cpu: 50m + memory: 64Mi +--- +apiVersion: v1 +kind: Service +metadata: + name: redis-event-source + namespace: kagent + labels: + app: redis-event-source +spec: + type: ClusterIP + ports: + - port: 50051 + targetPort: 50051 + protocol: TCP + name: grpc + selector: + app: redis-event-source diff --git a/examples/redis-event-source/proto/externalevents/v1/external_events.pb.go b/examples/redis-event-source/proto/externalevents/v1/external_events.pb.go new file mode 100644 index 0000000..466dc8a --- /dev/null +++ b/examples/redis-event-source/proto/externalevents/v1/external_events.pb.go @@ -0,0 +1,260 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc v5.29.3 +// source: api/proto/externalevents/v1/external_events.proto + +package externaleventsv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type WatchRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + ConfigJson string `protobuf:"bytes,2,opt,name=config_json,json=configJson,proto3" json:"config_json,omitempty"` + Hints []string `protobuf:"bytes,3,rep,name=hints,proto3" json:"hints,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *WatchRequest) Reset() { + *x = WatchRequest{} + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *WatchRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WatchRequest) ProtoMessage() {} + +func (x *WatchRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WatchRequest.ProtoReflect.Descriptor instead. 
+func (*WatchRequest) Descriptor() ([]byte, []int) { + return file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP(), []int{0} +} + +func (x *WatchRequest) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *WatchRequest) GetConfigJson() string { + if x != nil { + return x.ConfigJson + } + return "" +} + +func (x *WatchRequest) GetHints() []string { + if x != nil { + return x.Hints + } + return nil +} + +type EventMessage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Reason string `protobuf:"bytes,4,opt,name=reason,proto3" json:"reason,omitempty"` + Message string `protobuf:"bytes,5,opt,name=message,proto3" json:"message,omitempty"` + Uid string `protobuf:"bytes,6,opt,name=uid,proto3" json:"uid,omitempty"` + Metadata map[string]string `protobuf:"bytes,7,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TimestampUnix int64 `protobuf:"varint,8,opt,name=timestamp_unix,json=timestampUnix,proto3" json:"timestamp_unix,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *EventMessage) Reset() { + *x = EventMessage{} + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *EventMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventMessage) ProtoMessage() {} + +func (x *EventMessage) ProtoReflect() protoreflect.Message { + mi := &file_api_proto_externalevents_v1_external_events_proto_msgTypes[1] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventMessage.ProtoReflect.Descriptor instead. +func (*EventMessage) Descriptor() ([]byte, []int) { + return file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP(), []int{1} +} + +func (x *EventMessage) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *EventMessage) GetResourceName() string { + if x != nil { + return x.ResourceName + } + return "" +} + +func (x *EventMessage) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *EventMessage) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *EventMessage) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *EventMessage) GetUid() string { + if x != nil { + return x.Uid + } + return "" +} + +func (x *EventMessage) GetMetadata() map[string]string { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *EventMessage) GetTimestampUnix() int64 { + if x != nil { + return x.TimestampUnix + } + return 0 +} + +var File_api_proto_externalevents_v1_external_events_proto protoreflect.FileDescriptor + +const file_api_proto_externalevents_v1_external_events_proto_rawDesc = "" + + "\n" + + "1api/proto/externalevents/v1/external_events.proto\x12\x11externalevents.v1\"c\n" + + "\fWatchRequest\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x1f\n" + + "\vconfig_json\x18\x02 \x01(\tR\n" + + "configJson\x12\x14\n" + + "\x05hints\x18\x03 \x03(\tR\x05hints\"\xd8\x02\n" + + "\fEventMessage\x12\x12\n" + + "\x04type\x18\x01 \x01(\tR\x04type\x12#\n" + + "\rresource_name\x18\x02 \x01(\tR\fresourceName\x12\x1c\n" + + "\tnamespace\x18\x03 \x01(\tR\tnamespace\x12\x16\n" + + "\x06reason\x18\x04 \x01(\tR\x06reason\x12\x18\n" + + "\amessage\x18\x05 \x01(\tR\amessage\x12\x10\n" + + 
"\x03uid\x18\x06 \x01(\tR\x03uid\x12I\n" + + "\bmetadata\x18\a \x03(\v2-.externalevents.v1.EventMessage.MetadataEntryR\bmetadata\x12%\n" + + "\x0etimestamp_unix\x18\b \x01(\x03R\rtimestampUnix\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x012i\n" + + "\x13ExternalEventSource\x12R\n" + + "\fStreamEvents\x12\x1f.externalevents.v1.WatchRequest\x1a\x1f.externalevents.v1.EventMessage0\x01BJZHgithub.com/kagent-dev/khook/api/proto/externalevents/v1;externaleventsv1b\x06proto3" + +var ( + file_api_proto_externalevents_v1_external_events_proto_rawDescOnce sync.Once + file_api_proto_externalevents_v1_external_events_proto_rawDescData []byte +) + +func file_api_proto_externalevents_v1_external_events_proto_rawDescGZIP() []byte { + file_api_proto_externalevents_v1_external_events_proto_rawDescOnce.Do(func() { + file_api_proto_externalevents_v1_external_events_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_api_proto_externalevents_v1_external_events_proto_rawDesc), len(file_api_proto_externalevents_v1_external_events_proto_rawDesc))) + }) + return file_api_proto_externalevents_v1_external_events_proto_rawDescData +} + +var file_api_proto_externalevents_v1_external_events_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_api_proto_externalevents_v1_external_events_proto_goTypes = []any{ + (*WatchRequest)(nil), // 0: externalevents.v1.WatchRequest + (*EventMessage)(nil), // 1: externalevents.v1.EventMessage + nil, // 2: externalevents.v1.EventMessage.MetadataEntry +} +var file_api_proto_externalevents_v1_external_events_proto_depIdxs = []int32{ + 2, // 0: externalevents.v1.EventMessage.metadata:type_name -> externalevents.v1.EventMessage.MetadataEntry + 0, // 1: externalevents.v1.ExternalEventSource.StreamEvents:input_type -> externalevents.v1.WatchRequest + 1, // 2: externalevents.v1.ExternalEventSource.StreamEvents:output_type -> 
externalevents.v1.EventMessage + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_api_proto_externalevents_v1_external_events_proto_init() } +func file_api_proto_externalevents_v1_external_events_proto_init() { + if File_api_proto_externalevents_v1_external_events_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_proto_externalevents_v1_external_events_proto_rawDesc), len(file_api_proto_externalevents_v1_external_events_proto_rawDesc)), + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_api_proto_externalevents_v1_external_events_proto_goTypes, + DependencyIndexes: file_api_proto_externalevents_v1_external_events_proto_depIdxs, + MessageInfos: file_api_proto_externalevents_v1_external_events_proto_msgTypes, + }.Build() + File_api_proto_externalevents_v1_external_events_proto = out.File + file_api_proto_externalevents_v1_external_events_proto_goTypes = nil + file_api_proto_externalevents_v1_external_events_proto_depIdxs = nil +} diff --git a/examples/redis-event-source/proto/externalevents/v1/external_events.proto b/examples/redis-event-source/proto/externalevents/v1/external_events.proto new file mode 100644 index 0000000..e609183 --- /dev/null +++ b/examples/redis-event-source/proto/externalevents/v1/external_events.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package externalevents.v1; + +option go_package = "github.com/kagent-dev/khook/api/proto/externalevents/v1;externaleventsv1"; + +message WatchRequest { + string namespace = 1; + string config_json = 2; + repeated string hints = 3; +} + +message EventMessage { + string type = 
1; + string resource_name = 2; + string namespace = 3; + string reason = 4; + string message = 5; + string uid = 6; + map<string, string> metadata = 7; + int64 timestamp_unix = 8; +} + +service ExternalEventSource { + rpc StreamEvents(WatchRequest) returns (stream EventMessage); +} diff --git a/examples/redis-event-source/proto/externalevents/v1/external_events_grpc.pb.go b/examples/redis-event-source/proto/externalevents/v1/external_events_grpc.pb.go new file mode 100644 index 0000000..4f1d1ac --- /dev/null +++ b/examples/redis-event-source/proto/externalevents/v1/external_events_grpc.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.5.1 +// - protoc v5.29.3 +// source: api/proto/externalevents/v1/external_events.proto + +package externaleventsv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 + +const ( + ExternalEventSource_StreamEvents_FullMethodName = "/externalevents.v1.ExternalEventSource/StreamEvents" +) + +// ExternalEventSourceClient is the client API for ExternalEventSource service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ExternalEventSourceClient interface { + StreamEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[EventMessage], error) +} + +type externalEventSourceClient struct { + cc grpc.ClientConnInterface +} + +func NewExternalEventSourceClient(cc grpc.ClientConnInterface) ExternalEventSourceClient { + return &externalEventSourceClient{cc} +} + +func (c *externalEventSourceClient) StreamEvents(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[EventMessage], error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + stream, err := c.cc.NewStream(ctx, &ExternalEventSource_ServiceDesc.Streams[0], ExternalEventSource_StreamEvents_FullMethodName, cOpts...) + if err != nil { + return nil, err + } + x := &grpc.GenericClientStream[WatchRequest, EventMessage]{ClientStream: stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExternalEventSource_StreamEventsClient = grpc.ServerStreamingClient[EventMessage] + +// ExternalEventSourceServer is the server API for ExternalEventSource service. +// All implementations must embed UnimplementedExternalEventSourceServer +// for forward compatibility. +type ExternalEventSourceServer interface { + StreamEvents(*WatchRequest, grpc.ServerStreamingServer[EventMessage]) error + mustEmbedUnimplementedExternalEventSourceServer() +} + +// UnimplementedExternalEventSourceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedExternalEventSourceServer struct{} + +func (UnimplementedExternalEventSourceServer) StreamEvents(*WatchRequest, grpc.ServerStreamingServer[EventMessage]) error { + return status.Errorf(codes.Unimplemented, "method StreamEvents not implemented") +} +func (UnimplementedExternalEventSourceServer) mustEmbedUnimplementedExternalEventSourceServer() {} +func (UnimplementedExternalEventSourceServer) testEmbeddedByValue() {} + +// UnsafeExternalEventSourceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ExternalEventSourceServer will +// result in compilation errors. +type UnsafeExternalEventSourceServer interface { + mustEmbedUnimplementedExternalEventSourceServer() +} + +func RegisterExternalEventSourceServer(s grpc.ServiceRegistrar, srv ExternalEventSourceServer) { + // If the following call pancis, it indicates UnimplementedExternalEventSourceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&ExternalEventSource_ServiceDesc, srv) +} + +func _ExternalEventSource_StreamEvents_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ExternalEventSourceServer).StreamEvents(m, &grpc.GenericServerStream[WatchRequest, EventMessage]{ServerStream: stream}) +} + +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type ExternalEventSource_StreamEventsServer = grpc.ServerStreamingServer[EventMessage] + +// ExternalEventSource_ServiceDesc is the grpc.ServiceDesc for ExternalEventSource service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ExternalEventSource_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "externalevents.v1.ExternalEventSource", + HandlerType: (*ExternalEventSourceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamEvents", + Handler: _ExternalEventSource_StreamEvents_Handler, + ServerStreams: true, + }, + }, + Metadata: "api/proto/externalevents/v1/external_events.proto", +} diff --git a/examples/redis-event-source/redis-event-source b/examples/redis-event-source/redis-event-source new file mode 100755 index 0000000..f765bf4 Binary files /dev/null and b/examples/redis-event-source/redis-event-source differ diff --git a/examples/redis-hook.yaml b/examples/redis-hook.yaml new file mode 100644 index 0000000..ba88c54 --- /dev/null +++ b/examples/redis-hook.yaml @@ -0,0 +1,13 @@ +apiVersion: kagent.dev/v1alpha2 +kind: Hook +metadata: + name: redis-external-hook + namespace: default +spec: + eventConfigurations: + - source: + name: redis-events + events: + - eventType: name-change + agentRef: { name: k8s-agent, namespace: kagent } + prompt: "This name was changed. What can be done?" diff --git a/examples/redis-jira/jira-agent.yaml b/examples/redis-jira/jira-agent.yaml new file mode 100644 index 0000000..f2c5a14 --- /dev/null +++ b/examples/redis-jira/jira-agent.yaml @@ -0,0 +1,31 @@ +apiVersion: kagent.dev/v1alpha2 +kind: Agent +metadata: + labels: + app.kubernetes.io/name: jira-agent + name: jira-agent + namespace: kagent +spec: + declarative: + modelConfig: default-model-config + systemMessage: | + # Jira AI Agent System Prompt + + You are JiraMaster, an advanced AI agent specialized in Jira ticketing and operations. You have deep expertise in Jira workflows. Your purpose is to help users open Jira tickets when create_issue ebents happen. And resolve Kubernetes-related issues when resolve-issue events happen. 
+ # AUTONOMOUS MODE + You are being envoked by a non-human. Don't ask questions, just apply the best solution you can think of. + + tools: + - mcpServer: + apiGroup: kagent.dev + kind: RemoteMCPServer + name: atlassian + toolNames: + - jira_create_issue + - jira_batch_create_issues + - jira_delete_issue + - jira_get_issue + - jira_get_all_projects + type: McpServer + description: A Jira admin AU agent. + type: Declarative diff --git a/examples/redis-jira/jira-api-key b/examples/redis-jira/jira-api-key new file mode 100644 index 0000000..0043452 --- /dev/null +++ b/examples/redis-jira/jira-api-key @@ -0,0 +1 @@ +ATATT3xFfGF0C-VwMMbu4TVRz-5KwSAX1VL5zf3y5ggYzkZ4DxvHVSRtyqESVMny48WtGYp1Q7pri9N9h5py2o6aHTILSPey2Wil2iB1quP5JOat1ctrJXIXrQQ8OQ8FwLqgQsFrLzVUsFEblyuEu8QESx2OD8DPUtgkrbvpId4VVwLS_4swLng=0080638F \ No newline at end of file diff --git a/examples/redis-jira/kind-config.yaml b/examples/redis-jira/kind-config.yaml new file mode 100644 index 0000000..f76d19f --- /dev/null +++ b/examples/redis-jira/kind-config.yaml @@ -0,0 +1,4 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane diff --git a/examples/redis-jira/ollama-values.yaml b/examples/redis-jira/ollama-values.yaml new file mode 100644 index 0000000..b4abf10 --- /dev/null +++ b/examples/redis-jira/ollama-values.yaml @@ -0,0 +1,31 @@ +resources: + requests: + memory: "2Gi" + cpu: "1" + limits: + memory: "8Gi" + cpu: "4" + +runtimeClassName: "" + +nodeSelector: {} + +tolerations: [] + +affinity: {} + +# Enable GPU support for macOS (Metal Performance Shaders) +env: + - name: OLLAMA_HOST + value: "0.0.0.0:11434" + - name: OLLAMA_ORIGINS + value: "*" + +service: + type: ClusterIP + port: 11434 + +persistence: + enabled: true + size: 10Gi + storageClass: "" diff --git a/examples/redis-jira/redis-message.txt b/examples/redis-jira/redis-message.txt new file mode 100644 index 0000000..230a615 --- /dev/null +++ b/examples/redis-jira/redis-message.txt @@ -0,0 +1 @@ +PUBLISH "khook:events" 
'{"type":"create-issue","project":"Khook","message":"First issue from the cluster"}' diff --git a/examples/redis-jira/redisinsight.yaml b/examples/redis-jira/redisinsight.yaml new file mode 100644 index 0000000..ad6fb65 --- /dev/null +++ b/examples/redis-jira/redisinsight.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: redisinsight + namespace: redis + labels: + app: redisinsight +spec: + replicas: 1 + selector: + matchLabels: + app: redisinsight + template: + metadata: + labels: + app: redisinsight + spec: + containers: + - name: redisinsight + image: redis/redisinsight:latest + ports: + - containerPort: 5540 + name: http + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 100m + memory: 256Mi + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + emptyDir: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: redisinsight + namespace: redis + labels: + app: redisinsight +spec: + type: ClusterIP + ports: + - port: 5540 + targetPort: 5540 + protocol: TCP + name: http + selector: + app: redisinsight + diff --git a/go.mod b/go.mod index 595ce6b..6ec9f19 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,8 @@ require ( github.com/go-logr/logr v1.4.3 github.com/kagent-dev/kagent/go v0.0.0-20250827151700-a9cc8a1f7d57 github.com/stretchr/testify v1.10.0 + google.golang.org/grpc v1.73.0 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.34.0 k8s.io/apimachinery v0.34.0 @@ -81,7 +83,7 @@ require ( golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.12.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 3cfd1c1..0ac8c58 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,8 @@ 
github.com/glebarez/sqlite v1.11.0 h1:wSG0irqzP6VurnMEpFGer5Li19RpIRi2qvQz++w0GM github.com/glebarez/sqlite v1.11.0/go.mod h1:h8/o8j5wiAsqSPoWELDUdJXhjAhsVliSn7bWZjOhrgQ= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= @@ -44,6 +46,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= @@ -148,6 +152,18 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= 
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg= +go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E= +go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE= +go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs= +go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs= +go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w= +go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -206,6 +222,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= +google.golang.org/grpc 
v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/helm/khook-crds/crds/kagent.dev_externaleventsources.yaml b/helm/khook-crds/crds/kagent.dev_externaleventsources.yaml new file mode 100644 index 0000000..adf186c --- /dev/null +++ b/helm/khook-crds/crds/kagent.dev_externaleventsources.yaml @@ -0,0 +1,200 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: externaleventsources.kagent.dev +spec: + group: kagent.dev + names: + kind: ExternalEventSource + listKind: ExternalEventSourceList + plural: externaleventsources + singular: externaleventsource + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + description: ExternalEventSource configures an external gRPC-based event source + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ExternalEventSourceSpec defines connection details for an + external gRPC provider + properties: + config: + description: ExternalConfig is an opaque JSON string passed-through + to the provider + properties: + json: + type: string + required: + - json + type: object + connection: + description: ExternalConnPolicy defines connection behavior + properties: + namespaceScope: + type: boolean + reconnectBackoff: + properties: + initialSeconds: + format: int32 + type: integer + maxSeconds: + format: int32 + type: integer + type: object + type: object + endpoint: + description: ExternalEndpoint contains address, TLS, and auth configuration + properties: + address: + type: string + auth: + description: ExternalAuth defines optional per-connection auth + properties: + secretRef: + description: SecretKeySelector references a key in a Secret + properties: + key: + type: string + name: + type: string + required: + - key + - name + type: object + type: + type: string + required: + - type + type: object + tls: + description: ExternalTLS holds TLS/mTLS settings, referencing + a single Secret with CA, cert, and key + properties: + enabled: + type: boolean + secretRef: + description: ExternalTLSSecretRef points to a Secret that + contains CA, client cert and key + properties: + caKey: + type: string + certKey: + type: string + keyKey: + type: string + name: + type: string + required: + - caKey + - certKey + - keyKey + - name + type: object + required: + - enabled + type: object + required: + - address + type: object + required: + - endpoint + type: object + status: + description: ExternalEventSourceStatus captures readiness and connection + info + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
+ maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + lastConnectedTime: + format: date-time + type: string + type: object + type: object + served: true + storage: true + subresources: + status: {} + + + + + + + + diff --git a/helm/khook-crds/crds/kagent.dev_hooks.yaml b/helm/khook-crds/crds/kagent.dev_hooks.yaml index 0230cd0..e98b178 100644 --- a/helm/khook-crds/crds/kagent.dev_hooks.yaml +++ b/helm/khook-crds/crds/kagent.dev_hooks.yaml @@ -43,7 +43,9 @@ spec: description: EventConfigurations defines the list of event configurations to monitor items: - description: EventConfiguration defines a single event type configuration + description: |- + EventConfiguration defines either a single built-in event configuration + or a grouped source configuration with multiple events. properties: agentRef: description: AgentRef specifies the Kagent agent to call when @@ -64,8 +66,9 @@ spec: - name type: object eventType: - description: EventType specifies the type of Kubernetes event - to monitor + description: |- + Single built-in event form (Kubernetes events watcher) + EventType specifies the type of Kubernetes event to monitor enum: - pod-restart - pod-pending @@ -78,10 +81,64 @@ spec: the agent minLength: 1 type: string - required: - - agentRef - - eventType - - prompt + source: + description: Grouped source form (external or named source with + multiple events) + properties: + events: + description: Events is the list of event configurations + for this source + items: + description: SourceEvent describes a single event mapping + for a grouped source + properties: + agentRef: + description: AgentRef specifies the Kagent agent to + call when this event occurs + properties: + name: + description: |- + Name of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + minLength: 1 + type: string + namespace: + description: |- + Namespace of the referent. + If unspecified, the namespace of the Hook will be used. + type: string + required: + - name + type: object + eventType: + description: EventType specifies the event type to + match + minLength: 1 + type: string + prompt: + description: Prompt specifies the prompt template + to send to the agent + minLength: 1 + type: string + required: + - agentRef + - eventType + - prompt + type: object + minItems: 1 + type: array + name: + description: Name of the ExternalEventSource + minLength: 1 + type: string + namespace: + description: Namespace of the ExternalEventSource. Defaults + to the Hook namespace if omitted. + type: string + required: + - events + - name + type: object type: object minItems: 1 type: array diff --git a/helm/khook/templates/rbac.yaml b/helm/khook/templates/rbac.yaml index 5643d59..7890a9e 100644 --- a/helm/khook/templates/rbac.yaml +++ b/helm/khook/templates/rbac.yaml @@ -33,6 +33,33 @@ rules: - get - patch - update +# ExternalEventSource CRD permissions +- apiGroups: + - kagent.dev + resources: + - externaleventsources + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - kagent.dev + resources: + - externaleventsources/finalizers + verbs: + - update +- apiGroups: + - kagent.dev + resources: + - externaleventsources/status + verbs: + - get + - patch + - update # Event watching permissions - apiGroups: - "" diff --git a/internal/deduplication/manager.go b/internal/deduplication/manager.go index fe57ae5..c5b4370 100644 --- a/internal/deduplication/manager.go +++ b/internal/deduplication/manager.go @@ -1,7 +1,10 @@ package deduplication import ( + "crypto/sha256" + "encoding/json" "fmt" + "sort" "sync" "time" @@ -38,14 +41,78 @@ func NewManager() *Manager { } } -// eventKey generates a unique key for an event based on type and 
resource +// eventKey generates a unique key for an event using content-based hashing +// Priority: +// 1. If UID is present, use Type:UID (explicit unique identification) +// 2. Otherwise use Type:Hash where Hash is a SHA256 of event content +// +// The hash includes: Type, ResourceName, Namespace, Reason, Message, and Metadata +// This ensures events with the same semantic content are deduplicated, +// regardless of the event source (Kubernetes, Redis, IoT, etc.) func (m *Manager) eventKey(event interfaces.Event) string { - return fmt.Sprintf("%s:%s:%s", event.Type, event.Namespace, event.ResourceName) + // Prefer UID if available for explicit unique identification + if event.UID != "" { + return fmt.Sprintf("%s:%s", event.Type, event.UID) + } + + // Generate content-based hash for semantic deduplication + hash := m.hashEventContent(event) + return fmt.Sprintf("%s:%s", event.Type, hash) +} + +// hashEventContent creates a stable hash of event content for deduplication +func (m *Manager) hashEventContent(event interfaces.Event) string { + // Create a stable representation of the event + // We hash: Type + ResourceName + Namespace + Reason + Message + Metadata + h := sha256.New() + + // Add string fields + h.Write([]byte(event.Type)) + h.Write([]byte("|")) + h.Write([]byte(event.ResourceName)) + h.Write([]byte("|")) + h.Write([]byte(event.Namespace)) + h.Write([]byte("|")) + h.Write([]byte(event.Reason)) + h.Write([]byte("|")) + h.Write([]byte(event.Message)) + h.Write([]byte("|")) + + // Add metadata in a stable way (sorted keys for deterministic hash) + if event.Metadata != nil { + // Sort keys for stable hash + keys := make([]string, 0, len(event.Metadata)) + for k := range event.Metadata { + keys = append(keys, k) + } + sort.Strings(keys) + + // Add each key-value pair to hash + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte("=")) + // Convert value to JSON for stable representation + if jsonBytes, err := json.Marshal(event.Metadata[k]); err == 
nil { + h.Write(jsonBytes) + } + h.Write([]byte(";")) + } + } + + // Return first 16 characters of hex hash (sufficient for uniqueness) + return fmt.Sprintf("%x", h.Sum(nil))[:16] } // ShouldProcessEvent determines if an event should be processed based on deduplication logic func (m *Manager) ShouldProcessEvent(hookRef types.NamespacedName, event interfaces.Event) bool { - logger := log.Log.WithName("dedup").WithValues("hook", hookRef.String(), "eventType", event.Type, "resource", event.ResourceName) + logValues := []interface{}{"hook", hookRef.String(), "eventType", event.Type} + if event.ResourceName != "" { + logValues = append(logValues, "resource", event.ResourceName) + } + if event.UID != "" { + logValues = append(logValues, "uid", event.UID) + } + logger := log.Log.WithName("dedup").WithValues(logValues...) m.mutex.RLock() defer m.mutex.RUnlock() @@ -85,7 +152,14 @@ func (m *Manager) ShouldProcessEvent(hookRef types.NamespacedName, event interfa // RecordEvent records an event in the deduplication storage func (m *Manager) RecordEvent(hookRef types.NamespacedName, event interfaces.Event) error { - logger := log.Log.WithName("dedup").WithValues("hook", hookRef.String(), "eventType", event.Type, "resource", event.ResourceName) + logValues := []interface{}{"hook", hookRef.String(), "eventType", event.Type} + if event.ResourceName != "" { + logValues = append(logValues, "resource", event.ResourceName) + } + if event.UID != "" { + logValues = append(logValues, "uid", event.UID) + } + logger := log.Log.WithName("dedup").WithValues(logValues...) 
m.mutex.Lock() defer m.mutex.Unlock() diff --git a/internal/deduplication/manager_test.go b/internal/deduplication/manager_test.go index 437ea88..ae23fc9 100644 --- a/internal/deduplication/manager_test.go +++ b/internal/deduplication/manager_test.go @@ -18,19 +18,127 @@ func TestNewManager(t *testing.T) { assert.Equal(t, 0, len(manager.hookEvents)) } +func TestHashEventContent(t *testing.T) { + manager := NewManager() + + tests := []struct { + name string + event1 interfaces.Event + event2 interfaces.Event + shouldMatch bool + }{ + { + name: "identical events should have same hash", + event1: interfaces.Event{ + Type: "order-placed", + Message: "New order", + Metadata: map[string]interface{}{ + "orderId": "123", + "amount": 99.99, + }, + }, + event2: interfaces.Event{ + Type: "order-placed", + Message: "New order", + Metadata: map[string]interface{}{ + "orderId": "123", + "amount": 99.99, + }, + }, + shouldMatch: true, + }, + { + name: "different metadata values should have different hash", + event1: interfaces.Event{ + Type: "order-placed", + Message: "New order", + Metadata: map[string]interface{}{ + "orderId": "123", + }, + }, + event2: interfaces.Event{ + Type: "order-placed", + Message: "New order", + Metadata: map[string]interface{}{ + "orderId": "124", + }, + }, + shouldMatch: false, + }, + { + name: "metadata key order shouldn't matter", + event1: interfaces.Event{ + Type: "test", + Metadata: map[string]interface{}{ + "a": "value1", + "b": "value2", + }, + }, + event2: interfaces.Event{ + Type: "test", + Metadata: map[string]interface{}{ + "b": "value2", + "a": "value1", + }, + }, + shouldMatch: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hash1 := manager.hashEventContent(tt.event1) + hash2 := manager.hashEventContent(tt.event2) + + if tt.shouldMatch { + assert.Equal(t, hash1, hash2, "Hashes should match") + } else { + assert.NotEqual(t, hash1, hash2, "Hashes should differ") + } + }) + } +} + func TestEventKey(t 
*testing.T) { manager := NewManager() - event := interfaces.Event{ - Type: "pod-restart", - ResourceName: "test-pod", - Namespace: "default", - Timestamp: time.Now(), + tests := []struct { + name string + event interfaces.Event + expected string + }{ + { + name: "event with UID uses UID", + event: interfaces.Event{ + Type: "pod-restart", + UID: "unique-id-123", + }, + expected: "pod-restart:unique-id-123", + }, + { + name: "event without UID uses content hash", + event: interfaces.Event{ + Type: "pod-restart", + ResourceName: "test-pod", + Namespace: "default", + }, + // Hash will be generated, just verify format + expected: "", // Will check format instead + }, } - key := manager.eventKey(event) - expected := "pod-restart:default:test-pod" - assert.Equal(t, expected, key) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key := manager.eventKey(tt.event) + if tt.expected != "" { + assert.Equal(t, tt.expected, key) + } else { + // Verify hash format: type:hash + assert.Contains(t, key, tt.event.Type+":") + assert.Len(t, key, len(tt.event.Type)+1+16) // type + : + 16-char hash + } + }) + } } func TestShouldProcessEvent_NewEvent(t *testing.T) { diff --git a/internal/event/external_grpc_source.go b/internal/event/external_grpc_source.go new file mode 100644 index 0000000..5383573 --- /dev/null +++ b/internal/event/external_grpc_source.go @@ -0,0 +1,173 @@ +package event + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "time" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/log" + + v1alpha2 "github.com/kagent-dev/khook/api/v1alpha2" + "github.com/kagent-dev/khook/internal/interfaces" +) + +// ExternalGRPCEventSource streams events from an external provider via gRPC. +// NOTE: The actual gRPC client code is intentionally abstracted behind a dialer +// to avoid adding heavy deps until wired up. 
+type ExternalGRPCEventSource struct { + name string + namespace string + spec v1alpha2.ExternalEventSourceSpec + outCh chan interfaces.Event + cancel context.CancelFunc + logger logr.Logger + dialer GRPCDialer +} + +type GRPCDialer interface { + // Stream connects and streams events until EOF or context cancel. + // config is an opaque JSON string containing provider-specific configuration. + Stream(ctx context.Context, address string, tlsConfig *tls.Config, authHeader string, config string, onEvent func(e interfaces.Event)) error +} + +func NewExternalGRPCEventSource(name, namespace string, spec v1alpha2.ExternalEventSourceSpec, dialer GRPCDialer) *ExternalGRPCEventSource { + return &ExternalGRPCEventSource{ + name: name, + namespace: namespace, + spec: spec, + outCh: make(chan interfaces.Event, 256), + logger: log.Log.WithName("external-grpc-source").WithValues("name", name, "namespace", namespace), + dialer: dialer, + } +} + +func (s *ExternalGRPCEventSource) Name() string { return fmt.Sprintf("external-%s", s.name) } +func (s *ExternalGRPCEventSource) Events() <-chan interfaces.Event { return s.outCh } + +func (s *ExternalGRPCEventSource) Start(ctx context.Context) error { + if s.cancel != nil { + return nil + } + ctxRun, cancel := context.WithCancel(ctx) + s.cancel = cancel + if s.dialer == nil { + s.logger.Info("No gRPC dialer configured; external source disabled", "source", s.name) + go func() { + <-ctxRun.Done() + close(s.outCh) + }() + return nil + } + go s.run(ctxRun) + return nil +} + +func (s *ExternalGRPCEventSource) Stop() error { + if s.cancel != nil { + s.cancel() + s.cancel = nil + } + return nil +} + +func (s *ExternalGRPCEventSource) run(ctx context.Context) { + defer close(s.outCh) + + backoff := time.Second * 2 + maxBackoff := time.Second * 60 + if s.spec.Connection != nil && s.spec.Connection.ReconnectBackoff != nil { + if s.spec.Connection.ReconnectBackoff.InitialSeconds > 0 { + backoff = 
time.Duration(s.spec.Connection.ReconnectBackoff.InitialSeconds) * time.Second + } + if s.spec.Connection.ReconnectBackoff.MaxSeconds > 0 { + maxBackoff = time.Duration(s.spec.Connection.ReconnectBackoff.MaxSeconds) * time.Second + } + } + + for { + if err := s.connectAndStream(ctx); err != nil { + if err == io.EOF || ctx.Err() != nil { + return + } + s.logger.Error(err, "stream error, will reconnect") + } + // backoff + select { + case <-ctx.Done(): + return + case <-time.After(backoff): + } + // exponential growth up to max + backoff *= 2 + if backoff > maxBackoff { + backoff = maxBackoff + } + } +} + +func (s *ExternalGRPCEventSource) connectAndStream(ctx context.Context) error { + // Defensive nil checks + if s == nil { + panic("ExternalGRPCEventSource is nil - this should never happen") + } + if s.dialer == nil { + return fmt.Errorf("dialer is nil") + } + if s.spec.Endpoint.Address == "" { + return fmt.Errorf("endpoint address is empty") + } + + tlsConfig, err := s.buildTLS() + if err != nil { + return err + } + authHeader := s.buildAuth() + + // Extract config JSON from the ExternalEventSource spec + var configJSON string + if s.spec.Config != nil { + configJSON = s.spec.Config.JSON + } + + return s.dialer.Stream(ctx, s.spec.Endpoint.Address, tlsConfig, authHeader, configJSON, func(e interfaces.Event) { + select { + case s.outCh <- e: + case <-ctx.Done(): + } + }) +} + +func (s *ExternalGRPCEventSource) buildTLS() (*tls.Config, error) { + if s.spec.Endpoint.TLS == nil || !s.spec.Endpoint.TLS.Enabled { + return nil, nil + } + // Controller will provide CA/cert/key via mounted secrets; wiring omitted here. + if s.spec.Endpoint.TLS.SecretRef == nil { + return nil, fmt.Errorf("tls.enabled is true but secretRef is nil") + } + // Placeholder: empty pool/cert to satisfy signature; real impl will load from filesystem/secret. 
+ pool := x509.NewCertPool() + _ = pool + return &tls.Config{MinVersion: tls.VersionTLS12}, nil +} + +func (s *ExternalGRPCEventSource) buildAuth() string { + if s.spec.Endpoint.Auth == nil { + return "" + } + switch s.spec.Endpoint.Auth.Type { + case "BearerToken": + if s.spec.Endpoint.Auth.SecretRef != nil { + return "Bearer " // actual token retrieval omitted + } + case "Basic": + if s.spec.Endpoint.Auth.SecretRef != nil { + return "Basic " + } + } + return "" +} diff --git a/internal/event/external_grpc_source_test.go b/internal/event/external_grpc_source_test.go new file mode 100644 index 0000000..2b55cd0 --- /dev/null +++ b/internal/event/external_grpc_source_test.go @@ -0,0 +1,58 @@ +package event + +import ( + "context" + "crypto/tls" + "io" + "testing" + "time" + + v1alpha2 "github.com/kagent-dev/khook/api/v1alpha2" + "github.com/kagent-dev/khook/internal/interfaces" +) + +type fakeDialer struct { + events []interfaces.Event +} + +func (f *fakeDialer) Stream(ctx context.Context, address string, _ *tls.Config, _ string, _ string, onEvent func(e interfaces.Event)) error { + for _, e := range f.events { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + onEvent(e) + } + // simulate end of stream + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(10 * time.Millisecond): + return io.EOF + } +} + +func TestExternalGRPCEventSourceStreams(t *testing.T) { + spec := v1alpha2.ExternalEventSourceSpec{Endpoint: v1alpha2.ExternalEndpoint{Address: "example:1234"}} + events := []interfaces.Event{{Type: "pod-restart"}, {Type: "probe-failed"}} + src := NewExternalGRPCEventSource("ext", "default", spec, &fakeDialer{events: events}) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + if err := src.Start(ctx); err != nil { + t.Fatalf("start error: %v", err) + } + got := []interfaces.Event{} + for i := 0; i < 2; i++ { + select { + case e := <-src.Events(): + got = append(got, e) + case <-time.After(time.Second): + 
t.Fatal("timeout") + } + } + if got[0].Type == got[1].Type { + t.Fatalf("expected distinct events") + } +} diff --git a/internal/event/grpc_client.go b/internal/event/grpc_client.go new file mode 100644 index 0000000..593a57e --- /dev/null +++ b/internal/event/grpc_client.go @@ -0,0 +1,99 @@ +package event + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + pb "github.com/kagent-dev/khook/api/proto/externalevents/v1" + "github.com/kagent-dev/khook/internal/interfaces" +) + +// GRPCClient implements the GRPCDialer interface by connecting to an external +// gRPC event source server that implements the ExternalEventSource service. +type GRPCClient struct{} + +func (c *GRPCClient) Stream(ctx context.Context, address string, tlsConfig *tls.Config, authHeader string, config string, onEvent func(e interfaces.Event)) error { + if address == "" { + return fmt.Errorf("grpc client: empty address") + } + + // Prepare dial options + var opts []grpc.DialOption + + if tlsConfig != nil { + // Use provided TLS config + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))) + } else { + // Use insecure connection + opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } + + // TODO: Add auth header as gRPC metadata if provided + // if authHeader != "" { + // opts = append(opts, grpc.WithPerRPCCredentials(customAuth{authHeader})) + // } + + // Connect to the gRPC server + conn, err := grpc.NewClient(address, opts...) 
+ if err != nil { + return fmt.Errorf("grpc client: failed to dial: %w", err) + } + defer conn.Close() + + // Create client + client := pb.NewExternalEventSourceClient(conn) + + // Create watch request + req := &pb.WatchRequest{ + ConfigJson: config, + // Hints could be populated from hook event types if needed + Hints: []string{}, + } + + // Start streaming + stream, err := client.StreamEvents(ctx, req) + if err != nil { + return fmt.Errorf("grpc client: failed to stream events: %w", err) + } + + // Receive events + for { + event, err := stream.Recv() + if err != nil { + if err == io.EOF { + return nil + } + return fmt.Errorf("grpc client: stream error: %w", err) + } + + // Convert protobuf event to internal event + ts := time.Now() + if event.TimestampUnix != 0 { + ts = time.Unix(event.TimestampUnix, 0) + } + + // Convert metadata from map[string]string to map[string]interface{} + metadata := make(map[string]interface{}) + for k, v := range event.Metadata { + metadata[k] = v + } + + onEvent(interfaces.Event{ + Type: event.Type, + ResourceName: event.ResourceName, + Namespace: event.Namespace, + Reason: event.Reason, + Message: event.Message, + UID: event.Uid, + Metadata: metadata, + Timestamp: ts, + }) + } +} diff --git a/internal/event/multiplexer.go b/internal/event/multiplexer.go new file mode 100644 index 0000000..ad7517c --- /dev/null +++ b/internal/event/multiplexer.go @@ -0,0 +1,98 @@ +package event + +import ( + "context" + "sync" + + "github.com/go-logr/logr" + "sigs.k8s.io/controller-runtime/pkg/log" + + v1alpha2 "github.com/kagent-dev/khook/api/v1alpha2" + "github.com/kagent-dev/khook/internal/interfaces" +) + +// Multiplexer aggregates multiple EventSources and exposes them as a single +// EventWatcher-compatible stream. Filtering remains in the processor. 
+type Multiplexer struct { + sources []interfaces.EventSource + outCh chan interfaces.Event + cancel context.CancelFunc + logger logr.Logger +} + +func NewMultiplexer(sources []interfaces.EventSource) *Multiplexer { + return &Multiplexer{ + sources: sources, + outCh: make(chan interfaces.Event, 256), + logger: log.Log.WithName("event-multiplexer"), + } +} + +// Start all sources and begin fan-in. +func (m *Multiplexer) Start(ctx context.Context) error { + if m.cancel != nil { + return nil + } + ctxRun, cancel := context.WithCancel(ctx) + m.cancel = cancel + + var wg sync.WaitGroup + wg.Add(len(m.sources)) + for _, src := range m.sources { + s := src + go func() { + defer wg.Done() + if err := s.Start(ctxRun); err != nil { + m.logger.Error(err, "failed to start event source", "source", s.Name()) + return + } + for { + select { + case <-ctxRun.Done(): + return + case ev, ok := <-s.Events(): + if !ok { + return + } + select { + case m.outCh <- ev: + case <-ctxRun.Done(): + return + } + } + } + }() + } + + // Close outCh when all readers exit + go func() { + wg.Wait() + close(m.outCh) + }() + return nil +} + +// Stop all sources. +func (m *Multiplexer) Stop() error { + if m.cancel != nil { + m.cancel() + m.cancel = nil + } + for _, s := range m.sources { + _ = s.Stop() + } + return nil +} + +// WatchEvents exposes the multiplexed channel. Compatible with EventWatcher. +func (m *Multiplexer) WatchEvents(ctx context.Context) (<-chan interfaces.Event, error) { + if err := m.Start(ctx); err != nil { + return nil, err + } + return m.outCh, nil +} + +// FilterEvent is a passthrough; filtering is handled in the processor. 
+func (m *Multiplexer) FilterEvent(event interfaces.Event, hooks []*v1alpha2.Hook) []interfaces.EventMatch { + return nil +} diff --git a/internal/event/multiplexer_test.go b/internal/event/multiplexer_test.go new file mode 100644 index 0000000..aa58520 --- /dev/null +++ b/internal/event/multiplexer_test.go @@ -0,0 +1,47 @@ +package event + +import ( + "context" + "testing" + "time" + + "github.com/kagent-dev/khook/internal/interfaces" +) + +type stubSource struct { + name string + ch chan interfaces.Event +} + +func (s *stubSource) Start(ctx context.Context) error { return nil } +func (s *stubSource) Events() <-chan interfaces.Event { return s.ch } +func (s *stubSource) Stop() error { close(s.ch); return nil } +func (s *stubSource) Name() string { return s.name } + +func TestMultiplexerFanIn(t *testing.T) { + s1 := &stubSource{name: "s1", ch: make(chan interfaces.Event, 1)} + s2 := &stubSource{name: "s2", ch: make(chan interfaces.Event, 1)} + mux := NewMultiplexer([]interfaces.EventSource{s1, s2}) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + out, err := mux.WatchEvents(ctx) + if err != nil { + t.Fatalf("WatchEvents error: %v", err) + } + + e1 := interfaces.Event{Type: "pod-restart", ResourceName: "a"} + e2 := interfaces.Event{Type: "probe-failed", ResourceName: "b"} + s1.ch <- e1 + s2.ch <- e2 + + got1 := <-out + got2 := <-out + if got1.Type == got2.Type && got1.ResourceName == got2.ResourceName { + t.Fatalf("expected two distinct events") + } + + // ensure shutdown OK + cancel() + time.Sleep(50 * time.Millisecond) +} diff --git a/internal/event/watcher.go b/internal/event/watcher.go index 457bd14..b40f091 100644 --- a/internal/event/watcher.go +++ b/internal/event/watcher.go @@ -18,7 +18,7 @@ import ( "github.com/kagent-dev/khook/internal/interfaces" ) -// Watcher implements the EventWatcher interface +// Watcher implements both EventWatcher and EventSource interfaces type Watcher struct { client kubernetes.Interface namespace 
string @@ -27,7 +27,7 @@ type Watcher struct { eventCh chan interfaces.Event } -// NewWatcher creates a new EventWatcher instance +// NewWatcher creates a new EventWatcher instance (legacy path) func NewWatcher(client kubernetes.Interface, namespace string) interfaces.EventWatcher { // Validate inputs if client == nil { @@ -62,6 +62,11 @@ func NewWatcher(client kubernetes.Interface, namespace string) interfaces.EventW } } +// NewKubernetesSource creates a built-in Kubernetes EventSource +func NewKubernetesSource(client kubernetes.Interface, namespace string) interfaces.EventSource { + return NewWatcher(client, namespace).(*Watcher) +} + // Start begins the event watching process func (w *Watcher) Start(ctx context.Context) error { w.logger.Info("Starting event watcher", "namespace", w.namespace) @@ -174,6 +179,14 @@ func (w *Watcher) Stop() error { return nil } +// Events returns the internal event channel (EventSource contract) +func (w *Watcher) Events() <-chan interfaces.Event { + return w.eventCh +} + +// Name returns a stable source identifier (EventSource contract) +func (w *Watcher) Name() string { return "kubernetes-events" } + // WatchEvents returns a channel of all events (filtering is done by the processor) func (w *Watcher) WatchEvents(ctx context.Context) (<-chan interfaces.Event, error) { if err := w.Start(ctx); err != nil { @@ -229,7 +242,7 @@ func (w *Watcher) mapKubernetesEvent(k8sEvent *eventsv1.Event) *interfaces.Event Reason: k8sEvent.Reason, Message: k8sEvent.Note, UID: string(k8sEvent.UID), - Metadata: map[string]string{ + Metadata: map[string]interface{}{ "kind": k8sEvent.Regarding.Kind, "apiVersion": k8sEvent.Regarding.APIVersion, "count": count, @@ -332,7 +345,7 @@ func (w *Watcher) mapNodeEventType(k8sEvent *eventsv1.Event) string { // Node not ready events case reason == "nodenotready": return "node-not-ready" - + default: // Log unknown node events for future enhancement w.logger.V(1).Info("Unknown node event", "reason", reason, "type", 
// Event represents an event from any source with relevant metadata.
// Fields marked optional may be empty for non-Kubernetes or non-resource
// event sources.
type Event struct {
	Type         string                 `json:"type"`
	ResourceName string                 `json:"resourceName"` // Optional: may be empty for non-resource events
	Timestamp    time.Time              `json:"timestamp"`
	Namespace    string                 `json:"namespace"` // Optional: may be empty for non-Kubernetes events
	Reason       string                 `json:"reason"`    // Optional: may be empty
	Message      string                 `json:"message"`
	UID          string                 `json:"uid"` // Optional: may be empty
	Metadata     map[string]interface{} `json:"metadata,omitempty"` // Flexible metadata for any event source
}

// EventSource abstracts a concrete provider of events (built-in or external).
// Implementations should be safe to Start once and Stop once; Events() must
// return a stable channel for the lifetime of the source.
type EventSource interface {
	Start(ctx context.Context) error
	Events() <-chan Event
	Stop() error
	Name() string
}
template: "Issue: {{.Metadata.issue}}, Project: {{.Metadata.project}}", + event: interfaces.Event{ + Type: "resolve-issue", + Metadata: map[string]interface{}{ + "issue": "ECS-11", + "project": "KhookProject", + }, + }, + expected: "Issue: ECS-11, Project: KhookProject", + }, + { + name: "Template with nested metadata", + template: "Order {{.Metadata.orderId}} total: ${{.Metadata.amount}}", + event: interfaces.Event{ + Type: "order-placed", + Metadata: map[string]interface{}{ + "orderId": "ORD-123", + "amount": 99.99, + }, + }, + expected: "Order ORD-123 total: $99.99", }, } diff --git a/internal/pipeline/processor.go b/internal/pipeline/processor.go index 288962e..4b8d7c6 100644 --- a/internal/pipeline/processor.go +++ b/internal/pipeline/processor.go @@ -49,6 +49,14 @@ func (p *Processor) ProcessEvent(ctx context.Context, event interfaces.Event, ho "namespace", event.Namespace, "hookCount", len(hooks)) + // Validate event has required fields + if event.Type == "" { + p.logger.V(1).Info("Ignoring event with empty type", + "metadata", event.Metadata, + "message", event.Message) + return nil + } + // Find matching hooks and configurations for this event matches := p.findEventMatches(event, hooks) if len(matches) == 0 { @@ -94,12 +102,31 @@ func (p *Processor) findEventMatches(event interfaces.Event, hooks []*v1alpha2.H for _, hook := range hooks { for _, config := range hook.Spec.EventConfigurations { + // Single-event form if config.EventType == event.Type { matches = append(matches, EventMatch{ Hook: hook, Configuration: config, Event: event, }) + continue + } + // Grouped source form + if config.Source != nil { + for _, se := range config.Source.Events { + if se.EventType == event.Type { + cfg := v1alpha2.EventConfiguration{ + EventType: se.EventType, + AgentRef: se.AgentRef, + Prompt: se.Prompt, + } + matches = append(matches, EventMatch{ + Hook: hook, + Configuration: cfg, + Event: event, + }) + } + } } } } @@ -114,6 +141,23 @@ func (p *Processor) 
processEventMatch(ctx context.Context, match EventMatch) err Name: match.Hook.Name, } + // Validate agent reference + if match.Configuration.AgentRef.Name == "" { + p.logger.Error(fmt.Errorf("invalid configuration"), "Agent reference name is empty", + "hook", hookRef, + "eventType", match.Event.Type) + return fmt.Errorf("agent reference name is empty for hook %s", hookRef) + } + + // Validate prompt is not empty + if match.Configuration.Prompt == "" { + p.logger.Error(fmt.Errorf("invalid configuration"), "Prompt is empty", + "hook", hookRef, + "eventType", match.Event.Type, + "agentRef", match.Configuration.AgentRef.Name) + return fmt.Errorf("prompt is empty for hook %s, event %s", hookRef, match.Event.Type) + } + // Check deduplication - should we process this event? if !p.deduplicationManager.ShouldProcessEvent(hookRef, match.Event) { p.logger.V(1).Info("Event ignored due to deduplication", @@ -185,21 +229,39 @@ func (p *Processor) createAgentRequest(match EventMatch, agentRef types.Namespac // Expand prompt template with event context prompt := p.expandPromptTemplate(match.Configuration.Prompt, match.Event) + p.logger.V(1).Info("Expanded prompt template", + "eventType", match.Event.Type, + "originalPrompt", match.Configuration.Prompt, + "expandedPrompt", prompt, + "metadata", match.Event.Metadata) + + // Build context with all event data and metadata + context := map[string]interface{}{ + "namespace": match.Event.Namespace, + "reason": match.Event.Reason, + "message": match.Event.Message, + "uid": match.Event.UID, + "hookName": match.Hook.Name, + "hookNamespace": match.Hook.Namespace, + } + + // Include all metadata fields at the top level for easy agent access + if match.Event.Metadata != nil { + for k, v := range match.Event.Metadata { + context[k] = v + } + } + + // Also include the metadata map itself for structured access + context["metadata"] = match.Event.Metadata + return interfaces.AgentRequest{ AgentRef: agentRef, Prompt: prompt, EventName: 
match.Event.Type, EventTime: match.Event.Timestamp, ResourceName: match.Event.ResourceName, - Context: map[string]interface{}{ - "namespace": match.Event.Namespace, - "reason": match.Event.Reason, - "message": match.Event.Message, - "uid": match.Event.UID, - "metadata": match.Event.Metadata, - "hookName": match.Hook.Name, - "hookNamespace": match.Hook.Namespace, - }, + Context: context, } } @@ -214,19 +276,11 @@ func (p *Processor) expandPromptTemplate(templateStr string, event interfaces.Ev } // First, try to expand known placeholders using the original manual method - // This ensures backward compatibility for unknown placeholders + // This ensures simple placeholders are handled quickly result := p.expandKnownPlaceholders(templateStr, event) - // Check if there are still unexpanded template placeholders - // If so, skip text/template processing to maintain backward compatibility - if strings.Contains(result, "{{") && strings.Contains(result, "}}") { - p.logger.V(2).Info("Template contains unknown placeholders, skipping advanced processing", - "template", result) - return result - } - - // Then try to use text/template for more advanced templating - // This allows for complex template expressions while maintaining backward compatibility + // Always try text/template expansion for advanced features like metadata access + // The text/template engine will handle both simple and complex templates result = p.expandWithTextTemplate(result, event) return result @@ -297,7 +351,9 @@ func (p *Processor) expandKnownPlaceholders(template string, event interfaces.Ev // expandWithTextTemplate attempts to use text/template for advanced features func (p *Processor) expandWithTextTemplate(templateStr string, event interfaces.Event) string { // Create template data for advanced templating + // Make all event fields available at the top level for easy access templateData := map[string]interface{}{ + // Legacy/convenience fields (backward compatible) "EventType": event.Type, 
"ResourceName": event.ResourceName, "Namespace": event.Namespace, @@ -306,7 +362,14 @@ func (p *Processor) expandWithTextTemplate(templateStr string, event interfaces. "Timestamp": event.Timestamp.Format(time.RFC3339), "EventTime": event.Timestamp.Format(time.RFC3339), "EventMessage": event.Message, - "Event": event, // Full event access for advanced templating + + // Direct access to event fields + "Type": event.Type, + "UID": event.UID, + "Metadata": event.Metadata, // Full metadata map for custom fields + + // Full event access for advanced templating + "Event": event, } // Try to parse and execute the template diff --git a/internal/pipeline/processor_test.go b/internal/pipeline/processor_test.go index 743488d..0e911ad 100644 --- a/internal/pipeline/processor_test.go +++ b/internal/pipeline/processor_test.go @@ -167,7 +167,7 @@ func createTestEvent(eventType, resourceName, namespace string) interfaces.Event Reason: "TestReason", Message: "Test message", UID: "test-uid", - Metadata: map[string]string{ + Metadata: map[string]interface{}{ "kind": "Pod", }, } diff --git a/internal/status/manager.go b/internal/status/manager.go index 9f3aed5..5d9c9c0 100644 --- a/internal/status/manager.go +++ b/internal/status/manager.go @@ -52,38 +52,79 @@ func (m *Manager) UpdateHookStatus(ctx context.Context, hook *v1alpha2.Hook, act } } - // Update the hook status - hook.Status.ActiveEvents = statusEvents - hook.Status.LastUpdated = metav1.NewTime(time.Now()) + // Retry loop to handle resource version conflicts + maxRetries := 3 + for attempt := 0; attempt < maxRetries; attempt++ { + // Fetch the latest version of the hook to get the current resource version + latestHook := &v1alpha2.Hook{} + key := client.ObjectKey{Name: hook.Name, Namespace: hook.Namespace} + if err := m.client.Get(ctx, key, latestHook); err != nil { + m.logger.Error(err, "Failed to get latest hook version", + "hook", hook.Name, + "namespace", hook.Namespace, + "attempt", attempt+1) + return fmt.Errorf("failed 
to get latest hook version: %w", err) + } + + // Update the status on the latest version + latestHook.Status.ActiveEvents = statusEvents + latestHook.Status.LastUpdated = metav1.NewTime(time.Now()) + + // Attempt to update the status + if err := m.client.Status().Update(ctx, latestHook); err != nil { + // Check if this is a conflict error + if client.IgnoreNotFound(err) != nil && attempt < maxRetries-1 { + m.logger.V(1).Info("Hook status update conflict, retrying", + "hook", hook.Name, + "namespace", hook.Namespace, + "attempt", attempt+1, + "error", err.Error()) + // Brief backoff before retry + time.Sleep(time.Millisecond * 100 * time.Duration(attempt+1)) + continue + } + m.logger.Error(err, "Failed to update hook status", + "hook", hook.Name, + "namespace", hook.Namespace, + "attempt", attempt+1) + return fmt.Errorf("failed to update hook status after %d attempts: %w", attempt+1, err) + } - if err := m.client.Status().Update(ctx, hook); err != nil { - m.logger.Error(err, "Failed to update hook status", + // Success + m.logger.Info("Successfully updated hook status", "hook", hook.Name, - "namespace", hook.Namespace) - return fmt.Errorf("failed to update hook status: %w", err) + "namespace", hook.Namespace, + "lastUpdated", latestHook.Status.LastUpdated.Time, + "attempt", attempt+1) + return nil } - m.logger.Info("Successfully updated hook status", - "hook", hook.Name, - "namespace", hook.Namespace, - "lastUpdated", hook.Status.LastUpdated.Time) - - return nil + return fmt.Errorf("failed to update hook status after %d attempts", maxRetries) } // RecordEventFiring records that an event has started firing func (m *Manager) RecordEventFiring(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName) error { - m.logger.Info("Recording event firing", + logValues := []interface{}{ "hook", hook.Name, "namespace", hook.Namespace, "eventType", event.Type, - "resourceName", event.ResourceName, - "agentRef", agentRef) + "agentRef", 
agentRef, + } + if event.ResourceName != "" { + logValues = append(logValues, "resourceName", event.ResourceName) + } + if event.UID != "" { + logValues = append(logValues, "uid", event.UID) + } + m.logger.Info("Recording event firing", logValues...) // Emit Kubernetes event for audit trail - m.recorder.Event(hook, corev1.EventTypeNormal, "EventFiring", - fmt.Sprintf("Event %s fired for resource %s, calling agent %s", - event.Type, event.ResourceName, agentRef.Name)) + eventMsg := fmt.Sprintf("Event %s fired, calling agent %s", event.Type, agentRef.Name) + if event.ResourceName != "" { + eventMsg = fmt.Sprintf("Event %s fired for resource %s, calling agent %s", + event.Type, event.ResourceName, agentRef.Name) + } + m.recorder.Event(hook, corev1.EventTypeNormal, "EventFiring", eventMsg) return nil } @@ -106,69 +147,100 @@ func (m *Manager) RecordEventResolved(ctx context.Context, hook *v1alpha2.Hook, // RecordError records an error that occurred during event processing func (m *Manager) RecordError(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, err error, agentRef types.NamespacedName) error { - m.logger.Error(err, "Recording event processing error", + logValues := []interface{}{ "hook", hook.Name, "namespace", hook.Namespace, "eventType", event.Type, - "resourceName", event.ResourceName, - "agentRef", agentRef) + "agentRef", agentRef, + } + if event.ResourceName != "" { + logValues = append(logValues, "resourceName", event.ResourceName) + } + m.logger.Error(err, "Recording event processing error", logValues...) 
// Emit Kubernetes event for error tracking - m.recorder.Event(hook, corev1.EventTypeWarning, "EventProcessingError", - fmt.Sprintf("Failed to process event %s for resource %s with agent %s: %v", - event.Type, event.ResourceName, agentRef.Name, err)) + eventMsg := fmt.Sprintf("Failed to process event %s with agent %s: %v", + event.Type, agentRef.Name, err) + if event.ResourceName != "" { + eventMsg = fmt.Sprintf("Failed to process event %s for resource %s with agent %s: %v", + event.Type, event.ResourceName, agentRef.Name, err) + } + m.recorder.Event(hook, corev1.EventTypeWarning, "EventProcessingError", eventMsg) return nil } // RecordAgentCallSuccess records a successful agent call func (m *Manager) RecordAgentCallSuccess(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, requestId string) error { - m.logger.Info("Recording successful agent call", + logValues := []interface{}{ "hook", hook.Name, "namespace", hook.Namespace, "eventType", event.Type, - "resourceName", event.ResourceName, "agentRef", agentRef, - "requestId", requestId) + "requestId", requestId, + } + if event.ResourceName != "" { + logValues = append(logValues, "resourceName", event.ResourceName) + } + m.logger.Info("Recording successful agent call", logValues...) 
// Emit Kubernetes event for successful processing - m.recorder.Event(hook, corev1.EventTypeNormal, "AgentCallSuccess", - fmt.Sprintf("Successfully called agent %s for event %s on resource %s (request: %s)", - agentRef.Name, event.Type, event.ResourceName, requestId)) + eventMsg := fmt.Sprintf("Successfully called agent %s for event %s (request: %s)", + agentRef.Name, event.Type, requestId) + if event.ResourceName != "" { + eventMsg = fmt.Sprintf("Successfully called agent %s for event %s on resource %s (request: %s)", + agentRef.Name, event.Type, event.ResourceName, requestId) + } + m.recorder.Event(hook, corev1.EventTypeNormal, "AgentCallSuccess", eventMsg) return nil } // RecordAgentCallFailure records a failed agent call func (m *Manager) RecordAgentCallFailure(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event, agentRef types.NamespacedName, err error) error { - m.logger.Error(err, "Recording failed agent call", + logValues := []interface{}{ "hook", hook.Name, "namespace", hook.Namespace, "eventType", event.Type, - "resourceName", event.ResourceName, - "agentRef", agentRef) + "agentRef", agentRef, + } + if event.ResourceName != "" { + logValues = append(logValues, "resourceName", event.ResourceName) + } + m.logger.Error(err, "Recording failed agent call", logValues...) 
// Emit Kubernetes event for failed processing - m.recorder.Event(hook, corev1.EventTypeWarning, "AgentCallFailure", - fmt.Sprintf("Failed to call agent %s for event %s on resource %s: %v", - agentRef.Name, event.Type, event.ResourceName, err)) + eventMsg := fmt.Sprintf("Failed to call agent %s for event %s: %v", + agentRef.Name, event.Type, err) + if event.ResourceName != "" { + eventMsg = fmt.Sprintf("Failed to call agent %s for event %s on resource %s: %v", + agentRef.Name, event.Type, event.ResourceName, err) + } + m.recorder.Event(hook, corev1.EventTypeWarning, "AgentCallFailure", eventMsg) return nil } // RecordDuplicateEvent records that a duplicate event was ignored func (m *Manager) RecordDuplicateEvent(ctx context.Context, hook *v1alpha2.Hook, event interfaces.Event) error { - m.logger.Info("Recording duplicate event ignored", + logValues := []interface{}{ "hook", hook.Name, "namespace", hook.Namespace, "eventType", event.Type, - "resourceName", event.ResourceName, - "eventTimestamp", event.Timestamp) + "eventTimestamp", event.Timestamp, + } + if event.ResourceName != "" { + logValues = append(logValues, "resourceName", event.ResourceName) + } + m.logger.Info("Recording duplicate event ignored", logValues...) 
// Emit Kubernetes event for duplicate tracking (using Normal type to avoid noise) - m.recorder.Event(hook, corev1.EventTypeNormal, "DuplicateEventIgnored", - fmt.Sprintf("Duplicate event %s ignored for resource %s (within deduplication window)", - event.Type, event.ResourceName)) + eventMsg := fmt.Sprintf("Duplicate event %s ignored (within deduplication window)", event.Type) + if event.ResourceName != "" { + eventMsg = fmt.Sprintf("Duplicate event %s ignored for resource %s (within deduplication window)", + event.Type, event.ResourceName) + } + m.recorder.Event(hook, corev1.EventTypeNormal, "DuplicateEventIgnored", eventMsg) return nil } diff --git a/internal/status/manager_test.go b/internal/status/manager_test.go index 47c85a6..a2c4357 100644 --- a/internal/status/manager_test.go +++ b/internal/status/manager_test.go @@ -450,3 +450,69 @@ func TestLogControllerShutdown(t *testing.T) { // This should not panic or error manager.LogControllerShutdown(ctx, "graceful shutdown") } + +func TestUpdateHookStatusWithRetry(t *testing.T) { + scheme := runtime.NewScheme() + require.NoError(t, v1alpha2.AddToScheme(scheme)) + + hook := &v1alpha2.Hook{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: v1alpha2.HookSpec{ + EventConfigurations: []v1alpha2.EventConfiguration{ + { + EventType: "pod-restart", + AgentRef: v1alpha2.ObjectReference{Name: "test-agent"}, + Prompt: "test prompt", + }, + }, + }, + } + + activeEvents := []interfaces.ActiveEvent{ + { + EventType: "pod-restart", + ResourceName: "test-pod", + FirstSeen: time.Now().Add(-5 * time.Minute), + LastSeen: time.Now(), + Status: "firing", + }, + } + + // Create fake client with the hook and status subresource + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(hook). + WithStatusSubresource(&v1alpha2.Hook{}). 
+ Build() + fakeRecorder := record.NewFakeRecorder(100) + manager := NewManager(fakeClient, fakeRecorder) + + ctx := context.Background() + + // First, modify the hook's spec to simulate an external update + // This will change the resource version + updatedHook := hook.DeepCopy() + updatedHook.Spec.EventConfigurations[0].Prompt = "updated prompt" + require.NoError(t, fakeClient.Update(ctx, updatedHook)) + + // Now try to update status with the old hook object + // The retry mechanism should fetch the latest version and succeed + err := manager.UpdateHookStatus(ctx, hook, activeEvents) + assert.NoError(t, err) + + // Verify the status was updated + latestHook := &v1alpha2.Hook{} + key := client.ObjectKey{Name: hook.Name, Namespace: hook.Namespace} + require.NoError(t, fakeClient.Get(ctx, key, latestHook)) + + assert.Len(t, latestHook.Status.ActiveEvents, 1) + assert.Equal(t, "pod-restart", latestHook.Status.ActiveEvents[0].EventType) + assert.Equal(t, "test-pod", latestHook.Status.ActiveEvents[0].ResourceName) + assert.False(t, latestHook.Status.LastUpdated.IsZero()) + + // Verify the spec change was preserved + assert.Equal(t, "updated prompt", latestHook.Spec.EventConfigurations[0].Prompt) +} diff --git a/internal/workflow/coordinator.go b/internal/workflow/coordinator.go index f70c444..fbc67b4 100644 --- a/internal/workflow/coordinator.go +++ b/internal/workflow/coordinator.go @@ -117,11 +117,17 @@ func (c *Coordinator) manageNamespaceWorkflow( if state, exists := c.namespaceStates[namespace]; exists { if state.Signature == signature { - c.logger.V(1).Info("No changes in hooks; keeping workflow running", "namespace", namespace) + c.logger.V(1).Info("No changes in hooks; keeping workflow running", + "namespace", namespace, + "currentSignature", signature, + "previousSignature", state.Signature) return } - c.logger.Info("Restarting namespace workflow due to hook changes", "namespace", namespace) + c.logger.Info("Restarting namespace workflow due to hook changes", 
+ "namespace", namespace, + "oldSignature", state.Signature, + "newSignature", signature) c.workflowManager.StopNamespaceWorkflow(namespace, state) delete(c.namespaceStates, namespace) } @@ -134,7 +140,7 @@ func (c *Coordinator) manageNamespaceWorkflow( } c.namespaceStates[namespace] = state - c.logger.Info("Started namespace workflow", "namespace", namespace, "hookCount", len(hooks)) + c.logger.Info("Started namespace workflow", "namespace", namespace, "hookCount", len(hooks), "signature", signature) } // cleanupOrphanedWorkflows stops workflows for namespaces that no longer have hooks diff --git a/internal/workflow/workflow_manager.go b/internal/workflow/workflow_manager.go index 96521ed..6f5ef1c 100644 --- a/internal/workflow/workflow_manager.go +++ b/internal/workflow/workflow_manager.go @@ -100,11 +100,31 @@ func (wm *WorkflowManager) runNamespaceWorkflow( wm.logger.Info("Namespace workflow started", "namespace", namespace) - watcher := event.NewWatcher(wm.k8sClient, namespace) - processor := pipeline.NewProcessor(watcher, wm.dedupManager, wm.kagentClient, wm.statusManager) + // Build sources: built-in Kubernetes and any referenced external sources + sources := []interfaces.EventSource{event.NewKubernetesSource(wm.k8sClient, namespace)} + // Derive external source names from hooks + extNames := wm.externalSourcesForHooks(hooks) + for name := range extNames { + // Fetch ExternalEventSource in this namespace + var src kagentv1alpha2.ExternalEventSource + if err := wm.ctrlClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &src); err != nil { + wm.logger.Error(err, "Failed to get ExternalEventSource", "name", name, "namespace", namespace) + continue + } + // Use the generic gRPC client for all external event sources + dialer := &event.GRPCClient{} + sources = append(sources, event.NewExternalGRPCEventSource(name, namespace, src.Spec, dialer)) + } + mux := event.NewMultiplexer(sources) + processor := pipeline.NewProcessor(mux, wm.dedupManager, 
wm.kagentClient, wm.statusManager) if err := processor.ProcessEventWorkflow(ctx, eventTypes, hooks); err != nil { - wm.logger.Error(err, "Namespace workflow exited with error", "namespace", namespace) + // Context cancellation is expected when hooks are updated/workflow is restarted + if err == context.Canceled { + wm.logger.Info("Namespace workflow stopped (context canceled)", "namespace", namespace) + } else { + wm.logger.Error(err, "Namespace workflow exited with error", "namespace", namespace) + } } else { wm.logger.Info("Namespace workflow finished", "namespace", namespace) } @@ -125,13 +145,37 @@ func (wm *WorkflowManager) uniqueEventTypes(hooks []*kagentv1alpha2.Hook) []stri return out } +// externalSourcesForHooks extracts referenced ExternalEventSource names from grouped source entries +func (wm *WorkflowManager) externalSourcesForHooks(hooks []*kagentv1alpha2.Hook) map[string]struct{} { + out := map[string]struct{}{} + for _, h := range hooks { + for _, ec := range h.Spec.EventConfigurations { + if ec.Source != nil && strings.TrimSpace(ec.Source.Name) != "" { + out[ec.Source.Name] = struct{}{} + } + } + } + return out +} + // CalculateSignature creates a signature for hook changes detection func (wm *WorkflowManager) CalculateSignature(hooks []*kagentv1alpha2.Hook) string { parts := make([]string, 0, len(hooks)) for _, h := range hooks { cfgs := make([]string, 0, len(h.Spec.EventConfigurations)) for _, ec := range h.Spec.EventConfigurations { - cfgs = append(cfgs, ec.EventType+"|"+ec.AgentRef.Name+"|"+ec.Prompt) + // Handle single-event form + if ec.EventType != "" { + agentName := ec.AgentRef.Name + cfgs = append(cfgs, ec.EventType+"|"+agentName+"|"+ec.Prompt) + } + // Handle grouped source form + if ec.Source != nil { + for _, se := range ec.Source.Events { + agentName := se.AgentRef.Name + cfgs = append(cfgs, ec.Source.Name+":"+se.EventType+"|"+agentName+"|"+se.Prompt) + } + } } parts = append(parts, h.Namespace+"/"+h.Name+"@"+strings.Join(cfgs, ";")) 
} diff --git a/internal/workflow/workflow_manager_test.go b/internal/workflow/workflow_manager_test.go new file mode 100644 index 0000000..a05cb3b --- /dev/null +++ b/internal/workflow/workflow_manager_test.go @@ -0,0 +1,187 @@ +package workflow + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + kagentv1alpha2 "github.com/kagent-dev/khook/api/v1alpha2" +) + +func TestCalculateSignature(t *testing.T) { + wm := &WorkflowManager{} + + tests := []struct { + name string + hooks []*kagentv1alpha2.Hook + signature string + }{ + { + name: "single event form", + hooks: []*kagentv1alpha2.Hook{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: kagentv1alpha2.HookSpec{ + EventConfigurations: []kagentv1alpha2.EventConfiguration{ + { + EventType: "pod-restart", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "test-agent", + }, + Prompt: "Test prompt", + }, + }, + }, + }, + }, + signature: "default/test-hook@pod-restart|test-agent|Test prompt", + }, + { + name: "grouped source form", + hooks: []*kagentv1alpha2.Hook{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: kagentv1alpha2.HookSpec{ + EventConfigurations: []kagentv1alpha2.EventConfiguration{ + { + Source: &kagentv1alpha2.EventSourceEvents{ + Name: "redis-events", + Events: []kagentv1alpha2.SourceEvent{ + { + EventType: "create-issue", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "jira-agent", + }, + Prompt: "Create issue", + }, + }, + }, + }, + }, + }, + }, + }, + signature: "default/test-hook@redis-events:create-issue|jira-agent|Create issue", + }, + { + name: "grouped source form with multiple events", + hooks: []*kagentv1alpha2.Hook{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: kagentv1alpha2.HookSpec{ + EventConfigurations: []kagentv1alpha2.EventConfiguration{ + { + Source: 
&kagentv1alpha2.EventSourceEvents{ + Name: "redis-events", + Events: []kagentv1alpha2.SourceEvent{ + { + EventType: "create-issue", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "jira-agent", + }, + Prompt: "Create issue", + }, + { + EventType: "resolve-issue", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "jira-agent", + }, + Prompt: "Resolve issue", + }, + }, + }, + }, + }, + }, + }, + }, + signature: "default/test-hook@redis-events:create-issue|jira-agent|Create issue;redis-events:resolve-issue|jira-agent|Resolve issue", + }, + { + name: "prompt change should change signature", + hooks: []*kagentv1alpha2.Hook{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: kagentv1alpha2.HookSpec{ + EventConfigurations: []kagentv1alpha2.EventConfiguration{ + { + Source: &kagentv1alpha2.EventSourceEvents{ + Name: "redis-events", + Events: []kagentv1alpha2.SourceEvent{ + { + EventType: "create-issue", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "jira-agent", + }, + Prompt: "Create issue with details", + }, + }, + }, + }, + }, + }, + }, + }, + signature: "default/test-hook@redis-events:create-issue|jira-agent|Create issue with details", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sig := wm.CalculateSignature(tt.hooks) + assert.Equal(t, tt.signature, sig) + }) + } +} + +func TestCalculateSignatureChangesOnPromptUpdate(t *testing.T) { + wm := &WorkflowManager{} + + hook := &kagentv1alpha2.Hook{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hook", + Namespace: "default", + }, + Spec: kagentv1alpha2.HookSpec{ + EventConfigurations: []kagentv1alpha2.EventConfiguration{ + { + Source: &kagentv1alpha2.EventSourceEvents{ + Name: "redis-events", + Events: []kagentv1alpha2.SourceEvent{ + { + EventType: "create-issue", + AgentRef: kagentv1alpha2.ObjectReference{ + Name: "jira-agent", + }, + Prompt: "Create an issue for this event data.", + }, + }, + }, + }, + }, + }, + } + + sig1 := 
wm.CalculateSignature([]*kagentv1alpha2.Hook{hook}) + + // Update the prompt + hook.Spec.EventConfigurations[0].Source.Events[0].Prompt = "Create an issue for this event data. Include all context and details." + + sig2 := wm.CalculateSignature([]*kagentv1alpha2.Hook{hook}) + + // Signatures should be different + assert.NotEqual(t, sig1, sig2, "Signature should change when prompt is updated") +} diff --git a/specs/design-event-sources.md b/specs/design-event-sources.md new file mode 100644 index 0000000..6876867 --- /dev/null +++ b/specs/design-event-sources.md @@ -0,0 +1,279 @@ +## Multi-Source Event Ingestion (Built-in and External gRPC) - Design + +### Context +Khook currently ingests Kubernetes Events via a single watcher (`internal/event/watcher.go`) and matches them to `Hook` specs within the event processing pipeline (`internal/pipeline/processor.go`). To support additional event sources we will introduce a first-class, extensible event source model: + +- Built-in sources are implemented in-tree as Go types that satisfy a common interface. +- External sources are defined as CRDs and consumed via a well-defined gRPC contract maintained out-of-tree. + +This keeps core source types co-evolving with Khook while enabling third-parties to provide new sources without being compiled into the controller. + +### Goals +- Add an extensible abstraction for multiple event sources. +- Support both built-in (compiled-in) and external (gRPC) event sources. +- Allow `Hook` to reference either a built-in event source or an external one. +- Maintain backward compatibility with existing `Hook` specs using `eventType`. +- Provide observability, resilience, and secure connectivity for external sources. + +### Non-Goals +- Replacing existing pipeline/dedup/agent integration. +- Implementing every possible built-in source in this iteration (we’ll start with the existing Kubernetes Events source and structure for adding more). 
+ +### High-Level Architecture +- Introduce an `EventSource` interface. Each built-in source implements it. An `ExternalGRPCEventSource` implements it by connecting to an external service. +- Add an `EventMultiplexer` that aggregates any number of `EventSource`s into a single `EventWatcher`-compatible facade so the processing pipeline remains unchanged. +- Add a new CRD `ExternalEventSource` that configures the endpoint, TLS/auth, and options for external providers. +- Extend `Hook` spec so each `eventConfigurations[]` item is either: + - a single built-in event using top-level `eventType`, `agentRef`, `prompt`, or + - a grouped `source` with `name`, `namespace`, and `events[]` where each event has `eventType`, `agentRef`, `prompt`. +- `eventType` remains the matching key; external providers should emit supported event types. + +### Interfaces +We introduce `EventSource` and a multiplexer that implements the existing `EventWatcher` contract. This minimizes changes to `internal/pipeline/processor.go`. + +```go +// New, in internal/interfaces/eventsources.go +type EventSource interface { + // Start should be idempotent; it begins producing events into an internal channel. + Start(ctx context.Context) error + // Events returns a read-only channel of events for this source. + Events() <-chan Event + // Stop gracefully stops the source (cancel context or internal stop channel). + Stop() error + // Name is a stable identifier for logging/metrics. + Name() string +} + +// New, in internal/event/multiplexer.go +// Multiplexer aggregates N EventSources and exposes them as a single EventWatcher. +type Multiplexer struct { + sources []interfaces.EventSource +} + +// Satisfy existing EventWatcher to avoid refactoring the pipeline. 
+func (m *Multiplexer) WatchEvents(ctx context.Context) (<-chan interfaces.Event, error) { /* merge fan-in */ } +func (m *Multiplexer) Start(ctx context.Context) error { /* start all */ } +func (m *Multiplexer) Stop() error { /* stop all */ } +func (m *Multiplexer) FilterEvent(event interfaces.Event, hooks []*v1alpha2.Hook) []interfaces.EventMatch { /* optional passthrough */ return nil } +``` + +Built-in sources will be created in-tree. The current Kubernetes Events watcher becomes one such `EventSource` implementation (thin adaptation of `Watcher`). + +### External gRPC Event Source +We will support external providers over gRPC with a streaming contract that is purpose-built for events (inspired by KEDA’s external scaler pattern). Khook will open and maintain a long-lived stream to the external endpoint and translate incoming messages into `interfaces.Event` instances. + +#### Protocol (proposed) +```proto +// pkg: externalevents.v1 +syntax = "proto3"; + +package externalevents.v1; + +message WatchRequest { + // Namespace scoping (optional). Empty means cluster-wide if allowed by policy. + string namespace = 1; + // Arbitrary configuration payload (JSON-encoded string or map) from the CRD. + string config_json = 2; + // Optional per-hook hints (e.g., event types). Aggregated by Khook for efficiency. + repeated string hints = 3; +} + +message EventMessage { + string type = 1; // e.g., "pod-restart" or arbitrary external type + string resource_name = 2; + string namespace = 3; + string reason = 4; + string message = 5; + string uid = 6; + map<string, string> metadata = 7; + int64 timestamp_unix = 8; // seconds since epoch +} + +service ExternalEventSource { + rpc StreamEvents(WatchRequest) returns (stream EventMessage); +} +``` + +Notes: +- We keep a single server stream per configured ExternalEventSource (and optionally per namespace) with server push of events. +- If the upstream doesn’t support server-streaming, providers can buffer and ‘push’ polled events through the stream. 
+- `hints` in `WatchRequest` are derived automatically by Khook from all declared events: top-level single events and every `source.events[*].eventType`. Hooks do not configure hints directly. + +#### Resilience +- Exponential backoff reconnects with jitter. +- Per-source circuit breaking and health metrics. +- Optional heartbeats (e.g., periodic empty `EventMessage` or explicit health stream) to detect dead connections. + +#### Security +- TLS and optional mTLS support (CA bundle, client cert/key via Kubernetes Secret). +- Per-source token or basic auth via Secret reference. +- Namespaced scoping by default; cross-namespace and cluster-wide may require elevated RBAC and explicit allow flags. + +### CRD Changes + +#### 1) New CRD: ExternalEventSource +Group/Version: `kagent.dev/v1alpha2` +Kind: `ExternalEventSource` + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: ExternalEventSource +metadata: + name: my-external-source + namespace: default +spec: + endpoint: + address: "externalevents.default.svc:8080" # host:port + tls: + enabled: true + # optional mTLS + custom CA; single Secret holds CA, cert, and key + secretRef: + name: my-tls + caKey: ca.crt + certKey: tls.crt + keyKey: tls.key + # optional per-connection auth + auth: + type: BearerToken # or Basic + secretRef: { name: external-auth, key: token } + + # Optional JSON configuration string passed to the provider + config: + json: | + { "filter": { "kinds": ["Pod"], "reasons": ["BackOff"] } } + + # Connection policy + connection: + namespaceScope: true # restrict stream to this namespace + reconnectBackoff: + initialSeconds: 2 + maxSeconds: 60 + +status: + conditions: + - type: Ready + status: "True" + reason: Connected + message: "Stream established" + lastConnectedTime: "2025-10-09T10:00:00Z" +``` + +Validation highlights: +- `address` required; scheme optional (defaults to TLS when `tls.enabled: true`). 
+- A single `tls.secretRef` is used; it must exist in the same namespace as the `ExternalEventSource` and contain keys for CA, certificate, and private key as configured. + +Controller responsibilities (new): +- Reconcile `ExternalEventSource` objects; manage gRPC connections; maintain `Ready` condition and last connection info. +- Provide events to the `EventMultiplexer` via the `ExternalGRPCEventSource` implementation. + +#### 2) Extend Hook spec to reference sources +We retain the current single-event schema for built-in sources and add a grouped `source` form for external (or named) sources to define multiple events. Each item in `eventConfigurations[]` must be either a single built-in event or a grouped `source` with an `events[]` list. External providers MUST emit supported event types. + +```yaml +apiVersion: kagent.dev/v1alpha2 +kind: Hook +spec: + eventConfigurations: + - # built-in Kubernetes source (default) + eventType: pod-restart + agentRef: { name: kagent } + prompt: "Pod {{.ResourceName}} restarted" + + - # external gRPC source via CRD reference: multiple events grouped + source: + name: my-external-source + namespace: default + events: + - eventType: pod-restart + agentRef: { name: kagent } + prompt: "External restart: {{.ResourceName}}" + - eventType: probe-failed + agentRef: { name: kagent } + prompt: "Probe failed on {{.ResourceName}}" +``` + +API changes (additive): +- In `api/v1alpha2/hook_types.go`, make `EventConfiguration` a union-like struct supporting either a single built-in event or a grouped source: + - Single built-in event: `EventType`, `AgentRef`, `Prompt` + - Grouped source: `Source *EventSourceEvents` with `Name`, `Namespace`, `Events []SourceEvent{ EventType, AgentRef, Prompt }` +- Validation: exactly one form per list item. All `eventType` values must be in the supported set. 
+ +Example shape (Go): +```go +type EventConfiguration struct { + // Single built-in event form + EventType string `json:"eventType,omitempty"` + AgentRef ObjectReference `json:"agentRef,omitempty"` + Prompt string `json:"prompt,omitempty"` + + // Grouped source form + Source *EventSourceEvents `json:"source,omitempty"` +} + +type EventSourceEvents struct { + Name string `json:"name"` + Namespace string `json:"namespace,omitempty"` + Events []SourceEvent `json:"events"` +} + +type SourceEvent struct { + EventType string `json:"eventType"` + AgentRef ObjectReference `json:"agentRef"` + Prompt string `json:"prompt"` +} +``` + +### Controller and Workflow Changes +- `internal/workflow`: During workflow startup per namespace, build a set of sources from: + - Existing Kubernetes Events source (built-in), optionally filtered by hooks. + - Any referenced `ExternalEventSource` objects (namespace-scoped) required by hooks. +- Instantiate an `EventMultiplexer` with all required `EventSource`s and pass it to the pipeline. +- The pipeline remains unchanged; it receives a single stream and finds matches per event. + +### Built-in Sources (initial set) +- `kubernetes-events` (existing watcher adapted to `EventSource`). +- Future: `kube-state-metrics`, `prometheus-alerts`, etc., may be added similarly. + +### Observability +- Per-source metrics: connection status, reconnects, events per second, errors. +- Structured logs include `source.name` and `source.type`. +- Expose readiness state of `ExternalEventSource` via CR status conditions. + +### Security Considerations +- All external connections use TLS by default when `tls.enabled` is true; support mTLS via a single Secret containing CA, cert, and key. +- Secrets stay in the same namespace as the `ExternalEventSource` to prevent privilege escalation. +- Optional allowlist of external hostnames in the controller config. +- Namespace scoping enforced: controllers only request events for their namespace unless explicitly configured. 
+ +### Backward Compatibility +- Hooks using `eventType` keep working, mapped to the built-in Kubernetes Events source with the same event type mapping as today. +- Validation ensures that new fields are optional; old manifests remain valid. + +### Failure Modes and Recovery +- External stream drops: auto-reconnect with backoff and jitter. +- Provider overload: backpressure via channel buffering; drop policy and metrics to surface issues. +- CRD/Secret updates: reconcile triggers a reconnect with updated settings. + +### Implementation Plan (Phased) +1) API and CRD + - Add grouped `source.events[]` form to `EventConfiguration`; keep single built-in `eventType` form. + - Introduce `ExternalEventSource` CRD, manifests, and validation. +2) Interfaces and Core + - Add `EventSource` interface and `EventMultiplexer` that implements `EventWatcher`. + - Adapt existing Kubernetes watcher into a built-in `EventSource`. +3) External gRPC Client + - Implement `ExternalGRPCEventSource` with TLS/mTLS, auth, reconnection, metrics. + - Controller for `ExternalEventSource` to maintain status conditions. +4) Wiring + - Update workflow manager to construct sources based on hooks and existing CRDs in the namespace. + - Keep `Processor` unchanged (consumes the multiplexed stream). +5) Docs & Examples + - Add examples for `ExternalEventSource` and `Hook` referencing it. + - Troubleshooting guide for connection issues. + +### Open Questions +- Should we support fan-out multiplexing per `Hook` vs per `Namespace`? Initial approach: per-namespace shared streams for efficiency. +- Do we need request-scoped auth per hook? Initial approach: connection-level auth on the external source. +- Should we support batch/event envelope compression in gRPC? Defer until needed. 
+ + diff --git a/specs/tasks.md b/specs/tasks.md index 321b5e1..995a6d0 100644 --- a/specs/tasks.md +++ b/specs/tasks.md @@ -298,8 +298,7 @@ - Add predictive scaling based on event patterns - Write performance tests for analytics workloads - _Requirements: Analytics, Performance monitoring_ -## De -vOps and CI/CD Tasks +## DevOps and CI/CD Tasks - [x] 37. Initialize Git repository and version control - Initialize Git repository with proper .gitignore for Go projects @@ -324,6 +323,68 @@ vOps and CI/CD Tasks - Create release workflow for tagged versions with multi-platform binaries - _Requirements: Automated testing, Continuous deployment_ +--- + +## Feature: Multi-Source Event Ingestion (Built-in + External gRPC) + +### Objectives +- Allow Hooks to define events as either: + - Single built-in Kubernetes events (top-level `eventType`, `agentRef`, `prompt`), or + - Grouped events under a `source` (`name`, `namespace`, `events[]` with `eventType`, `agentRef`, `prompt`). +- Add `ExternalEventSource` CRD for gRPC-based providers with TLS/auth. +- Maintain existing pipeline; introduce `EventSource` interface + `EventMultiplexer`. + +### Scope +- API/CRD updates (Hook schema, new ExternalEventSource CRD, validation). +- Core interfaces and wiring (`EventSource`, multiplexer, K8s watcher adaptation). +- External gRPC source client (streaming, TLS, auth, reconnects, metrics). +- Workflow assembly per-namespace from Hooks and ExternalEventSources. +- Tests, docs, Helm and CRD manifests. + +### Deliverables +1) Updated `api/v1alpha2` types and validating webhooks (Hook union, ExternalEventSource). +2) Generated CRDs and Helm chart updates. +3) `EventSource` interface, `EventMultiplexer`, adapted K8s watcher as built-in source. +4) External gRPC client with proto, TLS/mTLS (single Secret), retries, metrics. +5) `workflow` changes to build sources and derive provider hints from declared events. +6) Unit/integration tests and examples. 
+ +### Implementation Steps +1) API and Validation + - Extend `Hook.EventConfiguration` with union: single built-in event OR `source.events[]`. + - Add `ExternalEventSource` type: endpoint, TLS secretRef (caKey/certKey/keyKey), auth. + - Implement validation: + - Exactly one of single-event or grouped `source.events` per item. + - All `eventType` values must be supported. + - Optional: warn if referenced ExternalEventSource missing (best-effort at webhook). + - Regenerate deepcopy, CRDs; update `config/crd` and `helm/khook-crds`. + +2) Interfaces and Core Wiring + - Create `internal/interfaces/eventsources.go` with `EventSource`. + - Implement `internal/event/multiplexer.go` to fan-in multiple sources and implement `EventWatcher`. + - Adapt `internal/event/watcher.go` to an `EventSource` (built-in K8s events). + +3) External gRPC Event Source + - Define proto `externalevents.v1` and generate Go client. + - Implement `ExternalGRPCEventSource`: TLS/mTLS from single Secret, auth, backoff. + - Add metrics and structured logs; derive `hints` from declared events. + +4) Workflow Integration + - Update `workflow_manager` to: + - Parse Hooks: collect top-level events and grouped `source.events`. + - Resolve required `ExternalEventSource` objects per namespace. + - Construct sources + multiplexer; pass to pipeline unchanged. + +5) Tests and Docs + - Unit tests: schema validation, multiplexer fan-in, K8s source adapter. + - Integration: external source reconnects, status conditions, per-namespace wiring. + - Update `docs/` with CRD reference, examples, troubleshooting. + +### Rollout Plan +- Ship as minor version; backward compatible for existing Hooks. +- Provide migration examples for external sources. +- Add feature gate/env flag to disable external sources if needed. + - [ ] 40. Set up automated dependency management - Configure Dependabot for Go modules, GitHub Actions, and Docker updates - Add automated security updates and vulnerability patching