diff --git a/cmd/apiserver/app/options/options.go b/cmd/apiserver/app/options/options.go index d83b9c823..79a1bf4a4 100644 --- a/cmd/apiserver/app/options/options.go +++ b/cmd/apiserver/app/options/options.go @@ -24,6 +24,10 @@ import ( generatedopenapi "github.com/clusterpedia-io/clusterpedia/pkg/generated/openapi" "github.com/clusterpedia-io/clusterpedia/pkg/storage" storageoptions "github.com/clusterpedia-io/clusterpedia/pkg/storage/options" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher" + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" + watchoptions "github.com/clusterpedia-io/clusterpedia/pkg/watcher/options" ) type ClusterPediaServerOptions struct { @@ -42,6 +46,8 @@ type ClusterPediaServerOptions struct { Traces *genericoptions.TracingOptions Storage *storageoptions.StorageOptions + + Subscriber *watchoptions.MiddlewareOptions } func NewServerOptions() *ClusterPediaServerOptions { @@ -68,7 +74,8 @@ func NewServerOptions() *ClusterPediaServerOptions { Admission: genericoptions.NewAdmissionOptions(), Traces: genericoptions.NewTracingOptions(), - Storage: storageoptions.NewStorageOptions(), + Storage: storageoptions.NewStorageOptions(), + Subscriber: watchoptions.NewMiddlerwareOptions(), } } @@ -118,10 +125,21 @@ func (o *ClusterPediaServerOptions) Config() (*apiserver.Config, error) { return nil, err } - return &apiserver.Config{ + config := &apiserver.Config{ GenericConfig: genericConfig, StorageFactory: storage, - }, nil + } + + middleware.SubscriberEnabled = o.Subscriber.Enabled + if middleware.SubscriberEnabled { + err = watcher.NewSubscriber(o.Subscriber) + if err != nil { + return nil, err + } + watchcomponents.InitEventCacheSize(o.Subscriber.CacheSize) + } + + return config, nil } func (o *ClusterPediaServerOptions) genericOptionsApplyTo(config *genericapiserver.RecommendedConfig) error { @@ -182,6 +200,7 @@ func (o *ClusterPediaServerOptions) 
Flags() cliflag.NamedFlagSets { o.Traces.AddFlags(fss.FlagSet("traces")) o.Storage.AddFlags(fss.FlagSet("storage")) + o.Subscriber.AddFlags(fss.FlagSet("middleware")) return fss } diff --git a/cmd/clustersynchro-manager/app/options/options.go b/cmd/clustersynchro-manager/app/options/options.go index 7e91334cf..586dd532f 100644 --- a/cmd/clustersynchro-manager/app/options/options.go +++ b/cmd/clustersynchro-manager/app/options/options.go @@ -27,6 +27,9 @@ import ( "github.com/clusterpedia-io/clusterpedia/pkg/storage" storageoptions "github.com/clusterpedia-io/clusterpedia/pkg/storage/options" "github.com/clusterpedia-io/clusterpedia/pkg/synchromanager/clustersynchro" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" + watchoptions "github.com/clusterpedia-io/clusterpedia/pkg/watcher/options" ) const ( @@ -48,6 +51,7 @@ type Options struct { WorkerNumber int // WorkerNumber is the number of worker goroutines PageSizeForResourceSync int64 ShardingName string + Publisher *watchoptions.MiddlewareOptions } func NewClusterSynchroManagerOptions() (*Options, error) { @@ -80,6 +84,7 @@ func NewClusterSynchroManagerOptions() (*Options, error) { options.KubeStateMetrics = kubestatemetrics.NewOptions() options.WorkerNumber = 5 + options.Publisher = watchoptions.NewMiddlerwareOptions() return &options, nil } @@ -107,6 +112,7 @@ func (o *Options) Flags() cliflag.NamedFlagSets { o.Storage.AddFlags(fss.FlagSet("storage")) o.Metrics.AddFlags(fss.FlagSet("metrics server")) o.KubeStateMetrics.AddFlags(fss.FlagSet("kube state metrics")) + o.Publisher.AddFlags(fss.FlagSet("middleware")) return fss } @@ -132,6 +138,14 @@ func (o *Options) Config() (*config.Config, error) { return nil, err } + middleware.PublisherEnabled = o.Publisher.Enabled + if middleware.PublisherEnabled { + err = watcher.NewPulisher(o.Publisher) + if err != nil { + return nil, err + } + } + kubeconfig, err := 
clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig) if err != nil { return nil, err diff --git a/cmd/clustersynchro-manager/app/synchro.go b/cmd/clustersynchro-manager/app/synchro.go index a66305725..15662cfb3 100644 --- a/cmd/clustersynchro-manager/app/synchro.go +++ b/cmd/clustersynchro-manager/app/synchro.go @@ -26,6 +26,8 @@ import ( "github.com/clusterpedia-io/clusterpedia/pkg/synchromanager" clusterpediafeature "github.com/clusterpedia-io/clusterpedia/pkg/utils/feature" "github.com/clusterpedia-io/clusterpedia/pkg/version/verflag" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" ) func init() { @@ -98,6 +100,12 @@ func Run(ctx context.Context, c *config.Config) error { } if !c.LeaderElection.LeaderElect { + if middleware.PublisherEnabled { + err := middleware.GlobalPublisher.InitPublisher(ctx) + if err != nil { + return err + } + } synchromanager.Run(c.WorkerNumber, ctx.Done()) return nil } @@ -138,6 +146,12 @@ func Run(ctx context.Context, c *config.Config) error { defer close(done) stopCh := ctx.Done() + if middleware.PublisherEnabled { + err := middleware.GlobalPublisher.InitPublisher(ctx) + if err != nil { + return + } + } synchromanager.Run(c.WorkerNumber, stopCh) }, OnStoppedLeading: func() { @@ -145,6 +159,10 @@ func Run(ctx context.Context, c *config.Config) error { if done != nil { <-done } + if middleware.PublisherEnabled { + middleware.GlobalPublisher.StopPublisher() + components.EC.CloseChannels() + } }, }, }) diff --git a/go.mod b/go.mod index ed5c8f088..02d8dc010 100644 --- a/go.mod +++ b/go.mod @@ -16,6 +16,7 @@ require ( github.com/prometheus/exporter-toolkit v0.10.0 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 + github.com/streadway/amqp v1.1.0 github.com/stretchr/testify v1.8.3 go.uber.org/atomic v1.10.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 diff --git a/go.sum b/go.sum index 111e77052..cf47975d9 100644 --- a/go.sum +++ b/go.sum @@ -490,6 
+490,8 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/streadway/amqp v1.1.0 h1:py12iX8XSyI7aN/3dUT8DFIDJazNJsVJdxNVEpnQTZM= +github.com/streadway/amqp v1.1.0/go.mod h1:WYSrTEYHOXHd0nwFeUXAe2G2hRnQT+deZJJf88uS9Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= diff --git a/pkg/apiserver/apiserver.go b/pkg/apiserver/apiserver.go index 88fff05a8..adf72f888 100644 --- a/pkg/apiserver/apiserver.go +++ b/pkg/apiserver/apiserver.go @@ -27,6 +27,8 @@ import ( "github.com/clusterpedia-io/clusterpedia/pkg/kubeapiserver" "github.com/clusterpedia-io/clusterpedia/pkg/storage" "github.com/clusterpedia-io/clusterpedia/pkg/utils/filters" + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" ) var ( @@ -106,6 +108,16 @@ func (config completedConfig) New() (*ClusterPediaServer, error) { return nil, fmt.Errorf("CompletedConfig.New() called with config.StorageFactory == nil") } + // init event cache pool + eventStop := make(chan struct{}) + if middleware.SubscriberEnabled { + watchcomponents.InitEventCachePool(eventStop) + err := middleware.GlobalSubscriber.InitSubscriber(eventStop) + if err != nil { + return nil, err + } + } + discoveryClient, err := discovery.NewDiscoveryClientForConfig(config.ClientConfig) if err != nil { return nil, err @@ -159,6 +171,11 @@ func (config completedConfig) New() (*ClusterPediaServer, error) { } 
genericServer.AddPostStartHookOrDie("start-clusterpedia-informers", func(context genericapiserver.PostStartHookContext) error { + // inform to close event watch + go func() { + <-context.StopCh + close(eventStop) + }() clusterpediaInformerFactory.Start(context.StopCh) clusterpediaInformerFactory.WaitForCacheSync(context.StopCh) diff --git a/pkg/apiserver/registry/clusterpedia/collectionresources/rest.go b/pkg/apiserver/registry/clusterpedia/collectionresources/rest.go index 0e52bee3a..417803127 100644 --- a/pkg/apiserver/registry/clusterpedia/collectionresources/rest.go +++ b/pkg/apiserver/registry/clusterpedia/collectionresources/rest.go @@ -53,7 +53,7 @@ func NewREST(serializer runtime.NegotiatedSerializer, factory storage.StorageFac for irt := range cr.ResourceTypes { rt := &cr.ResourceTypes[irt] if rt.Resource != "" { - config, err := configFactory.NewConfig(rt.GroupResource().WithVersion(""), false) + config, err := configFactory.NewConfig(rt.GroupResource().WithVersion(""), false, rt.Kind) if err != nil { continue } diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 5361791e7..9afcfc08b 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -928,6 +928,12 @@ func schema_clusterpedia_io_api_clusterpedia_v1beta1_ListOptions(ref common.Refe Format: "", }, }, + "resourcePrefix": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, "orderby": { SchemaProps: spec.SchemaProps{ Type: []string{"string"}, diff --git a/pkg/kube_state_metrics/generators.go b/pkg/kube_state_metrics/generators.go index e59e5339c..d5d9b2274 100644 --- a/pkg/kube_state_metrics/generators.go +++ b/pkg/kube_state_metrics/generators.go @@ -28,6 +28,25 @@ var generators = map[schema.GroupVersionResource]func(allowAnnotationsList, allo {Group: "networking.k8s.io", Version: "v1", Resource: "ingressclasses"}: ingressClassMetricFamilies, } +var 
gvrKinds = map[schema.GroupVersionResource]string{ + {Version: "v1", Resource: "pods"}: "Pod", + {Version: "v1", Resource: "secrets"}: "Secret", + {Version: "v1", Resource: "nodes"}: "Node", + {Version: "v1", Resource: "namespaces"}: "Namespace", + {Version: "v1", Resource: "services"}: "Service", + + {Group: "apps", Version: "v1", Resource: "deployments"}: "Deployment", + {Group: "apps", Version: "v1", Resource: "daemonsets"}: "DaemonSet", + {Group: "apps", Version: "v1", Resource: "statefulsets"}: "StatefulSet", + {Group: "apps", Version: "v1", Resource: "replicasets"}: "ReplicaSet", + + {Group: "batch", Version: "v1", Resource: "jobs"}: "Job", + {Group: "batch", Version: "v1", Resource: "cronjobs"}: "CronJob", + + {Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}: "Ingress", + {Group: "networking.k8s.io", Version: "v1", Resource: "ingressclasses"}: "IngressClass", +} + var rToGVR = make(map[string]schema.GroupVersionResource) func init() { diff --git a/pkg/kube_state_metrics/metrics_store.go b/pkg/kube_state_metrics/metrics_store.go index 0feb0c532..ebd53ea81 100644 --- a/pkg/kube_state_metrics/metrics_store.go +++ b/pkg/kube_state_metrics/metrics_store.go @@ -27,7 +27,7 @@ var ( func init() { for gvr := range generators { - config, err := storageConfigFactory.NewLegacyResourceConfig(gvr.GroupResource(), false) + config, err := storageConfigFactory.NewLegacyResourceConfig(gvr.GroupResource(), false, gvrKinds[gvr]) if err != nil { panic(err) } @@ -89,7 +89,7 @@ type MetricsStoreBuilder struct { match func(obj interface{}) (bool, error) } -func (builder *MetricsStoreBuilder) GetMetricStore(cluster string, resource schema.GroupVersionResource) *MetricsStore { +func (builder *MetricsStoreBuilder) GetMetricStore(cluster string, resource schema.GroupVersionResource, kind string) *MetricsStore { if _, ok := builder.resources[resource.Resource]; !ok { return nil } @@ -101,7 +101,7 @@ func (builder *MetricsStoreBuilder) GetMetricStore(cluster string, 
resource sche return nil } - config, err := storageConfigFactory.NewLegacyResourceConfig(resource.GroupResource(), false) + config, err := storageConfigFactory.NewLegacyResourceConfig(resource.GroupResource(), false, kind) if err != nil { return nil } diff --git a/pkg/kubeapiserver/apiserver.go b/pkg/kubeapiserver/apiserver.go index bdb6550fd..5f3d93cf5 100644 --- a/pkg/kubeapiserver/apiserver.go +++ b/pkg/kubeapiserver/apiserver.go @@ -118,7 +118,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget) delegate = http.NotFoundHandler() } - restManager := NewRESTManager(c.GenericConfig.Serializer, runtime.ContentTypeJSON, c.ExtraConfig.StorageFactory, c.ExtraConfig.InitialAPIGroupResources) + restManager := NewRESTManager(c.GenericConfig.Serializer, runtime.ContentTypeJSON, c.ExtraConfig.StorageFactory, c.ExtraConfig.InitialAPIGroupResources, true) discoveryManager := discovery.NewDiscoveryManager(c.GenericConfig.Serializer, restManager, delegate) // handle root discovery request diff --git a/pkg/kubeapiserver/clusterresource_controller.go b/pkg/kubeapiserver/clusterresource_controller.go index ae8f171a9..933cc1242 100644 --- a/pkg/kubeapiserver/clusterresource_controller.go +++ b/pkg/kubeapiserver/clusterresource_controller.go @@ -12,6 +12,7 @@ import ( clusterinformer "github.com/clusterpedia-io/clusterpedia/pkg/generated/informers/externalversions/cluster/v1alpha2" clusterlister "github.com/clusterpedia-io/clusterpedia/pkg/generated/listers/cluster/v1alpha2" "github.com/clusterpedia-io/clusterpedia/pkg/kubeapiserver/discovery" + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" ) type ClusterResourceController struct { @@ -36,14 +37,16 @@ func NewClusterResourceController(restManager *RESTManager, discoveryManager *di AddFunc: func(obj interface{}) { controller.updateClusterResources(obj.(*clusterv1alpha2.PediaCluster)) }, - UpdateFunc: func(_, obj interface{}) { + UpdateFunc: func(oldObj, obj 
interface{}) { cluster := obj.(*clusterv1alpha2.PediaCluster) if !cluster.DeletionTimestamp.IsZero() { + controller.clearCache(cluster) controller.removeCluster(cluster.Name) return } - controller.updateClusterResources(obj.(*clusterv1alpha2.PediaCluster)) + controller.updateCache(oldObj.(*clusterv1alpha2.PediaCluster), cluster) + controller.updateClusterResources(cluster) }, DeleteFunc: func(obj interface{}) { clusterName, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) @@ -60,7 +63,7 @@ func NewClusterResourceController(restManager *RESTManager, discoveryManager *di return controller } -func (c *ClusterResourceController) updateClusterResources(cluster *clusterv1alpha2.PediaCluster) { +func (c *ClusterResourceController) convertCluster2Map(cluster *clusterv1alpha2.PediaCluster) ResourceInfoMap { resources := ResourceInfoMap{} for _, groupResources := range cluster.Status.SyncResources { for _, resource := range groupResources.Resources { @@ -68,7 +71,7 @@ func (c *ClusterResourceController) updateClusterResources(cluster *clusterv1alp continue } - versions := sets.Set[string]{} + versions := sets.New[string]() for _, cond := range resource.SyncConditions { versions.Insert(cond.Version) } @@ -82,6 +85,49 @@ func (c *ClusterResourceController) updateClusterResources(cluster *clusterv1alp } } + return resources +} + +func (c *ClusterResourceController) updateCache(oldCluster *clusterv1alpha2.PediaCluster, cluster *clusterv1alpha2.PediaCluster) { + if ecp := watchcomponents.GetInitEventCachePool(); ecp == nil { + return + } + oldResources := c.convertCluster2Map(oldCluster) + resources := c.convertCluster2Map(cluster) + for gr, ri := range oldResources { + for version := range ri.Versions { + if !resources[gr].Versions.Has(version) { + // gr has deleted, clear the cache of this gv + watchcomponents.GetInitEventCachePool().ClearCacheByGVR(schema.GroupVersionResource{ + Group: gr.Group, Version: version, Resource: gr.Resource, + }) + } + } + } +} + +func (c 
*ClusterResourceController) clearCache(cluster *clusterv1alpha2.PediaCluster) { + if ecp := watchcomponents.GetInitEventCachePool(); ecp == nil { + return + } + currentResources := c.clusterresources[cluster.Name] + resources := c.convertCluster2Map(cluster) + + for gr, ri := range currentResources { + for version := range ri.Versions { + // clear the cache of this gv which in cluster + if resources[gr].Versions.Has(version) { + watchcomponents.GetInitEventCachePool().ClearCacheByGVR(schema.GroupVersionResource{ + Group: gr.Group, Version: version, Resource: gr.Resource, + }) + } + } + } +} + +func (c *ClusterResourceController) updateClusterResources(cluster *clusterv1alpha2.PediaCluster) { + resources := c.convertCluster2Map(cluster) + currentResources := c.clusterresources[cluster.Name] if reflect.DeepEqual(resources, currentResources) { return diff --git a/pkg/kubeapiserver/resourcerest/storage.go b/pkg/kubeapiserver/resourcerest/storage.go index 574fd6050..e3765227e 100644 --- a/pkg/kubeapiserver/resourcerest/storage.go +++ b/pkg/kubeapiserver/resourcerest/storage.go @@ -35,6 +35,8 @@ type RESTStorage struct { Storage storage.ResourceStorage TableConvertor rest.TableConvertor + + Kind string } var _ rest.Lister = &RESTStorage{} @@ -83,6 +85,8 @@ func (s *RESTStorage) resolveListOptions(ctx context.Context) (*internal.ListOpt options.Namespaces = []string{requestInfo.Namespace} } + options.ResourcePrefix = requestInfo.Resource + if cluster := request.ClusterNameValue(ctx); cluster != "" { options.ClusterNames = []string{cluster} } @@ -126,7 +130,19 @@ func (s *RESTStorage) Watch(ctx context.Context, _ *metainternalversion.ListOpti return nil, err } - inter, err := s.Storage.Watch(ctx, options) + requestInfo, ok := genericrequest.RequestInfoFrom(ctx) + if !ok { + return nil, errors.New("missing requestInfo") + } + + gvr := schema.GroupVersionResource{ + Group: requestInfo.APIGroup, + Version: requestInfo.APIVersion, + Resource: requestInfo.Resource, + } + gvk := 
gvr.GroupVersion().WithKind(s.Kind) + + inter, err := s.Storage.Watch(ctx, s.New, options, gvk) if apierrors.IsMethodNotSupported(err) { return nil, apierrors.NewMethodNotSupported(s.DefaultQualifiedResource, "watch") } diff --git a/pkg/kubeapiserver/resourcescheme/import_known_versions.go b/pkg/kubeapiserver/resourcescheme/import_known_versions.go new file mode 100644 index 000000000..934b00147 --- /dev/null +++ b/pkg/kubeapiserver/resourcescheme/import_known_versions.go @@ -0,0 +1,32 @@ +package resourcescheme + +import ( + // These imports are the API groups the API server will support. + apiextensionsinstall "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/install" + _ "k8s.io/kubernetes/pkg/apis/admission/install" + _ "k8s.io/kubernetes/pkg/apis/admissionregistration/install" + _ "k8s.io/kubernetes/pkg/apis/apiserverinternal/install" + _ "k8s.io/kubernetes/pkg/apis/apps/install" + _ "k8s.io/kubernetes/pkg/apis/authentication/install" + _ "k8s.io/kubernetes/pkg/apis/authorization/install" + _ "k8s.io/kubernetes/pkg/apis/autoscaling/install" + _ "k8s.io/kubernetes/pkg/apis/batch/install" + _ "k8s.io/kubernetes/pkg/apis/certificates/install" + _ "k8s.io/kubernetes/pkg/apis/coordination/install" + _ "k8s.io/kubernetes/pkg/apis/core/install" + _ "k8s.io/kubernetes/pkg/apis/discovery/install" + _ "k8s.io/kubernetes/pkg/apis/events/install" + _ "k8s.io/kubernetes/pkg/apis/extensions/install" + _ "k8s.io/kubernetes/pkg/apis/flowcontrol/install" + _ "k8s.io/kubernetes/pkg/apis/imagepolicy/install" + _ "k8s.io/kubernetes/pkg/apis/networking/install" + _ "k8s.io/kubernetes/pkg/apis/node/install" + _ "k8s.io/kubernetes/pkg/apis/policy/install" + _ "k8s.io/kubernetes/pkg/apis/rbac/install" + _ "k8s.io/kubernetes/pkg/apis/scheduling/install" + _ "k8s.io/kubernetes/pkg/apis/storage/install" +) + +func init() { + apiextensionsinstall.Install(LegacyResourceScheme) +} diff --git a/pkg/kubeapiserver/resourcescheme/scheme.go b/pkg/kubeapiserver/resourcescheme/scheme.go new 
file mode 100644 index 000000000..92064f9e8 --- /dev/null +++ b/pkg/kubeapiserver/resourcescheme/scheme.go @@ -0,0 +1,17 @@ +package resourcescheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/kubernetes/pkg/api/legacyscheme" + + unstructuredresourcescheme "github.com/clusterpedia-io/clusterpedia/pkg/kubeapiserver/resourcescheme/unstructured" +) + +var ( + LegacyResourceScheme = legacyscheme.Scheme + LegacyResourceCodecs = legacyscheme.Codecs + LegacyResourceParameterCodec = legacyscheme.ParameterCodec + + UnstructuredScheme = unstructuredresourcescheme.NewScheme() + UnstructuredCodecs = unstructured.UnstructuredJSONScheme +) diff --git a/pkg/kubeapiserver/resourcescheme/unstructured/scheme.go b/pkg/kubeapiserver/resourcescheme/unstructured/scheme.go new file mode 100644 index 000000000..3093ada7f --- /dev/null +++ b/pkg/kubeapiserver/resourcescheme/unstructured/scheme.go @@ -0,0 +1,145 @@ +package unstructured + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type Scheme struct{} + +func NewScheme() *Scheme { + return &Scheme{} +} + +func (s *Scheme) New(kind schema.GroupVersionKind) (runtime.Object, error) { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(kind) + return obj, nil +} + +func (s *Scheme) Default(_ runtime.Object) {} + +// ObjectKinds returns a slice of one element with the group,version,kind of the +// provided object, or an error if the object is not runtime.Unstructured or +// has no group,version,kind information. unversionedType will always be false +// because runtime.Unstructured object should always have group,version,kind +// information set. 
+// +// reference from +// https://github.com/kubernetes/apiextensions-apiserver/blob/b0680ddb99b88a5978a43fe4f2508dce81be1ec9/pkg/crdserverscheme/unstructured.go#L46 +func (s *Scheme) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + if _, ok := obj.(runtime.Unstructured); !ok { + return nil, false, runtime.NewNotRegisteredErrForType("unstructured.Scheme", reflect.TypeOf(obj)) + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Kind == "" { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + + if gvk.Version == "" { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + return []schema.GroupVersionKind{gvk}, false, nil +} + +// Recognizes does not delegate the Recognizes check, needs to be wrapped by +// the caller to check the specific gvk +func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { + return false +} + +func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) { + return runtime.DefaultMetaV1FieldSelectorConversion(label, value) +} + +// Convert reference from +// https://github.com/kubernetes/apiextensions-apiserver/blob/b0680ddb99b88a5978a43fe4f2508dce81be1ec9/pkg/apiserver/conversion/converter.go#L104 +func (s *Scheme) Convert(in, out, context interface{}) error { + obj, ok := in.(runtime.Object) + if !ok { + return fmt.Errorf("input type %T in not valid for object conversion", in) + } + return s.UnsafeConvert(obj.DeepCopyObject(), out, context) +} + +func (s *Scheme) UnsafeConvert(in, out, context interface{}) error { + unstructIn, ok := in.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("input type %T in not Valid for unstructed conversion to %T", in, out) + } + + unstructOut, ok := out.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("out type %T in not Valid for unstructured conversion from %T", out, in) + } + + fromGVK := 
unstructIn.GroupVersionKind() + toGVK := unstructOut.GroupVersionKind() + if fromGVK.GroupKind() != toGVK.GroupKind() { + return fmt.Errorf("not supported to convert from %s to %s", fromGVK.GroupKind(), toGVK.GroupKind()) + } + + unstructOut.SetUnstructuredContent(unstructIn.UnstructuredContent()) + unstructOut.SetGroupVersionKind(toGVK) + return nil +} + +// ConvertToVersion converts in object to the given gvk in place and returns the same `in` object. +// The in object can be a single object or a UnstructuredList. CRD storage implementation creates an +// UnstructuredList with the request's GV, populates it from storage, then calls conversion to convert +// the individual items. This function assumes it never gets a v1.List. +func (s *Scheme) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + return s.UnsafeConvertToVersion(in.DeepCopyObject(), target) +} + +func (s *Scheme) UnsafeConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + // TODO: hardcoded for now; following the k8s approach, WatchEvent should be passed into the scheme. + var fromGVK schema.GroupVersionKind = in.GetObjectKind().GroupVersionKind() + + toGVK, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{fromGVK}) + if !ok { + return nil, fmt.Errorf("%s is unstructured and is not suitable for converting to %q", fromGVK, target) + } + + if fromGVK.GroupKind() != toGVK.GroupKind() { + return nil, fmt.Errorf("not supported to convert from %s to %s", fromGVK.GroupKind(), toGVK.GroupKind()) + } + + if list, ok := in.(*unstructured.UnstructuredList); ok { + for i := range list.Items { + itemKind := list.Items[i].GroupVersionKind().Kind + list.Items[i].SetGroupVersionKind(toGVK.GroupVersion().WithKind(itemKind)) + } + } + in.GetObjectKind().SetGroupVersionKind(toGVK) + return in, nil +} + +var _ runtime.ObjectCreater = &Scheme{} +var _ runtime.ObjectConvertor = &Scheme{} +var _ runtime.ObjectDefaulter = &Scheme{} +var _ runtime.ObjectTyper = &Scheme{} + +//
unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ runtime.ObjectConvertor = unsafeObjectConvertor{} + +func (c unsafeObjectConvertor) Convert(in, out, context interface{}) error { + return c.Scheme.UnsafeConvert(in, out, context) +} + +func (c unsafeObjectConvertor) ConvertToVersion(in runtime.Object, outVersion runtime.GroupVersioner) (runtime.Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +func UnsafeObjectConvertor(scheme *Scheme) runtime.ObjectConvertor { + return unsafeObjectConvertor{scheme} +} diff --git a/pkg/kubeapiserver/restmanager.go b/pkg/kubeapiserver/restmanager.go index 160e53786..2f3c50ef2 100644 --- a/pkg/kubeapiserver/restmanager.go +++ b/pkg/kubeapiserver/restmanager.go @@ -47,9 +47,11 @@ type RESTManager struct { restResourceInfos atomic.Value // map[schema.GroupVersionResource]RESTResourceInfo requestVerbs metav1.Verbs + + initCache bool } -func NewRESTManager(serializer runtime.NegotiatedSerializer, storageMediaType string, storageFactory storage.StorageFactory, initialAPIGroupResources []*restmapper.APIGroupResources) *RESTManager { +func NewRESTManager(serializer runtime.NegotiatedSerializer, storageMediaType string, storageFactory storage.StorageFactory, initialAPIGroupResources []*restmapper.APIGroupResources, initCache bool) *RESTManager { requestVerbs := storageFactory.GetSupportedRequestVerbs() apiresources := make(map[schema.GroupResource]metav1.APIResource) @@ -90,6 +92,7 @@ func NewRESTManager(serializer runtime.NegotiatedSerializer, storageMediaType st resourcetSorageConfig: storageconfig.NewStorageConfigFactory(), equivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), requestVerbs: requestVerbs, + initCache: initCache, } manager.resources.Store(apiresources) @@ -270,12 +273,12 @@ func (m *RESTManager) addRESTResourceInfosLocked(addedInfos map[schema.GroupVers } func (m *RESTManager) 
genLegacyResourceRESTStorage(gvr schema.GroupVersionResource, kind string, namespaced bool) (*resourcerest.RESTStorage, error) { - storageConfig, err := m.resourcetSorageConfig.NewLegacyResourceConfig(gvr.GroupResource(), namespaced) + storageConfig, err := m.resourcetSorageConfig.NewLegacyResourceConfig(gvr.GroupResource(), namespaced, kind) if err != nil { return nil, err } - resourceStorage, err := m.storageFactory.NewResourceStorage(storageConfig) + resourceStorage, err := m.storageFactory.NewResourceStorage(storageConfig, m.initCache) if err != nil { return nil, err } @@ -283,36 +286,30 @@ func (m *RESTManager) genLegacyResourceRESTStorage(gvr schema.GroupVersionResour return &resourcerest.RESTStorage{ DefaultQualifiedResource: gvr.GroupResource(), - NewFunc: func() runtime.Object { - obj, _ := scheme.LegacyResourceScheme.New(storageConfig.MemoryVersion.WithKind(kind)) - return obj - }, + NewFunc: storageConfig.NewFunc, NewListFunc: func() runtime.Object { obj, _ := scheme.LegacyResourceScheme.New(storageConfig.MemoryVersion.WithKind(kind + "List")) return obj }, Storage: resourceStorage, + Kind: kind, }, nil } func (m *RESTManager) genUnstructuredRESTStorage(gvr schema.GroupVersionResource, kind string, namespaced bool) (*resourcerest.RESTStorage, error) { - storageConfig, err := m.resourcetSorageConfig.NewUnstructuredConfig(gvr, namespaced) + storageConfig, err := m.resourcetSorageConfig.NewUnstructuredConfig(gvr, namespaced, kind) if err != nil { return nil, err } - resourceStorage, err := m.storageFactory.NewResourceStorage(storageConfig) + resourceStorage, err := m.storageFactory.NewResourceStorage(storageConfig, m.initCache) if err != nil { return nil, err } return &resourcerest.RESTStorage{ - NewFunc: func() runtime.Object { - obj := &unstructured.Unstructured{} - obj.SetGroupVersionKind(storageConfig.MemoryVersion.WithKind(kind)) - return obj - }, + NewFunc: storageConfig.NewFunc, NewListFunc: func() runtime.Object { obj := 
&unstructured.UnstructuredList{} obj.SetGroupVersionKind(storageConfig.MemoryVersion.WithKind(kind + "List")) @@ -320,6 +317,7 @@ func (m *RESTManager) genUnstructuredRESTStorage(gvr schema.GroupVersionResource }, Storage: resourceStorage, + Kind: kind, }, nil } diff --git a/pkg/storage/internalstorage/resource_storage.go b/pkg/storage/internalstorage/resource_storage.go index c306ac9b6..4d7ca22d7 100644 --- a/pkg/storage/internalstorage/resource_storage.go +++ b/pkg/storage/internalstorage/resource_storage.go @@ -7,10 +7,11 @@ import ( "fmt" "reflect" "strconv" + "sync" + "time" "gorm.io/datatypes" "gorm.io/gorm" - apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -21,9 +22,13 @@ import ( "k8s.io/apimachinery/pkg/watch" genericstorage "k8s.io/apiserver/pkg/storage" "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" internal "github.com/clusterpedia-io/api/clusterpedia" "github.com/clusterpedia-io/clusterpedia/pkg/storage" + "github.com/clusterpedia-io/clusterpedia/pkg/utils" + watchutil "github.com/clusterpedia-io/clusterpedia/pkg/utils/watch" + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" ) type ResourceStorage struct { @@ -33,6 +38,18 @@ type ResourceStorage struct { storageGroupResource schema.GroupResource storageVersion schema.GroupVersion memoryVersion schema.GroupVersion + + buffer *watchcomponents.MultiClusterBuffer + watchLock sync.Mutex + + eventCache *watchcomponents.EventCache + Namespaced bool + + eventChan chan *watchcomponents.EventWithCluster + + newFunc func() runtime.Object + + KeyFunc func(runtime.Object) (string, error) } func (s *ResourceStorage) GetStorageConfig() *storage.ResourceStorageConfig { @@ -44,7 +61,7 @@ func (s *ResourceStorage) GetStorageConfig() *storage.ResourceStorageConfig { } } -func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj 
runtime.Object) error { +func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtime.Object, crvUpdated bool) error { gvk := obj.GetObjectKind().GroupVersionKind() if gvk.Kind == "" { return fmt.Errorf("%s: kind is required", gvk) @@ -55,6 +72,24 @@ func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtim return err } + //deleted object could be created again + condition := map[string]interface{}{ + "namespace": metaobj.GetNamespace(), + "name": metaobj.GetName(), + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + "deleted": true, + } + if cluster != "" { + condition["cluster"] = cluster + } + dbResult := s.db.Model(&Resource{}).Where(condition).Delete(&Resource{}) + if dbResult.Error != nil { + err = InterpretResourceDBError(cluster, metaobj.GetName(), dbResult.Error) + return fmt.Errorf("[Create]: Object %s/%s has been created failed in step one, err: %v", metaobj.GetName(), metaobj.GetNamespace(), err) + } + var ownerUID types.UID if owner := metav1.GetControllerOfNoCopy(metaobj); owner != nil { ownerUID = owner.UID @@ -66,18 +101,19 @@ func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtim } resource := Resource{ - Cluster: cluster, - OwnerUID: ownerUID, - UID: metaobj.GetUID(), - Name: metaobj.GetName(), - Namespace: metaobj.GetNamespace(), - Group: s.storageGroupResource.Group, - Resource: s.storageGroupResource.Resource, - Version: s.storageVersion.Version, - Kind: gvk.Kind, - ResourceVersion: metaobj.GetResourceVersion(), - Object: buffer.Bytes(), - CreatedAt: metaobj.GetCreationTimestamp().Time, + Cluster: cluster, + OwnerUID: ownerUID, + UID: metaobj.GetUID(), + Name: metaobj.GetName(), + Namespace: metaobj.GetNamespace(), + Group: s.storageGroupResource.Group, + Resource: s.storageGroupResource.Resource, + Version: s.storageVersion.Version, + Kind: gvk.Kind, + ResourceVersion: metaobj.GetResourceVersion(), 
+ ClusterResourceVersion: 0, + Object: buffer.Bytes(), + CreatedAt: metaobj.GetCreationTimestamp().Time, } if deletedAt := metaobj.GetDeletionTimestamp(); deletedAt != nil { resource.DeletedAt = sql.NullTime{Time: deletedAt.Time, Valid: true} @@ -87,7 +123,7 @@ func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtim return InterpretResourceDBError(cluster, metaobj.GetName(), result.Error) } -func (s *ResourceStorage) Update(ctx context.Context, cluster string, obj runtime.Object) error { +func (s *ResourceStorage) Update(ctx context.Context, cluster string, obj runtime.Object, crvUpdated bool) error { metaobj, err := meta.Accessor(obj) if err != nil { return err @@ -105,13 +141,27 @@ func (s *ResourceStorage) Update(ctx context.Context, cluster string, obj runtim // The uid may not be the same for resources with the same namespace/name // in the same cluster at different times. - updatedResource := map[string]interface{}{ - "owner_uid": ownerUID, - "uid": metaobj.GetUID(), - "resource_version": metaobj.GetResourceVersion(), - "object": datatypes.JSON(buffer.Bytes()), - "created_at": metaobj.GetCreationTimestamp().Time, + var updatedResource map[string]interface{} + if crvUpdated { + updatedResource = map[string]interface{}{ + "owner_uid": ownerUID, + "uid": metaobj.GetUID(), + "resource_version": metaobj.GetResourceVersion(), + "object": datatypes.JSON(buffer.Bytes()), + "created_at": metaobj.GetCreationTimestamp().Time, + "published": false, + "deleted": false, + } + } else { + updatedResource = map[string]interface{}{ + "owner_uid": ownerUID, + "uid": metaobj.GetUID(), + "resource_version": metaobj.GetResourceVersion(), + "object": datatypes.JSON(buffer.Bytes()), + "created_at": metaobj.GetCreationTimestamp().Time, + } } + if deletedAt := metaobj.GetDeletionTimestamp(); deletedAt != nil { updatedResource["deleted_at"] = sql.NullTime{Time: deletedAt.Time, Valid: true} } @@ -154,63 +204,186 @@ func (s *ResourceStorage) deleteObject(cluster, 
namespace, name string) *gorm.DB }).Delete(&Resource{}) } -func (s *ResourceStorage) Delete(ctx context.Context, cluster string, obj runtime.Object) error { +func (s *ResourceStorage) Delete(ctx context.Context, cluster string, obj runtime.Object, crvUpdated bool) error { metaobj, err := meta.Accessor(obj) if err != nil { return err } - if result := s.deleteObject(cluster, metaobj.GetNamespace(), metaobj.GetName()); result.Error != nil { - return InterpretResourceDBError(cluster, metaobj.GetName(), result.Error) + // The uid may not be the same for resources with the same namespace/name + // in the same cluster at different times. + var updatedResource map[string]interface{} + if crvUpdated { + updatedResource = map[string]interface{}{ + "resource_version": metaobj.GetResourceVersion(), + "deleted": true, + "published": false, + } + if deletedAt := metaobj.GetDeletionTimestamp(); deletedAt != nil { + updatedResource["deleted_at"] = sql.NullTime{Time: deletedAt.Time, Valid: true} + } + } else { + updatedResource = map[string]interface{}{ + "deleted": true, + } } - return nil + + condition := map[string]interface{}{ + "cluster": cluster, + "namespace": metaobj.GetNamespace(), + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + } + if metaobj.GetName() != "" { + condition["name"] = metaobj.GetName() + } + + result := s.db.WithContext(ctx).Model(&Resource{}).Where(condition).Updates(updatedResource) + return InterpretResourceDBError(cluster, metaobj.GetName(), result.Error) } func (s *ResourceStorage) genGetObjectQuery(ctx context.Context, cluster, namespace, name string) *gorm.DB { - return s.db.WithContext(ctx).Model(&Resource{}).Select("object").Where(map[string]interface{}{ + condition := map[string]interface{}{ + "namespace": namespace, + "name": name, + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + 
"deleted": false, + } + + if cluster != "" { + condition["cluster"] = cluster + } + return s.db.WithContext(ctx).Model(&Resource{}).Select("cluster_resource_version, object").Where(condition) +} + +func (s *ResourceStorage) GetObj(ctx context.Context, cluster, namespace, name string) (runtime.Object, error) { + var resource Resource + condition := map[string]interface{}{ + "namespace": namespace, + "name": name, "cluster": cluster, "group": s.storageGroupResource.Group, "version": s.storageVersion.Version, "resource": s.storageGroupResource.Resource, + } + + result := s.db.WithContext(ctx).Model(&Resource{}). + Select("cluster_resource_version, object").Where(condition).First(&resource) + if result.Error != nil { + return nil, InterpretResourceDBError(cluster, namespace+"/"+name, result.Error) + } + + into := s.newFunc() + obj, _, err := s.codec.Decode(resource.Object, nil, into) + if err != nil { + return nil, err + } + if obj != into { + return nil, fmt.Errorf("Failed to decode resource, into is %T", into) + } + + return obj, nil +} + +func (s *ResourceStorage) GenGetObjectQuery(ctx context.Context, cluster, namespace, name string) *gorm.DB { + condition := map[string]interface{}{ "namespace": namespace, "name": name, - }) + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + "deleted": false, + } + + if cluster != "" { + condition["cluster"] = cluster + } + return s.db.WithContext(ctx).Model(&Resource{}).Select("cluster_resource_version, object").Where(condition) } func (s *ResourceStorage) Get(ctx context.Context, cluster, namespace, name string, into runtime.Object) error { - var objects [][]byte - if result := s.genGetObjectQuery(ctx, cluster, namespace, name).First(&objects); result.Error != nil { + var resource Resource + if result := s.GenGetObjectQuery(ctx, cluster, namespace, name).First(&resource); result.Error != nil { return InterpretResourceDBError(cluster, namespace+"/"+name, 
result.Error) } - obj, _, err := s.codec.Decode(objects[0], nil, into) + obj, _, err := s.codec.Decode(resource.Object, nil, into) if err != nil { return err } if obj != into { return fmt.Errorf("Failed to decode resource, into is %T", into) } + metaObj, err := meta.Accessor(obj) + if err != nil { + return err + } + metaObj.SetResourceVersion(utils.ParseInt642Str(resource.ClusterResourceVersion)) return nil } -func (s *ResourceStorage) genListObjectsQuery(ctx context.Context, opts *internal.ListOptions) (int64, *int64, *gorm.DB, ObjectList, error) { - var result ObjectList = &BytesList{} +func (s *ResourceStorage) genListObjectsQuery(ctx context.Context, opts *internal.ListOptions, isAll bool) (int64, *int64, *gorm.DB, ObjectList, error) { + var result ObjectList = &ResourceList{} if opts.OnlyMetadata { result = &ResourceMetadataList{} } - query := s.db.WithContext(ctx).Model(&Resource{}) - query = query.Where(map[string]interface{}{ + condition := map[string]interface{}{ "group": s.storageGroupResource.Group, "version": s.storageVersion.Version, "resource": s.storageGroupResource.Resource, - }) + } + if !isAll { + condition["deleted"] = false + } + + query := s.db.WithContext(ctx).Model(&Resource{}).Where(condition) offset, amount, query, err := applyListOptionsToResourceQuery(s.db, query, opts) return offset, amount, query, result, err } +func (s *ResourceStorage) genListQuery(ctx context.Context, newfunc func() runtime.Object, opts *internal.ListOptions) ([]runtime.Object, error) { + var result [][]byte + + condition := map[string]interface{}{ + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + "deleted": false, + } + query := s.db.WithContext(ctx).Model(&Resource{}).Select("object").Where(condition) + _, _, query, err := applyListOptionsToResourceQuery(s.db, query, opts) + if err != nil { + return nil, err + } + queryResult := query.Find(&result) + if queryResult.Error != nil { + 
return nil, queryResult.Error + } + + length := len(result) + objList := make([]runtime.Object, length) + + for index, value := range result { + into := newfunc() + obj, _, err := s.codec.Decode(value, nil, into) + if err != nil { + return nil, err + } + if obj != into { + return nil, fmt.Errorf("Failed to decode resource, into is %T", into) + } + objList[index] = obj + } + + return objList, nil +} + func (s *ResourceStorage) List(ctx context.Context, listObject runtime.Object, opts *internal.ListOptions) error { - offset, amount, query, result, err := s.genListObjectsQuery(ctx, opts) + offset, amount, query, result, err := s.genListObjectsQuery(ctx, opts, true) if err != nil { return err } @@ -225,6 +398,18 @@ func (s *ResourceStorage) List(ctx context.Context, listObject runtime.Object, o return err } + objects, crvs, maxCrv, err := getObjectListAndMaxCrv(objects, opts.OnlyMetadata) + if err != nil { + return err + } + if !utils.IsListOptsEmpty(opts) { + maxCrv, err = s.GetMaxCrv(ctx) + if err != nil { + return err + } + } + list.SetResourceVersion(maxCrv) + if opts.WithContinue != nil && *opts.WithContinue { if int64(len(objects)) == opts.Limit { list.SetContinue(strconv.FormatInt(offset+opts.Limit, 10)) @@ -242,15 +427,22 @@ func (s *ResourceStorage) List(ctx context.Context, listObject runtime.Object, o return nil } + accessor := meta.NewAccessor() + if unstructuredList, ok := listObject.(*unstructured.UnstructuredList); ok { unstructuredList.Items = make([]unstructured.Unstructured, 0, len(objects)) - for _, object := range objects { + for i, object := range objects { uObj := &unstructured.Unstructured{} obj, err := object.ConvertTo(s.codec, uObj) if err != nil { return err } + err = accessor.SetResourceVersion(obj, crvs[i]) + if err != nil { + return fmt.Errorf("set resourceVersion failed: %v, unstructuredList", err) + } + uObj, ok := obj.(*unstructured.Unstructured) if !ok { return genericstorage.NewInternalError("the converted object is not 
*unstructured.Unstructured") @@ -280,21 +472,202 @@ func (s *ResourceStorage) List(ctx context.Context, listObject runtime.Object, o return fmt.Errorf("need ptr to slice: %v", err) } - slice := reflect.MakeSlice(v.Type(), len(objects), len(objects)) + dedup := make(map[string]bool) expected := reflect.New(v.Type().Elem()).Interface().(runtime.Object) - for i, object := range objects { + var dedupObjects []runtime.Object + for _, object := range objects { obj, err := object.ConvertTo(s.codec, expected.DeepCopyObject()) if err != nil { return err } + + resourceKey, err := s.KeyFunc(obj) + if err != nil { + return fmt.Errorf("keyfunc failed: %v, structedList", err) + } else { + if dedup[resourceKey] { + continue + } + dedup[resourceKey] = true + dedupObjects = append(dedupObjects, obj) + } + } + + slice := reflect.MakeSlice(v.Type(), len(dedupObjects), len(dedupObjects)) + for i, obj := range dedupObjects { + err = accessor.SetResourceVersion(obj, crvs[i]) + if err != nil { + return fmt.Errorf("set resourceVersion failed: %v, structedList", err) + } slice.Index(i).Set(reflect.ValueOf(obj).Elem()) } v.Set(slice) return nil } -func (s *ResourceStorage) Watch(_ context.Context, _ *internal.ListOptions) (watch.Interface, error) { - return nil, apierrors.NewMethodNotSupported(s.storageGroupResource, "watch") +func (s *ResourceStorage) Watch(ctx context.Context, newfunc func() runtime.Object, options *internal.ListOptions, gvk schema.GroupVersionKind) (watch.Interface, error) { + s.watchLock.Lock() + defer s.watchLock.Unlock() + initEvents, err := s.fetchInitEvents(ctx, options.ResourceVersion, newfunc, options) + if err != nil { + // To match the uncached watch implementation, once we have passed authn/authz/admission, + // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, + // rather than a directly returned error. 
+ return newErrWatcher(err), nil + } + + watcher, err := watchcomponents.NewPredicateWatch(ctx, options, gvk, s.Namespaced) + if err != nil { + return newErrWatcher(err), nil + } + s.buffer.AppendWatcherBuffer(watcher) + + watcher.SetForget(func() { + s.buffer.ForgetWatcher(watcher) + }) + + go watcher.Process(ctx, initEvents) + return watcher, nil +} + +func (s *ResourceStorage) ProcessEvent(ctx context.Context, eventType watch.EventType, obj runtime.Object, cluster string) error { + newObj := obj.DeepCopyObject() + event := watch.Event{ + Object: newObj, + Type: eventType, + } + s.eventChan <- &watchcomponents.EventWithCluster{ + Cluster: cluster, + Event: &event, + } + + return nil +} + +func (s *ResourceStorage) fetchInitEvents(ctx context.Context, rv string, newfunc func() runtime.Object, opts *internal.ListOptions) ([]*watch.Event, error) { + if rv == "" { + objects, err := s.genListQuery(ctx, newfunc, opts) + if err != nil { + return nil, err + } + + result := make([]*watch.Event, len(objects)) + for index, value := range objects { + event := &watch.Event{ + Object: value, + Type: watch.Added, + } + result[index] = event + } + return result, nil + } else { + result, err := s.eventCache.GetEvents(rv, func() (string, error) { + return s.GetMaxCrv(ctx) + }) + if err != nil { + return nil, err + } + return result, nil + } +} + +func getObjectListAndMaxCrv(objList []Object, onlyMetada bool) ([]Object, []string, string, error) { + crvs := make([]string, 0, len(objList)) + var maxCrv int64 = 0 + + var objListNeed []Object + if onlyMetada { + for _, object := range objList { + if metadata, ok := object.(ResourceMetadata); ok { + if utils.IsBigger(metadata.ClusterResourceVersion, maxCrv) { + maxCrv = metadata.ClusterResourceVersion + } + + if metadata.Deleted { + continue + } + objListNeed = append(objListNeed, object) + crvs = append(crvs, utils.ParseInt642Str(metadata.ClusterResourceVersion)) + } else { + return nil, nil, "0", fmt.Errorf("unknown object type") + } 
+ } + return objList, crvs, utils.ParseInt642Str(maxCrv), nil + } else { + for _, object := range objList { + if resource, ok := object.(Resource); ok { + if utils.IsBigger(resource.ClusterResourceVersion, maxCrv) { + maxCrv = resource.ClusterResourceVersion + } + if resource.Deleted { + continue + } + var b Bytes = []byte(resource.Object) + crvs = append(crvs, utils.ParseInt642Str(resource.ClusterResourceVersion)) + objListNeed = append(objListNeed, b) + } else { + return nil, nil, "0", fmt.Errorf("unknown object type") + } + } + return objListNeed, crvs, utils.ParseInt642Str(maxCrv), nil + } +} + +func (s *ResourceStorage) GetMaxCrv(ctx context.Context) (string, error) { + maxCrv := "0" + var metadataList ResourceMetadataList + condition := map[string]interface{}{ + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + } + result := s.db.WithContext(ctx).Model(&Resource{}).Select("cluster_resource_version").Where(condition).Order("cluster_resource_version DESC").Limit(1).Find(&metadataList) + if result.Error != nil { + return maxCrv, InterpretResourceDBError("", s.storageGroupResource.Resource, result.Error) + } + for _, metadata := range metadataList { + maxCrv = utils.ParseInt642Str(metadata.ClusterResourceVersion) + } + return maxCrv, nil +} + +// PublishEvent update column `ClusterResourceVersion` and `published` when event send to messagequeue middleware success. 
+func (s *ResourceStorage) PublishEvent(ctx context.Context, wc *watchcomponents.EventWithCluster) { + metaObj, err := meta.Accessor(wc.Event.Object) + if err != nil { + return + } + + crv, err := utils.ParseStr2Int64(metaObj.GetResourceVersion()) + if err != nil { + klog.Errorf("Crv failed to convert int64, name: %s, namespace: %s, cluster: %s, err: %v", + metaObj.GetName(), metaObj.GetNamespace(), wc.Cluster, err) + } + // The uid may not be the same for resources with the same namespace/name + // in the same cluster at different times. + updatedResource := map[string]interface{}{ + "ClusterResourceVersion": crv, + "published": true, + } + + condition := map[string]interface{}{ + "group": s.storageGroupResource.Group, + "version": s.storageVersion.Version, + "resource": s.storageGroupResource.Resource, + "cluster": wc.Cluster, + "namespace": metaObj.GetNamespace(), + "name": metaObj.GetName(), + } + + s.db.WithContext(ctx).Model(&Resource{}).Where(condition).Updates(updatedResource) +} + +func (s *ResourceStorage) GenCrv2Event(event *watch.Event) { + accessor := meta.NewAccessor() + err := accessor.SetResourceVersion(event.Object, utils.ParseInt642Str(time.Now().UnixMicro())) + if err != nil { + klog.Errorf("set resourceVersion failed: %v, may be it's a clear event", err) + } } func applyListOptionsToResourceQuery(db *gorm.DB, query *gorm.DB, opts *internal.ListOptions) (int64, *int64, *gorm.DB, error) { @@ -338,3 +711,26 @@ func applyOwnerToResourceQuery(db *gorm.DB, query *gorm.DB, opts *internal.ListO } return query, nil } + +type errWatcher struct { + result chan watch.Event +} + +func newErrWatcher(err error) *errWatcher { + errEvent := watchutil.NewErrorEvent(err) + + // Create a watcher with room for a single event, populate it, and close the channel + watcher := &errWatcher{result: make(chan watch.Event, 1)} + watcher.result <- errEvent + close(watcher.result) + + return watcher +} + +func (c *errWatcher) ResultChan() <-chan watch.Event { + return 
c.result +} + +func (c *errWatcher) Stop() { + // no-op +} diff --git a/pkg/storage/internalstorage/resource_storage_test.go b/pkg/storage/internalstorage/resource_storage_test.go index 4e44168d9..5cc5b6e92 100644 --- a/pkg/storage/internalstorage/resource_storage_test.go +++ b/pkg/storage/internalstorage/resource_storage_test.go @@ -200,8 +200,8 @@ func TestResourceStorage_genGetObjectQuery(t *testing.T) { "", "", expected{ - `SELECT "object" FROM "resources" WHERE "cluster" = '' AND "group" = '' AND "name" = '' AND "namespace" = '' AND "resource" = '' AND "version" = '' ORDER BY "resources"."id" LIMIT 1`, - "SELECT `object` FROM `resources` WHERE `cluster` = '' AND `group` = '' AND `name` = '' AND `namespace` = '' AND `resource` = '' AND `version` = '' ORDER BY `resources`.`id` LIMIT 1", + `SELECT cluster_resource_version, object FROM "resources" WHERE "deleted" = false AND "group" = '' AND "name" = '' AND "namespace" = '' AND "resource" = '' AND "version" = '' ORDER BY "resources"."id" LIMIT 1`, + "SELECT cluster_resource_version, object FROM `resources` WHERE `deleted` = false AND `group` = '' AND `name` = '' AND `namespace` = '' AND `resource` = '' AND `version` = '' ORDER BY `resources`.`id` LIMIT 1", "", }, }, @@ -212,8 +212,8 @@ func TestResourceStorage_genGetObjectQuery(t *testing.T) { "ns-1", "resource-1", expected{ - `SELECT "object" FROM "resources" WHERE "cluster" = 'cluster-1' AND "group" = 'apps' AND "name" = 'resource-1' AND "namespace" = 'ns-1' AND "resource" = 'deployments' AND "version" = 'v1' ORDER BY "resources"."id" LIMIT 1`, - "SELECT `object` FROM `resources` WHERE `cluster` = 'cluster-1' AND `group` = 'apps' AND `name` = 'resource-1' AND `namespace` = 'ns-1' AND `resource` = 'deployments' AND `version` = 'v1' ORDER BY `resources`.`id` LIMIT 1", + `SELECT cluster_resource_version, object FROM "resources" WHERE "cluster" = 'cluster-1' AND "deleted" = false AND "group" = 'apps' AND "name" = 'resource-1' AND "namespace" = 'ns-1' AND "resource" 
= 'deployments' AND "version" = 'v1' ORDER BY "resources"."id" LIMIT 1`, + "SELECT cluster_resource_version, object FROM `resources` WHERE `cluster` = 'cluster-1' AND `deleted` = false AND `group` = 'apps' AND `name` = 'resource-1' AND `namespace` = 'ns-1' AND `resource` = 'deployments' AND `version` = 'v1' ORDER BY `resources`.`id` LIMIT 1", "", }, }, @@ -268,7 +268,7 @@ func TestResourceStorage_genListObjectQuery(t *testing.T) { postgreSQL, err := toSQL(postgresDB.Session(&gorm.Session{DryRun: true}), test.listOptions, func(db *gorm.DB, options *internal.ListOptions) (*gorm.DB, error) { rs := newTestResourceStorage(db, test.resource) - _, _, query, _, err := rs.genListObjectsQuery(context.TODO(), options) + _, _, query, _, err := rs.genListObjectsQuery(context.TODO(), options, true) return query, err }, ) @@ -284,7 +284,7 @@ func TestResourceStorage_genListObjectQuery(t *testing.T) { mysqlSQL, err := toSQL(mysqlDBs[version].Session(&gorm.Session{DryRun: true}), test.listOptions, func(db *gorm.DB, options *internal.ListOptions) (*gorm.DB, error) { rs := newTestResourceStorage(db, test.resource) - _, _, query, _, err := rs.genListObjectsQuery(context.TODO(), options) + _, _, query, _, err := rs.genListObjectsQuery(context.TODO(), options, true) return query, err }, ) @@ -379,7 +379,7 @@ func TestResourceStorage_Update(t *testing.T) { factory := storageconfig.NewStorageConfigFactory() require.NotNil(factory) - config, err := factory.NewLegacyResourceConfig(schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"}, true) + config, err := factory.NewLegacyResourceConfig(schema.GroupResource{Group: appsv1.SchemeGroupVersion.Group, Resource: "deployments"}, true, "Deployment") require.NoError(err) require.NotNil(config) @@ -425,7 +425,7 @@ func TestResourceStorage_Update(t *testing.T) { clusterName := "test" - err = rs.Create(context.Background(), clusterName, obj) + err = rs.Create(context.Background(), clusterName, obj, true) 
require.NoError(err) var resourcesAfterCreation []Resource @@ -445,7 +445,7 @@ func TestResourceStorage_Update(t *testing.T) { "foo2": "bar2", } - err = rs.Update(context.Background(), clusterName, obj) + err = rs.Update(context.Background(), clusterName, obj, true) require.NoError(err) var resourcesAfterUpdates []Resource diff --git a/pkg/storage/internalstorage/storage.go b/pkg/storage/internalstorage/storage.go index fdf3bccb5..42a265b6a 100644 --- a/pkg/storage/internalstorage/storage.go +++ b/pkg/storage/internalstorage/storage.go @@ -6,9 +6,14 @@ import ( "gorm.io/gorm" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" internal "github.com/clusterpedia-io/api/clusterpedia" "github.com/clusterpedia-io/clusterpedia/pkg/storage" + "github.com/clusterpedia-io/clusterpedia/pkg/synchromanager/clustersynchro/informer" + "github.com/clusterpedia-io/clusterpedia/pkg/utils" + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" ) type StorageFactory struct { @@ -19,15 +24,69 @@ func (s *StorageFactory) GetSupportedRequestVerbs() []string { return []string{"get", "list"} } -func (s *StorageFactory) NewResourceStorage(config *storage.ResourceStorageConfig) (storage.ResourceStorage, error) { - return &ResourceStorage{ +func (s *StorageFactory) NewResourceStorage(config *storage.ResourceStorageConfig, initEventCache bool) (storage.ResourceStorage, error) { + gvr := schema.GroupVersionResource{ + Group: config.StorageGroupResource.Group, + Version: config.StorageVersion.Version, + Resource: config.StorageGroupResource.Resource, + } + + resourceStorage := &ResourceStorage{ db: s.db, codec: config.Codec, storageGroupResource: config.StorageGroupResource, storageVersion: config.StorageVersion, memoryVersion: config.MemoryVersion, - }, nil + + Namespaced: config.Namespaced, + newFunc: config.NewFunc, + KeyFunc: utils.GetKeyFunc(gvr, config.Namespaced), + } + + 
// SubscriberEnabled is true when Apiserver starts and middleware enabled + if middleware.SubscriberEnabled { + var cache *watchcomponents.EventCache + buffer := watchcomponents.GetMultiClusterEventPool().GetClusterBufferByGVR(gvr) + cachePool := watchcomponents.GetInitEventCachePool() + cache = cachePool.GetWatchEventCacheByGVR(gvr) + err := middleware.GlobalSubscriber.SubscribeTopic(gvr, config.Codec, config.NewFunc) + if err != nil { + return nil, err + } + enqueueFunc := func(event *watch.Event) { + if event.Type != watch.Error { + cache.Enqueue(event) + } + err := buffer.ProcessEvent(event.Object, event.Type) + if err != nil { + return + } + } + clearfunc := func() { + cache.Clear() + } + err = middleware.GlobalSubscriber.EventReceiving(gvr, enqueueFunc, clearfunc) + if err != nil { + return nil, err + } + + resourceStorage.buffer = buffer + resourceStorage.eventCache = cache + } else if middleware.PublisherEnabled { // PublisherEnabled is true when clustersynchro-manager starts and middleware enabled + err := middleware.GlobalPublisher.PublishTopic(gvr, config.Codec) + if err != nil { + return nil, err + } + err = middleware.GlobalPublisher.EventSending(gvr, watchcomponents.EC.StartChan, resourceStorage.PublishEvent, resourceStorage.GenCrv2Event) + if err != nil { + return nil, err + } + + resourceStorage.eventChan = watchcomponents.EC.StartChan(gvr) + } + + return resourceStorage, nil } func (s *StorageFactory) NewCollectionResourceStorage(cr *internal.CollectionResource) (storage.CollectionResourceStorage, error) { @@ -41,8 +100,11 @@ func (s *StorageFactory) NewCollectionResourceStorage(cr *internal.CollectionRes func (f *StorageFactory) GetResourceVersions(ctx context.Context, cluster string) (map[schema.GroupVersionResource]map[string]interface{}, error) { var resources []Resource - result := f.db.WithContext(ctx).Select("group", "version", "resource", "namespace", "name", "resource_version"). - Where(map[string]interface{}{"cluster": cluster}). 
+ result := f.db.WithContext(ctx).Select("group", "version", "resource", + "namespace", "name", "resource_version", "deleted", "published"). + Where(map[string]interface{}{"cluster": cluster, "deleted": false}). + //In case deleted event be losted when synchro manager do a leaderelection or reboot + Or(map[string]interface{}{"cluster": cluster, "deleted": true, "published": false}). Find(&resources) if result.Error != nil { return nil, InterpretDBError(cluster, result.Error) @@ -61,7 +123,13 @@ func (f *StorageFactory) GetResourceVersions(ctx context.Context, cluster string if resource.Namespace != "" { key = resource.Namespace + "/" + resource.Name } - versions[key] = resource.ResourceVersion + versions[key] = informer.StorageElement{ + Version: resource.ResourceVersion, + Deleted: resource.Deleted, + Published: resource.Published, + Name: resource.Name, + Namespace: resource.Namespace, + } } return resourceversions, nil } diff --git a/pkg/storage/internalstorage/types.go b/pkg/storage/internalstorage/types.go index 30daebab9..b4e92cc25 100644 --- a/pkg/storage/internalstorage/types.go +++ b/pkg/storage/internalstorage/types.go @@ -55,18 +55,22 @@ type Resource struct { Resource string `gorm:"size:63;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name;index:idx_group_version_resource_namespace_name;index:idx_group_version_resource_name"` Kind string `gorm:"size:63;not null"` - Cluster string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:100;index:idx_cluster"` - Namespace string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:50;index:idx_group_version_resource_namespace_name"` - Name string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:100;index:idx_group_version_resource_namespace_name;index:idx_group_version_resource_name"` - OwnerUID types.UID `gorm:"column:owner_uid;size:36;not null;default:''"` - UID 
types.UID `gorm:"size:36;not null"` - ResourceVersion string `gorm:"size:30;not null"` + Cluster string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:100;index:idx_cluster"` + Namespace string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:50;index:idx_group_version_resource_namespace_name"` + Name string `gorm:"size:253;not null;uniqueIndex:uni_group_version_resource_cluster_namespace_name,length:100;index:idx_group_version_resource_namespace_name;index:idx_group_version_resource_name"` + OwnerUID types.UID `gorm:"column:owner_uid;size:36;not null;default:''"` + UID types.UID `gorm:"size:36;not null"` + ResourceVersion string `gorm:"size:30;not null"` + ClusterResourceVersion int64 Object datatypes.JSON `gorm:"not null"` CreatedAt time.Time `gorm:"not null"` SyncedAt time.Time `gorm:"not null;autoUpdateTime"` DeletedAt sql.NullTime + + Deleted bool `gorm:"default:false"` + Published bool `gorm:"default:false"` } func (res Resource) GroupVersionResource() schema.GroupVersionResource { @@ -100,9 +104,10 @@ func (res Resource) ConvertTo(codec runtime.Codec, object runtime.Object) (runti } type ResourceMetadata struct { - ResourceType `gorm:"embedded"` - - Metadata datatypes.JSON + ResourceType `gorm:"embedded"` + ClusterResourceVersion int64 + Metadata datatypes.JSON + Deleted bool } func (data ResourceMetadata) ConvertToUnstructured() (*unstructured.Unstructured, error) { diff --git a/pkg/storage/memorystorage/memory_resource_storage.go b/pkg/storage/memorystorage/memory_resource_storage.go index 055cda5ff..3a2783c80 100644 --- a/pkg/storage/memorystorage/memory_resource_storage.go +++ b/pkg/storage/memorystorage/memory_resource_storage.go @@ -20,6 +20,7 @@ import ( "github.com/clusterpedia-io/clusterpedia/pkg/storage" cache "github.com/clusterpedia-io/clusterpedia/pkg/storage/memorystorage/watchcache" utilwatch "github.com/clusterpedia-io/clusterpedia/pkg/utils/watch" + 
watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" ) var ( @@ -56,7 +57,7 @@ func (s *ResourceStorage) GetStorageConfig() *storage.ResourceStorageConfig { return s.storageConfig } -func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtime.Object) error { +func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtime.Object, _ bool) error { resourceVersion, err := s.CrvSynchro.UpdateClusterResourceVersion(obj, cluster) if err != nil { return err @@ -70,7 +71,7 @@ func (s *ResourceStorage) Create(ctx context.Context, cluster string, obj runtim return nil } -func (s *ResourceStorage) Update(ctx context.Context, cluster string, obj runtime.Object) error { +func (s *ResourceStorage) Update(ctx context.Context, cluster string, obj runtime.Object, _ bool) error { resourceVersion, err := s.CrvSynchro.UpdateClusterResourceVersion(obj, cluster) if err != nil { return err @@ -102,7 +103,7 @@ func (s *ResourceStorage) ConvertDeletedObject(obj interface{}) (runobj runtime. 
return nil, fmt.Errorf("Invalid Type(%T): couldn't convert deleted object", obj) } -func (s *ResourceStorage) Delete(ctx context.Context, cluster string, obj runtime.Object) error { +func (s *ResourceStorage) Delete(ctx context.Context, cluster string, obj runtime.Object, _ bool) error { resourceVersion, err := s.CrvSynchro.UpdateClusterResourceVersion(obj, cluster) if err != nil { return err @@ -116,6 +117,10 @@ func (s *ResourceStorage) Delete(ctx context.Context, cluster string, obj runtim return nil } +func (s *ResourceStorage) GetObj(ctx context.Context, cluster, namespace, name string) (runtime.Object, error) { + return nil, nil +} + func (s *ResourceStorage) Get(ctx context.Context, cluster, namespace, name string, into runtime.Object) error { var buffer bytes.Buffer se, err := s.watchCache.WaitUntilFreshAndGet(cluster, namespace, name) @@ -140,6 +145,14 @@ func (s *ResourceStorage) Get(ctx context.Context, cluster, namespace, name stri return nil } +func (s *ResourceStorage) GetEventBuffer() *watchcomponents.MultiClusterBuffer { + return nil +} + +func (s *ResourceStorage) ProcessEvent(ctx context.Context, eventType watch.EventType, obj runtime.Object, cluster string) error { + return nil +} + // nolint func (s *ResourceStorage) newClusterWatchEvent(eventType watch.EventType, obj runtime.Object, cluster string) *ClusterWatchEvent { return &ClusterWatchEvent{ @@ -214,7 +227,7 @@ func (s *ResourceStorage) List(ctx context.Context, listObject runtime.Object, o return nil } -func (s *ResourceStorage) Watch(ctx context.Context, options *internal.ListOptions) (watch.Interface, error) { +func (s *ResourceStorage) Watch(ctx context.Context, _ func() runtime.Object, options *internal.ListOptions, _ schema.GroupVersionKind) (watch.Interface, error) { resourceversion := options.ResourceVersion watchRV, err := cache.NewClusterResourceVersionFromString(resourceversion) if err != nil { diff --git a/pkg/storage/memorystorage/memory_storage.go 
b/pkg/storage/memorystorage/memory_storage.go index 77209f538..3e343bd90 100644 --- a/pkg/storage/memorystorage/memory_storage.go +++ b/pkg/storage/memorystorage/memory_storage.go @@ -18,7 +18,7 @@ func (s *StorageFactory) GetSupportedRequestVerbs() []string { return []string{"get", "list", "watch"} } -func (s *StorageFactory) NewResourceStorage(config *storage.ResourceStorageConfig) (storage.ResourceStorage, error) { +func (s *StorageFactory) NewResourceStorage(config *storage.ResourceStorageConfig, _ bool) (storage.ResourceStorage, error) { storages.Lock() defer storages.Unlock() diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index a8df23fcf..507fe5188 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -20,7 +20,7 @@ type StorageFactory interface { GetResourceVersions(ctx context.Context, cluster string) (map[schema.GroupVersionResource]map[string]interface{}, error) GetCollectionResources(ctx context.Context) ([]*internal.CollectionResource, error) - NewResourceStorage(config *ResourceStorageConfig) (ResourceStorage, error) + NewResourceStorage(config *ResourceStorageConfig, initEventCache bool) (ResourceStorage, error) NewCollectionResourceStorage(cr *internal.CollectionResource) (CollectionResourceStorage, error) CleanCluster(ctx context.Context, cluster string) error @@ -32,13 +32,16 @@ type ResourceStorage interface { Get(ctx context.Context, cluster, namespace, name string, obj runtime.Object) error List(ctx context.Context, listObj runtime.Object, opts *internal.ListOptions) error - Watch(ctx context.Context, options *internal.ListOptions) (watch.Interface, error) + Watch(ctx context.Context, newfunc func() runtime.Object, options *internal.ListOptions, gvk schema.GroupVersionKind) (watch.Interface, error) - Create(ctx context.Context, cluster string, obj runtime.Object) error - Update(ctx context.Context, cluster string, obj runtime.Object) error + Create(ctx context.Context, cluster string, obj runtime.Object, crvUpdated 
bool) error + Update(ctx context.Context, cluster string, obj runtime.Object, crvUpdated bool) error ConvertDeletedObject(obj interface{}) (runtime.Object, error) - Delete(ctx context.Context, cluster string, obj runtime.Object) error + Delete(ctx context.Context, cluster string, obj runtime.Object, crvUpdated bool) error + + ProcessEvent(ctx context.Context, eventType watch.EventType, obj runtime.Object, cluster string) error + GetObj(ctx context.Context, cluster, namespace, name string) (runtime.Object, error) } type CollectionResourceStorage interface { @@ -55,6 +58,8 @@ type ResourceStorageConfig struct { StorageVersion schema.GroupVersion Codec runtime.Codec + + NewFunc func() runtime.Object } type storageRecoverableExceptionError struct { diff --git a/pkg/storageconfig/storageconfig_factory.go b/pkg/storageconfig/storageconfig_factory.go index 4462583bd..13d2680ca 100644 --- a/pkg/storageconfig/storageconfig_factory.go +++ b/pkg/storageconfig/storageconfig_factory.go @@ -1,6 +1,7 @@ package storageconfig import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" @@ -59,14 +60,14 @@ func (g *StorageConfigFactory) GetStorageGroupResource(groupResource schema.Grou return groupResource } -func (g *StorageConfigFactory) NewConfig(gvr schema.GroupVersionResource, namespaced bool) (*storage.ResourceStorageConfig, error) { +func (g *StorageConfigFactory) NewConfig(gvr schema.GroupVersionResource, namespaced bool, kind string) (*storage.ResourceStorageConfig, error) { if scheme.LegacyResourceScheme.IsGroupRegistered(gvr.Group) { - return g.NewLegacyResourceConfig(gvr.GroupResource(), namespaced) + return g.NewLegacyResourceConfig(gvr.GroupResource(), namespaced, kind) } - return g.NewUnstructuredConfig(gvr, namespaced) + return g.NewUnstructuredConfig(gvr, namespaced, kind) } -func (g *StorageConfigFactory) NewUnstructuredConfig(gvr 
schema.GroupVersionResource, namespaced bool) (*storage.ResourceStorageConfig, error) { +func (g *StorageConfigFactory) NewUnstructuredConfig(gvr schema.GroupVersionResource, namespaced bool, kind string) (*storage.ResourceStorageConfig, error) { version := gvr.GroupVersion() codec := versioning.NewCodec( scheme.UnstructuredCodecs, @@ -79,6 +80,13 @@ func (g *StorageConfigFactory) NewUnstructuredConfig(gvr schema.GroupVersionReso version, "unstructuredObjectStorage", ) + + newFunc := func() runtime.Object { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(version.WithKind(kind)) + return obj + } + return &storage.ResourceStorageConfig{ GroupResource: gvr.GroupResource(), StorageGroupResource: gvr.GroupResource(), @@ -86,10 +94,11 @@ func (g *StorageConfigFactory) NewUnstructuredConfig(gvr schema.GroupVersionReso StorageVersion: version, MemoryVersion: version, Namespaced: namespaced, + NewFunc: newFunc, }, nil } -func (g *StorageConfigFactory) NewLegacyResourceConfig(gr schema.GroupResource, namespaced bool) (*storage.ResourceStorageConfig, error) { +func (g *StorageConfigFactory) NewLegacyResourceConfig(gr schema.GroupResource, namespaced bool, kind string) (*storage.ResourceStorageConfig, error) { chosenStorageResource := g.GetStorageGroupResource(gr) storageVersion, err := g.legacyResourceEncodingConfig.StorageEncodingFor(chosenStorageResource) @@ -112,6 +121,11 @@ func (g *StorageConfigFactory) NewLegacyResourceConfig(gr schema.GroupResource, return nil, err } + newFunc := func() runtime.Object { + obj, _ := scheme.LegacyResourceScheme.New(memoryVersion.WithKind(kind)) + return obj + } + return &storage.ResourceStorageConfig{ GroupResource: gr, StorageGroupResource: chosenStorageResource, @@ -119,5 +133,6 @@ func (g *StorageConfigFactory) NewLegacyResourceConfig(gr schema.GroupResource, StorageVersion: codecConfig.StorageVersion, MemoryVersion: memoryVersion, Namespaced: namespaced, + NewFunc: newFunc, }, nil } diff --git 
a/pkg/synchromanager/clustersynchro/cluster_synchro.go b/pkg/synchromanager/clustersynchro/cluster_synchro.go index 527c7a259..002489ce5 100644 --- a/pkg/synchromanager/clustersynchro/cluster_synchro.go +++ b/pkg/synchromanager/clustersynchro/cluster_synchro.go @@ -342,7 +342,7 @@ func (s *ClusterSynchro) refreshSyncResources() { continue } - resourceStorage, err := s.storage.NewResourceStorage(config.storageConfig) + resourceStorage, err := s.storage.NewResourceStorage(config.storageConfig, false) if err != nil { klog.ErrorS(err, "Failed to create resource storage", "cluster", s.name, "storage resource", storageGVR) updateSyncConditions(storageGVR, clusterv1alpha2.ResourceSyncStatusPending, "SynchroCreateFailed", fmt.Sprintf("new resource storage failed: %s", err)) @@ -357,7 +357,7 @@ func (s *ClusterSynchro) refreshSyncResources() { var metricsStore *kubestatemetrics.MetricsStore if s.syncConfig.MetricsStoreBuilder != nil { - metricsStore = s.syncConfig.MetricsStoreBuilder.GetMetricStore(s.name, config.syncResource) + metricsStore = s.syncConfig.MetricsStoreBuilder.GetMetricStore(s.name, config.syncResource, config.kind) } synchro := newResourceSynchro(s.name, ResourceSynchroConfig{ diff --git a/pkg/synchromanager/clustersynchro/informer/resourceversion_informer.go b/pkg/synchromanager/clustersynchro/informer/resourceversion_informer.go index 2c8d6568f..f8c2cfeed 100644 --- a/pkg/synchromanager/clustersynchro/informer/resourceversion_informer.go +++ b/pkg/synchromanager/clustersynchro/informer/resourceversion_informer.go @@ -129,7 +129,11 @@ func (informer *resourceVersionInformer) HandleDeltas(deltas cache.Deltas, isInI var versioner storage.Versioner = storage.APIObjectVersioner{} -func compareResourceVersion(obj interface{}, rv string) int { +func compareResourceVersion(obj interface{}, rv *StorageElement) int { + if rv == nil || !rv.Published { + return 1 + } + object, ok := obj.(runtime.Object) if !ok { // TODO(clusterpedia-io): add log @@ -141,7 +145,7 @@ 
func compareResourceVersion(obj interface{}, rv string) int { return -1 } - version, err := versioner.ParseResourceVersion(rv) + version, err := versioner.ParseResourceVersion(rv.Version) if err != nil { return -1 } diff --git a/pkg/synchromanager/clustersynchro/informer/resourceversion_storage.go b/pkg/synchromanager/clustersynchro/informer/resourceversion_storage.go index 21645be95..839f5ace3 100644 --- a/pkg/synchromanager/clustersynchro/informer/resourceversion_storage.go +++ b/pkg/synchromanager/clustersynchro/informer/resourceversion_storage.go @@ -5,6 +5,14 @@ import ( "k8s.io/client-go/tools/cache" ) +type StorageElement struct { + Version string + Deleted bool + Published bool + Name string + Namespace string +} + type ResourceVersionStorage struct { keyFunc cache.KeyFunc @@ -25,12 +33,21 @@ func (c *ResourceVersionStorage) Add(obj interface{}) error { if err != nil { return cache.KeyError{Obj: obj, Err: err} } + + c.cacheStorage.Delete(key) + accessor, err := meta.Accessor(obj) if err != nil { return err } - c.cacheStorage.Add(key, accessor.GetResourceVersion()) + c.cacheStorage.Add(key, StorageElement{ + Version: accessor.GetResourceVersion(), + Deleted: false, + Published: true, + Name: accessor.GetName(), + Namespace: accessor.GetNamespace(), + }) return nil } @@ -44,7 +61,13 @@ func (c *ResourceVersionStorage) Update(obj interface{}) error { return err } - c.cacheStorage.Update(key, accessor.GetResourceVersion()) + c.cacheStorage.Update(key, StorageElement{ + Version: accessor.GetResourceVersion(), + Deleted: false, + Published: true, + Name: accessor.GetName(), + Namespace: accessor.GetNamespace(), + }) return nil } @@ -58,16 +81,26 @@ func (c *ResourceVersionStorage) Delete(obj interface{}) error { return nil } -func (c *ResourceVersionStorage) Get(obj interface{}) (string, bool, error) { +func (c *ResourceVersionStorage) Get(obj interface{}) (*StorageElement, bool, error) { key, err := c.keyFunc(obj) if err != nil { - return "", false, 
cache.KeyError{Obj: obj, Err: err} + return nil, false, cache.KeyError{Obj: obj, Err: err} } version, exists := c.cacheStorage.Get(key) - if exists { - return version.(string), exists, nil + if !exists { + return nil, false, nil + } + + var se StorageElement + var ok bool + if se, ok = version.(StorageElement); !ok { + return nil, false, nil + } + + if !se.Deleted { + return &se, exists, nil } - return "", false, nil + return nil, false, nil } func (c *ResourceVersionStorage) ListKeys() []string { diff --git a/pkg/synchromanager/clustersynchro/resource_negotiator.go b/pkg/synchromanager/clustersynchro/resource_negotiator.go index 42b111af8..1048fde30 100644 --- a/pkg/synchromanager/clustersynchro/resource_negotiator.go +++ b/pkg/synchromanager/clustersynchro/resource_negotiator.go @@ -124,7 +124,7 @@ func (negotiator *ResourceNegotiator) NegotiateSyncResources(syncResources []clu Reason: "SynchroCreating", } - storageConfig, err := negotiator.resourceStorageConfig.NewConfig(syncGVR, apiResource.Namespaced) + storageConfig, err := negotiator.resourceStorageConfig.NewConfig(syncGVR, apiResource.Namespaced, apiResource.Kind) if err != nil { syncCondition.Reason = "SynchroCreateFailed" syncCondition.Message = fmt.Sprintf("new resource storage config failed: %s", err) diff --git a/pkg/synchromanager/clustersynchro/resource_synchro.go b/pkg/synchromanager/clustersynchro/resource_synchro.go index dbf0a37a6..48ece7324 100644 --- a/pkg/synchromanager/clustersynchro/resource_synchro.go +++ b/pkg/synchromanager/clustersynchro/resource_synchro.go @@ -13,6 +13,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" genericstorage "k8s.io/apiserver/pkg/storage" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" @@ -26,6 +27,7 @@ import ( "github.com/clusterpedia-io/clusterpedia/pkg/synchromanager/features" "github.com/clusterpedia-io/clusterpedia/pkg/utils" clusterpediafeature 
"github.com/clusterpedia-io/clusterpedia/pkg/utils/feature" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" ) type ResourceSynchroConfig struct { @@ -100,7 +102,7 @@ func newResourceSynchro(cluster string, config ResourceSynchroConfig) *ResourceS rvs: config.ResourceVersions, // all resources saved to the queue are `runtime.Object` - queue: queue.NewPressureQueue(cache.MetaNamespaceKeyFunc), + queue: queue.NewPressureQueue(cache.DeletionHandlingMetaNamespaceKeyFunc), storage: config.ResourceStorage, convertor: config.ObjectConvertor, @@ -346,14 +348,26 @@ func (synchro *ResourceSynchro) OnDelete(obj interface{}) { if !synchro.isRunnableForStorage.Load() { return } + + if d, ok := obj.(cache.DeletedFinalStateUnknown); ok { + if obj, ok = d.Obj.(*unstructured.Unstructured); !ok { + namespace, name, err := cache.SplitMetaNamespaceKey(d.Key) + if err != nil { + return + } + obj = &metav1.PartialObjectMetadata{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}} + } + } + if o, ok := obj.(*unstructured.Unstructured); ok { synchro.pruneObject(o) } - obj, err := synchro.storage.ConvertDeletedObject(obj) - if err != nil { - return - } + // full obj is needed in watch feature + //obj, err := synchro.storage.ConvertDeletedObject(obj) + //if err != nil { + // return + //} _ = synchro.queue.Delete(obj) } @@ -386,12 +400,14 @@ func (synchro *ResourceSynchro) handleResourceEvent(event *queue.Event) { obj, ok := event.Object.(runtime.Object) if !ok { - return + if _, ok = event.Object.(cache.DeletedFinalStateUnknown); !ok { + return + } } key, _ := cache.MetaNamespaceKeyFunc(obj) - var callback func(obj runtime.Object) - var handler func(ctx context.Context, obj runtime.Object) error + var callback func(obj runtime.Object, eventType watch.EventType) + var handler func(ctx context.Context, obj runtime.Object) (watch.EventType, error) if event.Action != queue.Deleted { var err error if obj, err = synchro.convertToStorageVersion(obj); err != nil { @@ 
-407,27 +423,45 @@ func (synchro *ResourceSynchro) handleResourceEvent(event *queue.Event) { case queue.Updated: handler = synchro.updateOrCreateResource } - callback = func(obj runtime.Object) { + callback = func(obj runtime.Object, eventType watch.EventType) { metaobj, _ := meta.Accessor(obj) synchro.rvsLock.Lock() - synchro.rvs[key] = metaobj.GetResourceVersion() + synchro.rvs[key] = informer.StorageElement{ + Version: metaobj.GetResourceVersion(), + Name: metaobj.GetName(), + Namespace: metaobj.GetNamespace(), + Deleted: false, + Published: true, + } synchro.rvsLock.Unlock() + if middleware.PublisherEnabled { + err := synchro.storage.ProcessEvent(context.TODO(), eventType, obj, synchro.cluster) + if err != nil { + return + } + } } } else { - handler, callback = synchro.deleteResource, func(_ runtime.Object) { + handler, callback = synchro.deleteResource, func(_ runtime.Object, eventType watch.EventType) { synchro.rvsLock.Lock() delete(synchro.rvs, key) synchro.rvsLock.Unlock() + if middleware.PublisherEnabled { + err := synchro.storage.ProcessEvent(context.TODO(), eventType, obj, synchro.cluster) + if err != nil { + return + } + } } } // TODO(Iceber): put the event back into the queue to retry? for i := 0; ; i++ { ctx, cancel := context.WithTimeout(synchro.ctx, 30*time.Second) - err := handler(ctx, obj) + eventType, err := handler(ctx, obj) cancel() if err == nil { - callback(obj) + callback(obj, eventType) if !synchro.isRunnableForStorage.Load() && synchro.queue.Len() == 0 { // Start the informer after processing the data in the queue to ensure that storage is up and running for a period of time. 
@@ -535,24 +569,30 @@ func (synchro *ResourceSynchro) convertToStorageVersion(obj runtime.Object) (run return obj, nil } -func (synchro *ResourceSynchro) createOrUpdateResource(ctx context.Context, obj runtime.Object) error { - err := synchro.storage.Create(ctx, synchro.cluster, obj) +func (synchro *ResourceSynchro) createOrUpdateResource(ctx context.Context, obj runtime.Object) (watch.EventType, error) { + err := synchro.storage.Create(ctx, synchro.cluster, obj, true) if genericstorage.IsExist(err) { - return synchro.storage.Update(ctx, synchro.cluster, obj) + err = synchro.storage.Update(ctx, synchro.cluster, obj, true) + return watch.Added, err } - return err + return watch.Added, err } -func (synchro *ResourceSynchro) updateOrCreateResource(ctx context.Context, obj runtime.Object) error { - err := synchro.storage.Update(ctx, synchro.cluster, obj) +func (synchro *ResourceSynchro) updateOrCreateResource(ctx context.Context, obj runtime.Object) (watch.EventType, error) { + err := synchro.storage.Update(ctx, synchro.cluster, obj, true) if genericstorage.IsNotFound(err) { - return synchro.storage.Create(ctx, synchro.cluster, obj) + err = synchro.storage.Create(ctx, synchro.cluster, obj, true) + return watch.Modified, err } - return err + return watch.Modified, err } -func (synchro *ResourceSynchro) deleteResource(ctx context.Context, obj runtime.Object) error { - return synchro.storage.Delete(ctx, synchro.cluster, obj) +func (synchro *ResourceSynchro) deleteResource(ctx context.Context, obj runtime.Object) (watch.EventType, error) { + err := synchro.storage.Delete(ctx, synchro.cluster, obj, true) + if err != nil { + return watch.Deleted, err + } + return watch.Deleted, err } func (synchro *ResourceSynchro) setStatus(status string, reason, message string) { diff --git a/pkg/utils/util.go b/pkg/utils/util.go new file mode 100644 index 000000000..40218ee1f --- /dev/null +++ b/pkg/utils/util.go @@ -0,0 +1,112 @@ +package utils + +import ( + "context" + "strconv" + + 
"k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/registry/generic/registry" + + internal "github.com/clusterpedia-io/api/clusterpedia" +) + +func ParseInt642Str(crv int64) string { + return strconv.FormatInt(crv, 10) +} + +func ParseStr2Int64(crvStr string) (int64, error) { + crv, err := strconv.ParseInt(crvStr, 10, 64) + if err != nil { + return 0, err + } + return crv, nil +} + +func IsBigger(crv1 int64, crv2 int64) bool { + return crv1 > crv2 +} + +func IsEqual(crvStr1 string, crvStr2 string) bool { + return crvStr1 == crvStr2 +} + +type keyFunc func(runtime.Object) (string, error) + +func GetKeyFunc(gvr schema.GroupVersionResource, isNamespaced bool) keyFunc { + prefix := gvr.Group + "/" + gvr.Resource + + var KeyFunc func(ctx context.Context, name string) (string, error) + if isNamespaced { + KeyFunc = func(ctx context.Context, name string) (string, error) { + return registry.NamespaceKeyFunc(ctx, prefix, name) + } + } else { + KeyFunc = func(ctx context.Context, name string) (string, error) { + return registry.NoNamespaceKeyFunc(ctx, prefix, name) + } + } + + // We adapt the store's keyFunc so that we can use it with the StorageDecorator + // without making any assumptions about where objects are stored in etcd + kc := func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if isNamespaced { + return KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) + } + + return KeyFunc(genericapirequest.NewContext(), accessor.GetName()) + } + + return kc +} + +func IsListOptsEmpty(opts *internal.ListOptions) bool { + if opts == nil { + return true + } + + if opts.Names != nil { + return false + } + + if opts.Namespaces != nil { + return false + } + + if opts.ClusterNames != nil { + return 
false + } + + if opts.OwnerName != "" { + return false + } + + if opts.OwnerUID != "" { + return false + } + + if opts.Since != nil { + return false + } + + if opts.Before != nil { + return false + } + + if opts.EnhancedFieldSelector != nil { + return false + } + + if opts.ExtraLabelSelector != nil { + return false + } + + return true +} diff --git a/pkg/watcher/codec/event_codec.go b/pkg/watcher/codec/event_codec.go new file mode 100644 index 000000000..f0816219f --- /dev/null +++ b/pkg/watcher/codec/event_codec.go @@ -0,0 +1,66 @@ +package codec + +import ( + "bytes" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" +) + +const EVENTTYPE = "EventTypeForLabel" + +func EventEncode(eventType watch.EventType, obj runtime.Object, codec runtime.Codec) ([]byte, error) { + accessor := meta.NewAccessor() + labels, err := accessor.Labels(obj) + if err != nil { + return nil, err + } + if labels == nil { + labels = make(map[string]string) + } + labels[EVENTTYPE] = string(eventType) + err = accessor.SetLabels(obj, labels) + if err != nil { + return nil, err + } + + var buffer bytes.Buffer + if err := codec.Encode(obj, &buffer); err != nil { + return nil, err + } + + return buffer.Bytes(), nil +} + +func EventDecode(value []byte, codec runtime.Codec, newFunc func() runtime.Object) (*watch.Event, error) { + into := newFunc() + obj, _, err := codec.Decode(value, nil, into) + if err != nil { + return nil, err + } + + accessor := meta.NewAccessor() + labels, err := accessor.Labels(obj) + if err != nil { + return nil, err + } + var eventType string + if labels != nil { + eventType = labels[EVENTTYPE] + delete(labels, EVENTTYPE) + if eventType == "" { + return nil, fmt.Errorf("event can not find eventtype") + } + } + err = accessor.SetLabels(obj, labels) + if err != nil { + return nil, err + } + + return &watch.Event{ + Type: watch.EventType(eventType), + Object: obj, + }, nil +} diff --git 
a/pkg/watcher/components/event_cache.go b/pkg/watcher/components/event_cache.go new file mode 100644 index 000000000..c2c3331df --- /dev/null +++ b/pkg/watcher/components/event_cache.go @@ -0,0 +1,170 @@ +package components + +import ( + "strings" + "sync" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + + "github.com/clusterpedia-io/clusterpedia/pkg/utils" +) + +var cacheSize = 100 + +var ( + eventCachePool *EventCachePool + one sync.Once +) + +type EventCachePool struct { + lock sync.Mutex + cache map[schema.GroupVersionResource]*EventCache + stopCh <-chan struct{} +} + +type EventCache struct { + lock sync.RWMutex + // watcher watch.Interface + cache []*watch.Event + gvr schema.GroupVersionResource + // valid window of the cache ring buffer: indexes grow monotonically and wrap via modulo cacheSize + startIndex int + endIndex int +} + +func newEventCache(gvr schema.GroupVersionResource) *EventCache { + return &EventCache{ + cache: make([]*watch.Event, cacheSize), + gvr: gvr, + startIndex: 0, + endIndex: 0, + } +} + +func InitEventCacheSize(cs int) { + cacheSize = cs +} + +func InitEventCachePool(stopCh <-chan struct{}) *EventCachePool { + one.Do(func() { + eventCachePool = &EventCachePool{ + cache: map[schema.GroupVersionResource]*EventCache{}, + stopCh: stopCh, + } + }) + return eventCachePool +} + +func GetInitEventCachePool() *EventCachePool { + return eventCachePool +} + +func (e *EventCachePool) GetWatchEventCacheByGVR(gvr schema.GroupVersionResource) *EventCache { + e.lock.Lock() + defer e.lock.Unlock() + if watchcache, ok := e.cache[gvr]; ok { + return watchcache + } else { + ec := newEventCache(gvr) + e.cache[gvr] = ec + return ec + } +} + +func (e *EventCachePool) ClearCacheByGVR(gvr schema.GroupVersionResource) { + e.lock.Lock() + defer e.lock.Unlock() + ec := e.cache[gvr] + if ec != nil { + ec.Clear() + } +} + +func (e *EventCache) ExistsInCache(resourceVersion string) bool { + e.lock.RLock() + defer 
e.lock.RUnlock() + + accessor := meta.NewAccessor() + var index int + for index = 0; e.startIndex+index < e.endIndex; index++ { + value := e.cache[(e.startIndex+index)%cacheSize] + rv, _ := accessor.ResourceVersion(value.Object) + if strings.Compare(resourceVersion, rv) == 0 { + return true + } + } + + return false +} + +// GetEvents returns newer events by comparing crv +func (e *EventCache) GetEvents(crv string, getMaxCrv func() (string, error)) ([]*watch.Event, error) { + e.lock.RLock() + defer e.lock.RUnlock() + // When the cache is empty and the client watches with an RV, check whether that RV is already the latest. + if e.startIndex == e.endIndex { + maxCrv, err := getMaxCrv() + if err != nil { + return nil, err + } + if utils.IsEqual(maxCrv, crv) { + return make([]*watch.Event, 0), nil + } else { + return nil, apierrors.NewResourceExpired("resource version not found") + } + } else { + var found bool + accessor := meta.NewAccessor() + var index int + var start int + result := []*watch.Event{} + for index = 0; e.startIndex+index < e.endIndex; index++ { + value := e.cache[(e.startIndex+index)%cacheSize] + rv, err := accessor.ResourceVersion(value.Object) + if err != nil { + return nil, err + } + // A cached RV greater than the requested crv is not expected here, so match on equality only. + if utils.IsEqual(rv, crv) { + found = true + start = e.startIndex + index + break + } + } + + if found { + i := 0 + resultSize := (e.endIndex - start) % cacheSize + // append the items in the slice that follow the matching one + for ; i < resultSize; i++ { + result = append(result, e.cache[(start+i)%cacheSize]) + } + return result, nil + } + } + + return nil, apierrors.NewResourceExpired("resource version not found") +} + +func (e *EventCache) Enqueue(event *watch.Event) { + e.lock.RLock() + defer e.lock.RUnlock() + + if e.endIndex == e.startIndex+cacheSize { + // Cache is full - remove the oldest element. 
+ e.startIndex++ + } + e.cache[e.endIndex%cacheSize] = event + e.endIndex++ +} + +func (e *EventCache) Clear() { + e.lock.RLock() + defer e.lock.RUnlock() + + e.startIndex = 0 + e.endIndex = 0 +} diff --git a/pkg/watcher/components/event_channel.go b/pkg/watcher/components/event_channel.go new file mode 100644 index 000000000..5cc0ef8d8 --- /dev/null +++ b/pkg/watcher/components/event_channel.go @@ -0,0 +1,47 @@ +package components + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +const SIZE = 2000 + +type EventWithCluster struct { + Cluster string + Event *watch.Event +} + +type EventChannels struct { + channels map[schema.GroupVersionResource]chan *EventWithCluster + lock sync.Mutex +} + +var EC *EventChannels + +func init() { + EC = &EventChannels{ + channels: make(map[schema.GroupVersionResource]chan *EventWithCluster), + } +} + +func (e *EventChannels) StartChan(gvr schema.GroupVersionResource) chan *EventWithCluster { + e.lock.Lock() + defer e.lock.Unlock() + + if _, ok := e.channels[gvr]; !ok { + e.channels[gvr] = make(chan *EventWithCluster, SIZE) + } + + return e.channels[gvr] +} + +func (e *EventChannels) CloseChannels() { + e.lock.Lock() + defer e.lock.Unlock() + for _, ch := range e.channels { + close(ch) + } +} diff --git a/pkg/watcher/components/filter.go b/pkg/watcher/components/filter.go new file mode 100644 index 000000000..f834f283f --- /dev/null +++ b/pkg/watcher/components/filter.go @@ -0,0 +1,211 @@ +package components + +import ( + "context" + "fmt" + "strings" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/validation/path" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + genericstorage "k8s.io/apiserver/pkg/storage" + + internal 
"github.com/clusterpedia-io/api/clusterpedia" + clusterpediaV1beta1 "github.com/clusterpedia-io/api/clusterpedia/v1beta1" +) + +// apiserver\pkg\registry\generic\registry\store.go:239 +// NamespaceKeyRootFunc is the default function for constructing storage paths +// to resource directories enforcing namespace rules. +func NamespaceKeyRootFunc(ctx context.Context, prefix string) string { + key := prefix + ns, ok := genericapirequest.NamespaceFrom(ctx) + if ok && len(ns) > 0 { + key = key + "/" + ns + } + return key +} + +// apiserver\pkg\registry\generic\registry\store.go:251 +// NamespaceKeyFunc is the default function for constructing storage paths to +// a resource relative to the given prefix enforcing namespace rules. If the +// context does not contain a namespace, it errors. +func NamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + key := NamespaceKeyRootFunc(ctx, prefix) + ns, ok := genericapirequest.NamespaceFrom(ctx) + if !ok || len(ns) == 0 { + return "", apierrors.NewBadRequest("Namespace parameter required.") + } + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key = key + "/" + name + return key, nil +} + +// NoNamespaceKeyFunc is the default function for constructing storage paths +// to a resource relative to the given prefix without a namespace. 
+func NoNamespaceKeyFunc(ctx context.Context, prefix string, name string) (string, error) { + if len(name) == 0 { + return "", apierrors.NewBadRequest("Name parameter required.") + } + if msgs := path.IsValidPathSegmentName(name); len(msgs) != 0 { + return "", apierrors.NewBadRequest(fmt.Sprintf("Name parameter invalid: %q: %s", name, strings.Join(msgs, ";"))) + } + key := prefix + "/" + name + return key, nil +} + +// hasPathPrefix returns true if the string matches pathPrefix exactly, or if it is prefixed with pathPrefix at a path segment boundary +func hasPathPrefix(s, pathPrefix string) bool { + // Short circuit if s doesn't contain the prefix at all + if !strings.HasPrefix(s, pathPrefix) { + return false + } + + pathPrefixLength := len(pathPrefix) + + if len(s) == pathPrefixLength { + // Exact match + return true + } + if strings.HasSuffix(pathPrefix, "/") { + // pathPrefix already ensured a path segment boundary + return true + } + if s[pathPrefixLength:pathPrefixLength+1] == "/" { + // The next character in s is a path segment boundary + // Check this instead of normalizing pathPrefix to avoid allocating on every call + return true + } + return false +} + +func PredicateFunc(label labels.Selector, field fields.Selector, isNamespaced bool) genericstorage.SelectionPredicate { + attrFunc := genericstorage.DefaultClusterScopedAttr + if isNamespaced { + attrFunc = genericstorage.DefaultNamespaceScopedAttr + } + return genericstorage.SelectionPredicate{ + Label: label, + Field: field, + GetAttrs: attrFunc, + } +} + +func filterWithAttrsFunction(key string, p genericstorage.SelectionPredicate) FilterWithAttrsFunc { + filterFunc := func(objKey string, label labels.Set, field fields.Set) bool { + if !hasPathPrefix(objKey, key) { + return false + } + return p.MatchesObjectAttributes(label, field) + } + return filterFunc +} + +// NewPredicateWatch implements watch.Interface with fieldSelector and labelSelector enabled +func NewPredicateWatch(ctx context.Context, options 
*internal.ListOptions, gvk schema.GroupVersionKind, isNamespaced bool) (*MultiClusterWatcher, error) { + resourceVersion := "" + + label := labels.Everything() + if options != nil && options.LabelSelector != nil { + label = options.LabelSelector + } + field := fields.Everything() + if options != nil && options.EnhancedFieldSelector != nil { + fieldSelectorPointer, err := clusterpediaV1beta1.Convert_EnhancedFieldSelector_To_FieldSelector(&options.EnhancedFieldSelector) + if err != nil { + return nil, err + } + field = *fieldSelectorPointer + } + + predicate := PredicateFunc(label, field, isNamespaced) + + if options != nil { + resourceVersion = options.ResourceVersion + predicate.AllowWatchBookmarks = options.AllowWatchBookmarks + } + + // ResourcePrefix must come from the underlying factory + prefix := options.ResourcePrefix + if !strings.HasPrefix(prefix, "/") { + prefix = "/" + prefix + } + if prefix == "/" { + // return fmt.Errorf("store for %s has an invalid prefix %q", e.DefaultQualifiedResource.String(), opts.ResourcePrefix) + return nil, fmt.Errorf("resource prefix an invalid prefix: /") + } + + storageOpts := genericstorage.ListOptions{ + ResourceVersion: resourceVersion, + Predicate: predicate, + Recursive: true, + } + + // KeyRootFunc returns the root etcd key for this resource; should not + // include trailing "/". This is used for operations that work on the + // entire collection (listing and watching). + // + // KeyRootFunc and KeyFunc must be supplied together or not at all. + var KeyRootFunc func(ctx context.Context) string + + // KeyFunc returns the key for a specific object in the collection. + // KeyFunc is called for Create/Update/Get/Delete. Note that 'namespace' + // can be gotten from ctx. + // + // KeyFunc and KeyRootFunc must be supplied together or not at all. 
+ var KeyFunc func(ctx context.Context, name string) (string, error) + + if isNamespaced { + KeyRootFunc = func(ctx context.Context) string { + return NamespaceKeyRootFunc(ctx, prefix) + } + KeyFunc = func(ctx context.Context, name string) (string, error) { + return NamespaceKeyFunc(ctx, prefix, name) + } + } else { + KeyRootFunc = func(ctx context.Context) string { + return prefix + } + + KeyFunc = func(ctx context.Context, name string) (string, error) { + return NoNamespaceKeyFunc(ctx, prefix, name) + } + } + + key := KeyRootFunc(ctx) + if name, ok := predicate.MatchesSingle(); ok { + if k, err := KeyFunc(ctx, name); err == nil { + key = k + storageOpts.Recursive = false + } + // if we cannot extract a key based on the current context, the + // optimization is skipped + } + + // We adapt the store's keyFunc so that we can use it with the StorageDecorator + // without making any assumptions about where objects are stored in etcd + keyFunc := func(obj runtime.Object) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", err + } + + if isNamespaced { + return KeyFunc(genericapirequest.WithNamespace(genericapirequest.NewContext(), accessor.GetNamespace()), accessor.GetName()) + } + + return KeyFunc(genericapirequest.NewContext(), accessor.GetName()) + } + + return NewMultiClusterWatcher(100, filterWithAttrsFunction(key, storageOpts.Predicate), keyFunc, gvk), nil +} diff --git a/pkg/watcher/components/multi_cluster_event_pool.go b/pkg/watcher/components/multi_cluster_event_pool.go new file mode 100644 index 000000000..3a5f248ee --- /dev/null +++ b/pkg/watcher/components/multi_cluster_event_pool.go @@ -0,0 +1,104 @@ +package components + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +var ( + instance *MultiClusterEventPool + once sync.Once +) + +type MultiClusterEventPool struct { + clusterbuffer 
map[schema.GroupVersionResource]*MultiClusterBuffer + + //use atomic.value instead of lock + lock sync.Mutex +} + +func GetMultiClusterEventPool() *MultiClusterEventPool { + once.Do(func() { + instance = &MultiClusterEventPool{ + clusterbuffer: map[schema.GroupVersionResource]*MultiClusterBuffer{}, + } + }) + return instance +} + +func (p *MultiClusterEventPool) GetClusterBufferByGVR(gvr schema.GroupVersionResource) *MultiClusterBuffer { + p.lock.Lock() + defer p.lock.Unlock() + if buffer, ok := p.clusterbuffer[gvr]; ok { + return buffer + } else { + wb := newMultiClusterBuffer(gvr) + p.clusterbuffer[gvr] = wb + return wb + } +} + +type MultiClusterBuffer struct { + gvr schema.GroupVersionResource + watcherbuffer []*MultiClusterWatcher + + lock sync.Mutex +} + +func newMultiClusterBuffer(gvr schema.GroupVersionResource) *MultiClusterBuffer { + wb := &MultiClusterBuffer{ + gvr: gvr, + } + + return wb +} + +func (b *MultiClusterBuffer) ForgetWatcher(watcher *MultiClusterWatcher) { + b.lock.Lock() + defer b.lock.Unlock() + var i int + hit := false + for i = 0; i < len(b.watcherbuffer); i++ { + if b.watcherbuffer[i] == watcher { + hit = true + break + } + } + if hit { + b.watcherbuffer = append(b.watcherbuffer[:i], b.watcherbuffer[i+1:]...) 
+ } +} + +func (b *MultiClusterBuffer) AppendWatcherBuffer(watcher *MultiClusterWatcher) *MultiClusterBuffer { + b.lock.Lock() + defer b.lock.Unlock() + + b.watcherbuffer = append(b.watcherbuffer, watcher) + + return b +} + +func (b *MultiClusterBuffer) ProcessEvent(obj runtime.Object, eventType watch.EventType) error { + event := watch.Event{Type: eventType, Object: obj} + + b.lock.Lock() + defer b.lock.Unlock() + + for _, buffer := range b.watcherbuffer { + buffer.NonblockingAdd(&event) + } + return nil +} + +func (b *MultiClusterBuffer) ProcessCompleteEvent(event *watch.Event) error { + b.lock.Lock() + defer b.lock.Unlock() + + for _, buffer := range b.watcherbuffer { + buffer.NonblockingAdd(event) + } + return nil +} diff --git a/pkg/watcher/components/multi_cluster_watcher.go b/pkg/watcher/components/multi_cluster_watcher.go new file mode 100644 index 000000000..6ca3fab72 --- /dev/null +++ b/pkg/watcher/components/multi_cluster_watcher.go @@ -0,0 +1,242 @@ +package components + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + "github.com/clusterpedia-io/clusterpedia/pkg/kubeapiserver/resourcescheme" +) + +type FilterWithAttrsFunc func(key string, l labels.Set, f fields.Set) bool + +// MultiClusterWatcher implements watch.Interface +type MultiClusterWatcher struct { + input chan *watch.Event + //output + result chan watch.Event + done chan struct{} + stopped bool + forget func() + filter FilterWithAttrsFunc + keyFunc func(obj runtime.Object) (string, error) + gvk schema.GroupVersionKind +} + +func NewMultiClusterWatcher(chanSize int, filter FilterWithAttrsFunc, keyFunc func(obj runtime.Object) (string, error), gvk schema.GroupVersionKind) *MultiClusterWatcher { + return 
&MultiClusterWatcher{ + input: make(chan *watch.Event, chanSize), + result: make(chan watch.Event, chanSize), + done: make(chan struct{}), + stopped: false, + forget: func() {}, + filter: filter, + keyFunc: keyFunc, + gvk: gvk, + } +} + +func (w *MultiClusterWatcher) SetForget(forget func()) { + if forget != nil { + w.forget = forget + } +} + +// ResultChan implements watch.Interface. +func (w *MultiClusterWatcher) ResultChan() <-chan watch.Event { + return w.result +} + +// Stop implements watch.Interface. +func (w *MultiClusterWatcher) Stop() { + w.forget() +} + +func (w *MultiClusterWatcher) StopThreadUnsafe() { + if !w.stopped { + w.stopped = true + close(w.done) + close(w.input) + } +} + +func (w *MultiClusterWatcher) NonblockingAdd(event *watch.Event) bool { + select { + case w.input <- event: + klog.V(8).Infof("Event in to input %v : %v \n", event.Type, event.Object.GetObjectKind().GroupVersionKind()) + return true + default: + return false + } +} + +// Add Nil timer means that add will not block (if it can't send event immediately, it will break the watcher) +func (w *MultiClusterWatcher) Add(event *watch.Event, timer *time.Timer) bool { + // Try to send the event immediately, without blocking. + if w.NonblockingAdd(event) { + return true + } + + closeFunc := func() { + // This means that we couldn't send event to that watcher. + // Since we don't want to block on it infinitely, + // we simply terminate it. + //klog.V(1).Infof("Forcing watcher close due to unresponsiveness: %v", c.objectType.String()) + w.forget() + } + + if timer == nil { + closeFunc() + return false + } + + // OK, block sending, but only until timer fires. + select { + case w.input <- event: + return true + case <-timer.C: + closeFunc() + return false + } +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, found, err := unstructured.NestedString(obj, fields...) 
+ if !found || err != nil { + return "" + } + return val +} + +func getNamespace(u *unstructured.Unstructured) string { + namespace := getNestedString(u.Object, "metadata", "namespace") + if namespace == "" { + namespace = getNestedString(u.Object, "objectMeta", "namespace") + } + return namespace +} + +func getName(u *unstructured.Unstructured) string { + namespace := getNestedString(u.Object, "metadata", "name") + if namespace == "" { + namespace = getNestedString(u.Object, "objectMeta", "name") + } + return namespace +} + +func internalToUnstructured(internal runtime.Object, gvk schema.GroupVersionKind) (*unstructured.Unstructured, error) { + var into runtime.Object + var err error + if resourcescheme.LegacyResourceScheme.IsGroupRegistered(gvk.Group) { + into, _ = resourcescheme.LegacyResourceScheme.New(gvk) + err = resourcescheme.LegacyResourceScheme.Convert(internal, into, nil) + } else { + into, _ = resourcescheme.UnstructuredScheme.New(gvk) + err = resourcescheme.UnstructuredScheme.Convert(internal, into, nil) + } + if err != nil { + return nil, err + } + toUnstructured, err := externalToUnstructured(into) + if err != nil { + return nil, err + } + return toUnstructured, nil +} + +func externalToUnstructured(obj interface{}) (*unstructured.Unstructured, error) { + uncastObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: uncastObj}, nil +} + +// apiserver\pkg\storage\cacher\cacher.go:1339 +func (w *MultiClusterWatcher) convertToWatchEvent(event *watch.Event) *watch.Event { + if event.Type == watch.Error || w.filter == nil { + return event + } + + unstructuredData, err := internalToUnstructured(event.Object, w.gvk) + if err != nil { + klog.Error(err) + return nil + } + key, err := w.keyFunc(event.Object) + if err != nil { + klog.Error(err) + return nil + } + curObjPasses := w.filter(key, unstructuredData.GetLabels(), fields.Set{ + "metadata.name": 
getName(unstructuredData), + "metadata.namespace": getNamespace(unstructuredData), + }) + if curObjPasses { + return event + } + return nil +} + +func (w *MultiClusterWatcher) sendWatchCacheEvent(event *watch.Event) { + watchEvent := w.convertToWatchEvent(event) + if watchEvent == nil { + // Watcher is not interested in that object. + return + } + + // We need to ensure that if we put event X to the c.result, all + // previous events were already put into it before, no matter whether + // c.done is close or not. + // Thus we cannot simply select from c.done and c.result and this + // would give us non-determinism. + // At the same time, we don't want to block infinitely on putting + // to c.result, when c.done is already closed. + + // This ensures that with c.done already close, we at most once go + // into the next select after this. With that, no matter which + // statement we choose there, we will deliver only consecutive + // events. + select { + case <-w.done: + return + default: + } + + select { + case w.result <- *watchEvent: + case <-w.done: + } +} + +// Process send the events which stored in watchCache into the result channel,and select the event from input channel into result channel continuously. 
+func (w *MultiClusterWatcher) Process(ctx context.Context, initEvents []*watch.Event) { + defer utilruntime.HandleCrash() + + for _, event := range initEvents { + w.sendWatchCacheEvent(event) + } + + defer close(w.result) + defer w.Stop() + for { + select { + case event, ok := <-w.input: + if !ok { + return + } + w.sendWatchCacheEvent(event) + + case <-ctx.Done(): + return + } + } +} diff --git a/pkg/watcher/middleware/publisher.go b/pkg/watcher/middleware/publisher.go new file mode 100644 index 000000000..1be4f4f99 --- /dev/null +++ b/pkg/watcher/middleware/publisher.go @@ -0,0 +1,23 @@ +package middleware + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + + watchComponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" +) + +var GlobalPublisher Publisher +var PublisherEnabled bool = false + +type Publisher interface { + InitPublisher(ctx context.Context) error + PublishTopic(gvr schema.GroupVersionResource, codec runtime.Codec) error + EventSending(gvr schema.GroupVersionResource, startChan func(schema.GroupVersionResource) chan *watchComponents.EventWithCluster, + publishEvent func(context.Context, *watchComponents.EventWithCluster), GenCrv2Event func(event *watch.Event)) error + StopPublishing(gvr schema.GroupVersionResource) error + StopPublisher() +} diff --git a/pkg/watcher/middleware/rabbitmq/rabbit_client.go b/pkg/watcher/middleware/rabbitmq/rabbit_client.go new file mode 100644 index 000000000..0132b9527 --- /dev/null +++ b/pkg/watcher/middleware/rabbitmq/rabbit_client.go @@ -0,0 +1,315 @@ +package rabbitmq + +import ( + "context" + "fmt" + "math/rand" + "strings" + "time" + + "github.com/streadway/amqp" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/codec" + watchcomponents 
"github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" +) + +const ( + RoleConsumer = "consumer" + RoleProducer = "producer" +) + +type QueueExchange struct { + QueueName string + RoutingKey string + ExchangeName string + ExchangeType string +} + +type RabbitClient struct { + QueueExchange + conn *RabbitConnection + channel *amqp.Channel + codec runtime.Codec + started bool + cliStopCh chan bool + globalStopCh <-chan struct{} + expiresPerSend int + notifyConfirm chan amqp.Confirmation // msg send confirmed chan + notifyClose chan *amqp.Error // channel closed chan + role string + newFunc func() runtime.Object // event decode + queueExpires int64 +} + +func NewProducer(queueEx QueueExchange, conn *RabbitConnection, codec runtime.Codec, expiresPerSend int, gStopCh <-chan struct{}) *RabbitClient { + return &RabbitClient{ + QueueExchange: queueEx, + conn: conn, + codec: codec, + cliStopCh: make(chan bool, 1), + globalStopCh: gStopCh, + expiresPerSend: expiresPerSend, + role: RoleProducer, + } +} + +func NewConsumer(queueEx QueueExchange, conn *RabbitConnection, codec runtime.Codec, gStopCh <-chan struct{}, newFunc func() runtime.Object, queueExpires int64) *RabbitClient { + return &RabbitClient{ + QueueExchange: queueEx, + conn: conn, + codec: codec, + cliStopCh: make(chan bool, 1), + globalStopCh: gStopCh, + role: RoleConsumer, + newFunc: newFunc, + queueExpires: queueExpires, + } +} + +func NewQueue(quePrefix string, conn *RabbitConnection, queueExpires int64) string { + for { + ch := CreateChannel(conn) + + randStr := fmt.Sprintf("%d%d%d%d", rand.Intn(10), rand.Intn(10), rand.Intn(10), rand.Intn(10)) + timeStr := time.Now().Format("2006-01-02 15-04-05") + timeStr = strings.ReplaceAll(timeStr, "-", "") + timeStr = strings.ReplaceAll(timeStr, " ", "") + queue := fmt.Sprintf("%s_%s_%s", quePrefix, timeStr, randStr) + args := make(amqp.Table, 1) + args["x-expires"] = queueExpires + _, err := ch.QueueDeclarePassive(queue, true, false, false, false, args) + if 
err == nil { // queue already exist + _ = ch.Close() + continue + } else { // declare the queue + _ = ch.Close() + ch := CreateChannel(conn) + _, err = ch.QueueDeclare(queue, true, false, false, false, args) + if err != nil { + klog.Errorf("rabbitmq queueDeclare failed: %v", err) + _ = ch.Close() + continue + } else { + _ = ch.Close() + return queue + } + } + } +} + +// CreateChannel open a channel until success +func CreateChannel(conn *RabbitConnection) *amqp.Channel { + for { + conn.tryConnect() + ch, err := conn.NewChannel() + if err != nil { + klog.Error("open channel failed. ", err, ". retry after 1 second") + time.Sleep(1 * time.Second) + continue + } else { + return ch + } + } +} + +func (r *RabbitClient) Destroy() (err error) { + r.cliStopCh <- true + return nil +} + +func (r *RabbitClient) DestroyGvr() { + klog.Info("consume stopped for client stop cmd. delete queue: ", r.QueueName) + _, err := r.channel.QueueDelete(r.QueueName, false, false, true) + if err != nil { + klog.Errorf("delete %s queue fail. %v", r.QueueName, err.Error()) + } else { + klog.Info("deleted queue ", r.QueueName) + } + _ = r.closeChannel() +} + +func (r *RabbitClient) initChannel() { + for { + r.channel = CreateChannel(r.conn) + err := r.initQuExchange() + if err != nil { + klog.Error("init channel failed. ", err.Error()) + _ = r.closeChannel() + continue + } else { + return + } + } +} + +func (r *RabbitClient) initQuExchange() error { + args := make(amqp.Table, 1) + args["x-expires"] = r.queueExpires + err := r.channel.ExchangeDeclare(r.ExchangeName, r.ExchangeType, true, false, false, false, args) + if err != nil { + return fmt.Errorf("rabbitmq exchangeDeclare failed: %v", err) + } + + r.notifyClose = r.channel.NotifyClose(make(chan *amqp.Error, 1)) // listen channel close event + + if r.role == RoleProducer { + err = r.channel.Confirm(false) // set msg confirm mode + if err != nil { + return fmt.Errorf("rabbitmq confirm error. 
%v", err) + } + r.notifyConfirm = r.channel.NotifyPublish(make(chan amqp.Confirmation, 1)) + } else { + _, err = r.channel.QueueDeclare(r.QueueName, true, false, false, false, args) + if err != nil { + return fmt.Errorf("rabbitmq queueDeclare failed: %v", err) + } + + err = r.channel.QueueBind(r.QueueName, r.RoutingKey, r.ExchangeName, false, nil) + if err != nil { + return fmt.Errorf("rabbitmq queueBind failed: %v", err) + } + + err = r.channel.Qos(1, 0, false) + if err != nil { + return fmt.Errorf("rabbitmq Qos failed: %v", err) + } + } + return nil +} + +func (r *RabbitClient) closeChannel() (err error) { + r.channel.Close() + if err != nil { + return fmt.Errorf("close rabbitmq channel failed: %v", err) + } + return +} + +// sendEventSynchro send message until success +func (r *RabbitClient) sendEventSynchro(event *watch.Event, expiresPerTry int) error { + msgBytes, err := codec.EventEncode(event.Type, event.Object, r.codec) + if err != nil { + return fmt.Errorf("event encode failed. error: %v", err.Error()) + } + ticker := time.NewTicker(time.Duration(expiresPerTry) * time.Second) + defer ticker.Stop() + for { + _ = r.channel.Publish( + r.ExchangeName, + r.RoutingKey, + false, + false, + amqp.Publishing{ + ContentType: "text/plain", + Body: msgBytes, + }) + + select { + case c := <-r.notifyConfirm: + if !c.Ack { + klog.Errorf("rabbit confirm ack false. retry init channel and send. exchange: %s", r.ExchangeName) + } else { + return nil + } + case <-ticker.C: + klog.Errorf("send event timeout. retry init channel and send. exchange: %s", r.ExchangeName) + } + + _ = r.closeChannel() + r.initChannel() + } +} + +func (r *RabbitClient) Produce(eventChan chan *watchcomponents.EventWithCluster, publishEvent func(context.Context, *watchcomponents.EventWithCluster), + ctx context.Context, genCrv2Event func(event *watch.Event)) { + for { + r.initChannel() + + LOOP: + for { + select { + case e := <-r.notifyClose: + klog.Warningf("channel notifyClose: %v. exchange: %s. 
retry channel connecting", e.Error(), r.ExchangeName) + break LOOP + case event := <-eventChan: + genCrv2Event(event.Event) + err := r.sendEventSynchro(event.Event, r.expiresPerSend) + if err != nil { + klog.Errorf("send event error %v. exchange: %s. this should not happen normally", err.Error(), r.ExchangeName) + } else { + publishEvent(ctx, event) + } + case <-r.cliStopCh: + klog.Info("produce stopped for client stop cmd. exchange: ", r.ExchangeName) + _ = r.closeChannel() + close(r.cliStopCh) + return + case <-r.globalStopCh: + klog.Info("produce stopped for global publisher stopped. exchange: ", r.ExchangeName) + _ = r.closeChannel() + return + } + } + } +} + +func (r *RabbitClient) Consume(enqueueFunc func(event *watch.Event), clearfunc func()) { + for { + r.initChannel() + msgList, err := r.channel.Consume(r.QueueName, "", false, false, false, false, nil) + if err != nil { + klog.Errorf("consume err: ", err.Error()) + _ = r.closeChannel() + continue + } + + LOOP: + for { + select { + case <-r.cliStopCh: + klog.Info("consume stopped for client stop cmd. delete queue: ", r.QueueName) + _, err = r.channel.QueueDelete(r.QueueName, false, false, true) + if err != nil { + klog.Errorf("delete %s queue fail. %v", r.QueueName, err.Error()) + } else { + klog.Info("deleted queue ", r.QueueName) + } + _ = r.closeChannel() + close(r.cliStopCh) + return + case msg := <-msgList: + //处理数据 + event, _ := codec.EventDecode(msg.Body, r.codec, r.newFunc) + klog.V(7).Infof("Event in to cache %v : %v \n", event.Type, event.Object.GetObjectKind().GroupVersionKind()) + err = msg.Ack(true) + if err != nil { + klog.Errorf("msg ack error: %v. event: %v, queue: %s. retry init channel and consume...", err.Error(), event.Type, r.QueueName) + break LOOP + } + enqueueFunc(event) + case e := <-r.notifyClose: + klog.Warningf("channel notifyClose: %v. queue: %s. 
retry channel connecting", e.Error(), r.QueueName) + break LOOP + case <-r.globalStopCh: + klog.Info("consume stopped for global publisher stopped. delete queue: ", r.QueueName) + _, err = r.channel.QueueDelete(r.QueueName, false, false, true) + if err != nil { + klog.Errorf("delete %s queue fail. %v", r.QueueName, err.Error()) + } else { + klog.Info("deleted queue ", r.QueueName) + } + _ = r.closeChannel() + return + } + } + } +} + +func GvrString(gvr schema.GroupVersionResource) string { + group := strings.ReplaceAll(gvr.Group, ".", "_") + return fmt.Sprintf("%s_%s_%s", group, gvr.Version, gvr.Resource) +} diff --git a/pkg/watcher/middleware/rabbitmq/rabbit_conn.go b/pkg/watcher/middleware/rabbitmq/rabbit_conn.go new file mode 100644 index 000000000..8f16fb75f --- /dev/null +++ b/pkg/watcher/middleware/rabbitmq/rabbit_conn.go @@ -0,0 +1,75 @@ +package rabbitmq + +import ( + "sync" + "time" + + "github.com/streadway/amqp" + "k8s.io/klog/v2" +) + +type RabbitConnection struct { + conn *amqp.Connection + url string + rw sync.Mutex + clientNum int32 + stopCh chan struct{} + notifyClose chan *amqp.Error +} + +func NewConn(url string) (*RabbitConnection, error) { + rabbitConn := &RabbitConnection{ + url: url, + rw: sync.Mutex{}, + stopCh: make(chan struct{}), + } + rabbitConn.tryConnect() + go rabbitConn.loop() + + return rabbitConn, nil +} + +func (r *RabbitConnection) NewChannel() (*amqp.Channel, error) { + r.rw.Lock() + defer r.rw.Unlock() + + return r.conn.Channel() +} + +func (r *RabbitConnection) tryConnect() { + r.rw.Lock() + defer r.rw.Unlock() + if r.conn == nil || r.conn.IsClosed() { + for { + conn, err := amqp.Dial(r.url) + if err != nil { + klog.Errorf("connect dial error: %v, reconnect after 1 second", err.Error()) + time.Sleep(1 * time.Second) + continue + } else { + r.conn = conn + r.notifyClose = r.conn.NotifyClose(make(chan *amqp.Error, 1)) + return + } + } + } +} + +func (r *RabbitConnection) Close() { + close(r.stopCh) +} + +// loop process channel 
close event +func (r *RabbitConnection) loop() { + for { + select { + case e := <-r.notifyClose: + klog.Errorf("connection notifyClose: %v. reconnecting...", e.Error()) + r.tryConnect() + case <-r.stopCh: + klog.Info("connection loop check stopped.") + _ = r.conn.Close() + return + } + } +} diff --git a/pkg/watcher/middleware/rabbitmq/rabbitmq_publisher.go b/pkg/watcher/middleware/rabbitmq/rabbitmq_publisher.go new file mode 100644 index 000000000..2832a76e4 --- /dev/null +++ b/pkg/watcher/middleware/rabbitmq/rabbitmq_publisher.go @@ -0,0 +1,124 @@ +package rabbitmq + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" + + watchcomponents "github.com/clusterpedia-io/clusterpedia/pkg/watcher/components" +) + +type RabbitmqPublisher struct { + connNums int // tcp connects + mqUrl string // rabbitmq addr + ctx context.Context + connectionsPool []*RabbitConnection + producerList map[schema.GroupVersionResource]*RabbitClient + stopCh <-chan struct{} + rw sync.Mutex + expiresPerSend int +} + +func (r *RabbitmqPublisher) InitPublisher(ctx context.Context) error { + r.ctx = ctx + r.stopCh = ctx.Done() + return r.initConnectionsPool() +} + +func (r *RabbitmqPublisher) initConnectionsPool() (err error) { + for i := len(r.connectionsPool); i < r.connNums; i++ { + conn, err := NewConn(r.mqUrl) + if err != nil { + klog.Error("init connect error: ", err.Error()) + return err + } + klog.Info("connect-%d succes", i) + r.connectionsPool = append(r.connectionsPool, conn) + } + return +} + +func (r *RabbitmqPublisher) PublishTopic(gvr schema.GroupVersionResource, codec runtime.Codec) error { + gvrStr := GvrString(gvr) + r.rw.Lock() + defer r.rw.Unlock() + if _, ok := r.producerList[gvr]; !ok { + klog.Infof("publish topic. gvr: %s. 
producer size: %d", gvrStr, len(r.producerList)) + queueEx := QueueExchange{ + RoutingKey: gvrStr, + ExchangeName: gvrStr, + ExchangeType: "direct", + } + + conn, err := r.assignConnByBalancePolicy() + if err != nil { + return fmt.Errorf("assign connection to producer failed. %v", err.Error()) + } + producer := NewProducer(queueEx, conn, codec, r.expiresPerSend, r.stopCh) + atomic.AddInt32(&conn.clientNum, 1) + r.producerList[gvr] = producer + } + return nil +} + +func (r *RabbitmqPublisher) EventSending(gvr schema.GroupVersionResource, startChan func(schema.GroupVersionResource) chan *watchcomponents.EventWithCluster, + publishEvent func(context.Context, *watchcomponents.EventWithCluster), genCrv2Event func(event *watch.Event)) error { + r.rw.Lock() + defer r.rw.Unlock() + if _, ok := r.producerList[gvr]; !ok { + return fmt.Errorf("producer not found. this should not happen normally") + } + p := r.producerList[gvr] + if !p.started { + p.started = true + go p.Produce(startChan(gvr), publishEvent, r.ctx, genCrv2Event) + } + return nil +} + +func (r *RabbitmqPublisher) StopPublishing(gvr schema.GroupVersionResource) error { + r.rw.Lock() + defer r.rw.Unlock() + if producer, ok := r.producerList[gvr]; ok { + _ = producer.Destroy() + delete(r.producerList, gvr) + atomic.AddInt32(&producer.conn.clientNum, -1) + } + return nil +} + +func (r *RabbitmqPublisher) StopPublisher() { + klog.Warning("stop publisher... 
this may caused by leader changed") + for gvr := range r.producerList { + _ = r.StopPublishing(gvr) + } + for _, conn := range r.connectionsPool { + conn.Close() + } + r.producerList = make(map[schema.GroupVersionResource]*RabbitClient) + r.connectionsPool = nil +} + +func (r *RabbitmqPublisher) assignConnByBalancePolicy() (*RabbitConnection, error) { + // rabbitmq needs to be ready before clusterpedia starts + if len(r.connectionsPool) != r.connNums { + err := r.initConnectionsPool() + if err != nil { + return nil, err + } + } + + conn := r.connectionsPool[0] + for i := 1; i < r.connNums; i++ { + if r.connectionsPool[i].clientNum < conn.clientNum { + conn = r.connectionsPool[i] + } + } + return conn, nil +} diff --git a/pkg/watcher/middleware/rabbitmq/rabbitmq_subscriber.go b/pkg/watcher/middleware/rabbitmq/rabbitmq_subscriber.go new file mode 100644 index 000000000..972359284 --- /dev/null +++ b/pkg/watcher/middleware/rabbitmq/rabbitmq_subscriber.go @@ -0,0 +1,121 @@ +package rabbitmq + +import ( + "fmt" + "sync" + "sync/atomic" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/klog/v2" +) + +type RabbitmqSubscriber struct { + connNums int + mqUrl string + connectionsPool []*RabbitConnection + consumerList map[schema.GroupVersionResource]*RabbitClient + stopCh <-chan struct{} + rw sync.Mutex + queueExpires int64 +} + +func (r *RabbitmqSubscriber) InitSubscriber(stopCh <-chan struct{}) error { + r.stopCh = stopCh + return r.initConnectionsPool() +} + +func (r *RabbitmqSubscriber) initConnectionsPool() (err error) { + for i := len(r.connectionsPool); i < r.connNums; i++ { + conn, err := NewConn(r.mqUrl) + if err != nil { + klog.Error("init connect error: %v", err.Error()) + return err + } + klog.Infof("connect-%d succes", i) + r.connectionsPool = append(r.connectionsPool, conn) + } + return +} + +func (r *RabbitmqSubscriber) SubscribeTopic(gvr schema.GroupVersionResource, codec 
runtime.Codec, newFunc func() runtime.Object) error { + gvrStr := GvrString(gvr) + r.rw.Lock() + defer r.rw.Unlock() + if _, ok := r.consumerList[gvr]; !ok { + conn, err := r.assignConnByBalancePolicy() + if err != nil { + return fmt.Errorf("assign connection to consumer failed. %v", err.Error()) + } + queueEx := QueueExchange{ + QueueName: NewQueue(gvrStr, conn, r.queueExpires), + RoutingKey: gvrStr, + ExchangeName: gvrStr, + ExchangeType: "direct", + } + consumer := NewConsumer(queueEx, conn, codec, r.stopCh, newFunc, r.queueExpires) + atomic.AddInt32(&conn.clientNum, 1) + r.consumerList[gvr] = consumer + klog.V(2).Infof("create new consumer for gvr: %v. consumer list size: %d", gvr, len(r.consumerList)) + } + return nil +} + +func (r *RabbitmqSubscriber) EventReceiving(gvr schema.GroupVersionResource, enqueueFunc func(event *watch.Event), clearfunc func()) error { + r.rw.Lock() + defer r.rw.Unlock() + if _, ok := r.consumerList[gvr]; !ok { + return fmt.Errorf("consumer not found. this should not happen normally") + } + consumer := r.consumerList[gvr] + if !consumer.started { + consumer.started = true + go consumer.Consume(enqueueFunc, clearfunc) + } + return nil +} + +func (r *RabbitmqSubscriber) StopSubscribing(gvr schema.GroupVersionResource) error { + r.rw.Lock() + defer r.rw.Unlock() + if consumer, ok := r.consumerList[gvr]; ok { + _ = consumer.Destroy() + delete(r.consumerList, gvr) + atomic.AddInt32(&consumer.conn.clientNum, -1) + } + return nil +} + +func (r *RabbitmqSubscriber) StopSubscriber() error { + r.rw.Lock() + defer r.rw.Unlock() + for gvr, consumer := range r.consumerList { + _ = consumer.Destroy() + delete(r.consumerList, gvr) + atomic.AddInt32(&consumer.conn.clientNum, -1) + } + for _, conn := range r.connectionsPool { + conn.Close() + } + r.consumerList = make(map[schema.GroupVersionResource]*RabbitClient) + r.connectionsPool = nil + return nil +} + +func (r *RabbitmqSubscriber) assignConnByBalancePolicy() (*RabbitConnection, error) { + if 
len(r.connectionsPool) != r.connNums { + err := r.initConnectionsPool() + if err != nil { + return nil, err + } + } + + conn := r.connectionsPool[0] + for i := 1; i < r.connNums; i++ { + if r.connectionsPool[i].clientNum < conn.clientNum { + conn = r.connectionsPool[i] + } + } + return conn, nil +} diff --git a/pkg/watcher/middleware/rabbitmq/register.go b/pkg/watcher/middleware/rabbitmq/register.go new file mode 100644 index 000000000..325e99509 --- /dev/null +++ b/pkg/watcher/middleware/rabbitmq/register.go @@ -0,0 +1,55 @@ +package rabbitmq + +import ( + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/options" +) + +const ( + PushlisherName = "rabbitmq" + SubscribeerName = "rabbitmq" +) + +func NewPulisher(mo *options.MiddlewareOptions) (middleware.Publisher, error) { + if mo.MaxConnections <= 0 { + mo.MaxConnections = 3 + } + if mo.ExpiresPerSend <= 0 { + mo.ExpiresPerSend = 150 + } + + url := fmt.Sprintf("amqp://%s:%s@%s:%d/%s", mo.ConnectUser, mo.ConnectPassword, mo.ServerIp, mo.ServerPort, mo.Suffix) + publisher := &RabbitmqPublisher{ + mqUrl: url, + connNums: mo.MaxConnections, + producerList: make(map[schema.GroupVersionResource]*RabbitClient), + expiresPerSend: mo.ExpiresPerSend, + rw: sync.Mutex{}, + } + return publisher, nil +} + +func NewSubscriber(mo *options.MiddlewareOptions) (middleware.Subscriber, error) { + if mo.MaxConnections <= 0 { + mo.MaxConnections = 3 + } + + if mo.QueueExpires <= 0 { + mo.QueueExpires = 3 * 3600 * 1000 // default 3 hours + } + + url := fmt.Sprintf("amqp://%s:%s@%s:%d/%s", mo.ConnectUser, mo.ConnectPassword, mo.ServerIp, mo.ServerPort, mo.Suffix) + subscriber := &RabbitmqSubscriber{ + mqUrl: url, + consumerList: make(map[schema.GroupVersionResource]*RabbitClient), + connNums: mo.MaxConnections, + rw: sync.Mutex{}, + queueExpires: mo.QueueExpires, + } + return subscriber, nil +} diff --git 
a/pkg/watcher/middleware/subscriber.go b/pkg/watcher/middleware/subscriber.go new file mode 100644 index 000000000..001d87475 --- /dev/null +++ b/pkg/watcher/middleware/subscriber.go @@ -0,0 +1,18 @@ +package middleware + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +var GlobalSubscriber Subscriber +var SubscriberEnabled bool = false + +type Subscriber interface { + InitSubscriber(stopCh <-chan struct{}) error + SubscribeTopic(gvr schema.GroupVersionResource, codec runtime.Codec, newFunc func() runtime.Object) error + EventReceiving(gvr schema.GroupVersionResource, enqueueFunc func(event *watch.Event), clearfunc func()) error + StopSubscribing(gvr schema.GroupVersionResource) error + StopSubscriber() error +} diff --git a/pkg/watcher/options/options.go b/pkg/watcher/options/options.go new file mode 100644 index 000000000..2efd25c9a --- /dev/null +++ b/pkg/watcher/options/options.go @@ -0,0 +1,61 @@ +package options + +import ( + "fmt" + + "github.com/spf13/pflag" +) + +type MiddlewareOptions struct { + Enabled bool // middleware enabled + Name string + ServerIp string + ServerPort int + ConnectUser string // rabbitmq user + ConnectPassword string // rabbitmq passwd + MaxConnections int // rabbitmq tcp connects(default 3) + Suffix string // rabbitmq url suffix(if has) + ExpiresPerSend int + QueueExpires int64 // queue will be deleted if no consumer in expires time + BindingControllerConfigPath string + CacheSize int +} + +func NewMiddlerwareOptions() *MiddlewareOptions { + return &MiddlewareOptions{Enabled: false, Name: "rabbitmq", CacheSize: 100} +} + +func (o *MiddlewareOptions) Validate() []error { + if o == nil { + return nil + } + + var errors []error + if o.ServerPort == 0 { + errors = append(errors, fmt.Errorf("ServerPort is %d", o.ServerPort)) + } + + if o.CacheSize == 0 { + o.CacheSize = 100 + } + + if o.ConnectPassword == "" { + errors = append(errors, fmt.Errorf("Server 
PassWord is null")) + } + + return errors +} + +func (o *MiddlewareOptions) AddFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.Enabled, "middleware-enabled", o.Enabled, "middlerware enabled") + fs.StringVar(&o.Name, "middleware-name", o.Name, "middlerware name") + fs.StringVar(&o.ServerIp, "middleware-serverIp", o.ServerIp, "middlerware server Ip") + fs.IntVar(&o.ServerPort, "middleware-serverPort", o.ServerPort, "middlerware server port") + fs.StringVar(&o.BindingControllerConfigPath, "binding-controller-config-path", o.BindingControllerConfigPath, ""+ + "binding controller config path.") + fs.IntVar(&o.CacheSize, "cache-size", o.CacheSize, "middlerware cache size") + fs.StringVar(&o.ConnectUser, "middleware-user", o.ConnectUser, "middlerware connect user") + fs.StringVar(&o.ConnectPassword, "middleware-password", o.ConnectPassword, "middlerware connect password") + fs.IntVar(&o.ExpiresPerSend, "middleware-send-expires", o.ExpiresPerSend, "middlerware expires send") + fs.Int64Var(&o.QueueExpires, "middleware-queue-expires", o.QueueExpires, "middlereare queue expires") +} diff --git a/pkg/watcher/register.go b/pkg/watcher/register.go new file mode 100644 index 000000000..a80b90819 --- /dev/null +++ b/pkg/watcher/register.go @@ -0,0 +1,64 @@ +package watcher + +import ( + "fmt" + + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/middleware/rabbitmq" + "github.com/clusterpedia-io/clusterpedia/pkg/watcher/options" +) + +type NewPublisherFunc func(mo *options.MiddlewareOptions) (middleware.Publisher, error) +type NewSubscriberFunc func(mo *options.MiddlewareOptions) (middleware.Subscriber, error) + +var publisherFuncs = make(map[string]NewPublisherFunc) +var subscriberFuncs = make(map[string]NewSubscriberFunc) + +func init() { + RegisterPublisherFunc(rabbitmq.PushlisherName, rabbitmq.NewPulisher) + RegisterSubscriberFunc(rabbitmq.SubscribeerName, rabbitmq.NewSubscriber) +} + +func 
RegisterPublisherFunc(name string, f NewPublisherFunc) { + if _, ok := publisherFuncs[name]; ok { + panic(fmt.Sprintf("publisher %s has been registered", name)) + } + publisherFuncs[name] = f +} + +func RegisterSubscriberFunc(name string, f NewSubscriberFunc) { + if _, ok := subscriberFuncs[name]; ok { + panic(fmt.Sprintf("subscriber %s has been registered", name)) + } + subscriberFuncs[name] = f +} + +func NewPulisher(mo *options.MiddlewareOptions) error { + provider, ok := publisherFuncs[mo.Name] + if !ok { + return fmt.Errorf("publisher %s is unregistered", mo.Name) + } + + publisher, err := provider(mo) + if err != nil { + return fmt.Errorf("Failed to init middleware: %w", err) + } + middleware.GlobalPublisher = publisher + + return nil +} + +func NewSubscriber(mo *options.MiddlewareOptions) error { + provider, ok := subscriberFuncs[mo.Name] + if !ok { + return fmt.Errorf("publisher %s is unregistered", mo.Name) + } + + subscriber, err := provider(mo) + if err != nil { + return fmt.Errorf("Failed to init middleware: %w", err) + } + middleware.GlobalSubscriber = subscriber + + return nil +} diff --git a/staging/src/github.com/clusterpedia-io/api/clusterpedia/types.go b/staging/src/github.com/clusterpedia-io/api/clusterpedia/types.go index 35ed197c6..06bab0467 100644 --- a/staging/src/github.com/clusterpedia-io/api/clusterpedia/types.go +++ b/staging/src/github.com/clusterpedia-io/api/clusterpedia/types.go @@ -49,6 +49,7 @@ type ListOptions struct { Names []string ClusterNames []string Namespaces []string + ResourcePrefix string OrderBy []OrderBy OwnerName string diff --git a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go index 4d3f6d84a..ff687cc54 100644 --- a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go +++ b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/conversion.go @@ -14,6 +14,7 @@ import ( 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + metafields "k8s.io/apimachinery/pkg/fields" "github.com/clusterpedia-io/api/clusterpedia" "github.com/clusterpedia-io/api/clusterpedia/fields" @@ -364,6 +365,18 @@ func convert_string_To_fields_Selector(in *string, out *fields.Selector, s conve return nil } +func Convert_EnhancedFieldSelector_To_FieldSelector(in *fields.Selector) (*metafields.Selector, error) { + out := "" + result := metafields.Nothing() + if err := convert_fields_Selector_To_string(in, &out, nil); err != nil { + return nil, err + } + if err := metav1.Convert_string_To_fields_Selector(&out, &result, nil); err != nil { + return nil, err + } + return &result, nil +} + func convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { if *in == nil { return nil diff --git a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go index 25a827d17..6b9013708 100644 --- a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go +++ b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/types.go @@ -23,6 +23,9 @@ type ListOptions struct { // +optional Namespaces string `json:"namespaces,omitempty"` + // +optional + ResourcePrefix string `json:"resourcePrefix,omitempty"` + // +optional OrderBy string `json:"orderby,omitempty"` diff --git a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go index e0c81f294..83b5ac500 100644 --- a/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go +++ b/staging/src/github.com/clusterpedia-io/api/clusterpedia/v1beta1/zz_generated.conversion.go @@ -196,6 +196,7 @@ func autoConvert_v1beta1_ListOptions_To_clusterpedia_ListOptions(in *ListOptions // 
WARNING: in.Names requires manual conversion: inconvertible types (string vs []string) // WARNING: in.ClusterNames requires manual conversion: inconvertible types (string vs []string) // WARNING: in.Namespaces requires manual conversion: inconvertible types (string vs []string) + out.ResourcePrefix = in.ResourcePrefix // WARNING: in.OrderBy requires manual conversion: inconvertible types (string vs []github.com/clusterpedia-io/api/clusterpedia.OrderBy) out.OwnerUID = in.OwnerUID out.OwnerName = in.OwnerName @@ -222,6 +223,7 @@ func autoConvert_clusterpedia_ListOptions_To_v1beta1_ListOptions(in *clusterpedi if err := runtime.Convert_Slice_string_To_string(&in.Namespaces, &out.Namespaces, s); err != nil { return err } + out.ResourcePrefix = in.ResourcePrefix // WARNING: in.OrderBy requires manual conversion: inconvertible types ([]github.com/clusterpedia-io/api/clusterpedia.OrderBy vs string) out.OwnerName = in.OwnerName out.OwnerUID = in.OwnerUID @@ -262,6 +264,13 @@ func autoConvert_url_Values_To_v1beta1_ListOptions(in *url.Values, out *ListOpti } else { out.Namespaces = "" } + if values, ok := map[string][]string(*in)["resourcePrefix"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourcePrefix, s); err != nil { + return err + } + } else { + out.ResourcePrefix = "" + } if values, ok := map[string][]string(*in)["orderby"]; ok && len(values) > 0 { if err := runtime.Convert_Slice_string_To_string(&values, &out.OrderBy, s); err != nil { return err diff --git a/vendor/github.com/streadway/amqp/.gitignore b/vendor/github.com/streadway/amqp/.gitignore new file mode 100644 index 000000000..667fb50c5 --- /dev/null +++ b/vendor/github.com/streadway/amqp/.gitignore @@ -0,0 +1,12 @@ +certs/* +spec/spec +examples/simple-consumer/simple-consumer +examples/simple-producer/simple-producer + +.idea/**/workspace.xml +.idea/**/tasks.xml +.idea/**/usage.statistics.xml +.idea/**/dictionaries +.idea/**/shelf + +.idea/**/contentModel.xml diff 
--git a/vendor/github.com/streadway/amqp/.travis.yml b/vendor/github.com/streadway/amqp/.travis.yml new file mode 100644 index 000000000..7eee262b4 --- /dev/null +++ b/vendor/github.com/streadway/amqp/.travis.yml @@ -0,0 +1,25 @@ +language: go + +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + +addons: + apt: + packages: + - rabbitmq-server + +services: + - rabbitmq + +env: + - GO111MODULE=on AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ + +before_install: + - go get -v golang.org/x/lint/golint + +script: + - ./pre-commit + - go test -cpu=1,2 -v -tags integration ./... diff --git a/vendor/github.com/streadway/amqp/CONTRIBUTING.md b/vendor/github.com/streadway/amqp/CONTRIBUTING.md new file mode 100644 index 000000000..c87f3d7e0 --- /dev/null +++ b/vendor/github.com/streadway/amqp/CONTRIBUTING.md @@ -0,0 +1,35 @@ +## Prequisites + +1. Go: [https://golang.org/dl/](https://golang.org/dl/) +1. Golint `go get -u -v github.com/golang/lint/golint` + +## Contributing + +The workflow is pretty standard: + +1. Fork github.com/streadway/amqp +1. Add the pre-commit hook: `ln -s ../../pre-commit .git/hooks/pre-commit` +1. Create your feature branch (`git checkout -b my-new-feature`) +1. Run integration tests (see below) +1. **Implement tests** +1. Implement fixs +1. Commit your changes (`git commit -am 'Add some feature'`) +1. Push to a branch (`git push -u origin my-new-feature`) +1. 
Submit a pull request + +## Running Tests + +The test suite assumes that: + + * A RabbitMQ node is running on localhost with all defaults: [https://www.rabbitmq.com/download.html](https://www.rabbitmq.com/download.html) + * `AMQP_URL` is exported to `amqp://guest:guest@127.0.0.1:5672/` + +### Integration Tests + +After starting a local RabbitMQ, run integration tests with the following: + + env AMQP_URL=amqp://guest:guest@127.0.0.1:5672/ go test -v -cpu 2 -tags integration -race + +All integration tests should use the `integrationConnection(...)` test +helpers defined in `integration_test.go` to setup the integration environment +and logging. diff --git a/vendor/github.com/streadway/amqp/LICENSE b/vendor/github.com/streadway/amqp/LICENSE new file mode 100644 index 000000000..07b89680a --- /dev/null +++ b/vendor/github.com/streadway/amqp/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2012-2019, Sean Treadway, SoundCloud Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/streadway/amqp/README.md b/vendor/github.com/streadway/amqp/README.md new file mode 100644 index 000000000..fd179dc29 --- /dev/null +++ b/vendor/github.com/streadway/amqp/README.md @@ -0,0 +1,100 @@ +[![Build Status](https://api.travis-ci.org/streadway/amqp.svg)](http://travis-ci.org/streadway/amqp) [![GoDoc](https://godoc.org/github.com/streadway/amqp?status.svg)](http://godoc.org/github.com/streadway/amqp) + +# Go RabbitMQ Client Library (Unmaintained Fork) + +## Beware of Abandonware + +This repository is **NOT ACTIVELY MAINTAINED**. Consider using +a different fork instead: [rabbitmq/amqp091-go](https://github.com/rabbitmq/amqp091-go). +In case of questions, start a discussion in that repo or [use other RabbitMQ community resources](https://rabbitmq.com/contact.html). + + + +## Project Maturity + +This project has been used in production systems for many years. As of 2022, +this repository is **NOT ACTIVELY MAINTAINED**. + +This repository is **very strict** about any potential public API changes. +You may want to consider [rabbitmq/amqp091-go](https://github.com/rabbitmq/amqp091-go) which +is more willing to adapt the API. + + +## Supported Go Versions + +This library supports two most recent Go release series, currently 1.10 and 1.11. + + +## Supported RabbitMQ Versions + +This project supports RabbitMQ versions starting with `2.0` but primarily tested +against reasonably recent `3.x` releases. 
Some features and behaviours may be +server version-specific. + +## Goals + +Provide a functional interface that closely represents the AMQP 0.9.1 model +targeted to RabbitMQ as a server. This includes the minimum necessary to +interact the semantics of the protocol. + +## Non-goals + +Things not intended to be supported. + + * Auto reconnect and re-synchronization of client and server topologies. + * Reconnection would require understanding the error paths when the + topology cannot be declared on reconnect. This would require a new set + of types and code paths that are best suited at the call-site of this + package. AMQP has a dynamic topology that needs all peers to agree. If + this doesn't happen, the behavior is undefined. Instead of producing a + possible interface with undefined behavior, this package is designed to + be simple for the caller to implement the necessary connection-time + topology declaration so that reconnection is trivial and encapsulated in + the caller's application code. + * AMQP Protocol negotiation for forward or backward compatibility. + * 0.9.1 is stable and widely deployed. Versions 0.10 and 1.0 are divergent + specifications that change the semantics and wire format of the protocol. + We will accept patches for other protocol support but have no plans for + implementation ourselves. + * Anything other than PLAIN and EXTERNAL authentication mechanisms. + * Keeping the mechanisms interface modular makes it possible to extend + outside of this package. If other mechanisms prove to be popular, then + we would accept patches to include them in this package. + +## Usage + +See the 'examples' subdirectory for simple producers and consumers executables. +If you have a use-case in mind which isn't well-represented by the examples, +please file an issue. + +## Documentation + +Use [Godoc documentation](http://godoc.org/github.com/streadway/amqp) for +reference and usage. 
+ +[RabbitMQ tutorials in +Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) are also +available. + +## Contributing + +Pull requests are very much welcomed. Create your pull request on a non-master +branch, make sure a test or example is included that covers your change and +your commits represent coherent changes that include a reason for the change. + +To run the integration tests, make sure you have RabbitMQ running on any host, +export the environment variable `AMQP_URL=amqp://host/` and run `go test -tags +integration`. TravisCI will also run the integration tests. + +Thanks to the [community of contributors](https://github.com/streadway/amqp/graphs/contributors). + +## External packages + + * [Google App Engine Dialer support](https://github.com/soundtrackyourbrand/gaeamqp) + * [RabbitMQ examples in Go](https://github.com/rabbitmq/rabbitmq-tutorials/tree/master/go) + +## License + +BSD 2 clause - see LICENSE for more details. + + diff --git a/vendor/github.com/streadway/amqp/allocator.go b/vendor/github.com/streadway/amqp/allocator.go new file mode 100644 index 000000000..53620e7d0 --- /dev/null +++ b/vendor/github.com/streadway/amqp/allocator.go @@ -0,0 +1,106 @@ +package amqp + +import ( + "bytes" + "fmt" + "math/big" +) + +const ( + free = 0 + allocated = 1 +) + +// allocator maintains a bitset of allocated numbers. +type allocator struct { + pool *big.Int + last int + low int + high int +} + +// NewAllocator reserves and frees integers out of a range between low and +// high. 
+// +// O(N) worst case space used, where N is maximum allocated, divided by +// sizeof(big.Word) +func newAllocator(low, high int) *allocator { + return &allocator{ + pool: big.NewInt(0), + last: low, + low: low, + high: high, + } +} + +// String returns a string describing the contents of the allocator like +// "allocator[low..high] reserved..until" +// +// O(N) where N is high-low +func (a allocator) String() string { + b := &bytes.Buffer{} + fmt.Fprintf(b, "allocator[%d..%d]", a.low, a.high) + + for low := a.low; low <= a.high; low++ { + high := low + for a.reserved(high) && high <= a.high { + high++ + } + + if high > low+1 { + fmt.Fprintf(b, " %d..%d", low, high-1) + } else if high > low { + fmt.Fprintf(b, " %d", high-1) + } + + low = high + } + return b.String() +} + +// Next reserves and returns the next available number out of the range between +// low and high. If no number is available, false is returned. +// +// O(N) worst case runtime where N is allocated, but usually O(1) due to a +// rolling index into the oldest allocation. +func (a *allocator) next() (int, bool) { + wrapped := a.last + + // Find trailing bit + for ; a.last <= a.high; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + // Find preceding free'd pool + a.last = a.low + + for ; a.last < wrapped; a.last++ { + if a.reserve(a.last) { + return a.last, true + } + } + + return 0, false +} + +// reserve claims the bit if it is not already claimed, returning true if +// successfully claimed. 
+func (a *allocator) reserve(n int) bool { + if a.reserved(n) { + return false + } + a.pool.SetBit(a.pool, n-a.low, allocated) + return true +} + +// reserved returns true if the integer has been allocated +func (a *allocator) reserved(n int) bool { + return a.pool.Bit(n-a.low) == allocated +} + +// release frees the use of the number for another allocation +func (a *allocator) release(n int) { + a.pool.SetBit(a.pool, n-a.low, free) +} diff --git a/vendor/github.com/streadway/amqp/auth.go b/vendor/github.com/streadway/amqp/auth.go new file mode 100644 index 000000000..435c94b12 --- /dev/null +++ b/vendor/github.com/streadway/amqp/auth.go @@ -0,0 +1,62 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" +) + +// Authentication interface provides a means for different SASL authentication +// mechanisms to be used during connection tuning. +type Authentication interface { + Mechanism() string + Response() string +} + +// PlainAuth is a similar to Basic Auth in HTTP. +type PlainAuth struct { + Username string + Password string +} + +// Mechanism returns "PLAIN" +func (auth *PlainAuth) Mechanism() string { + return "PLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. +func (auth *PlainAuth) Response() string { + return fmt.Sprintf("\000%s\000%s", auth.Username, auth.Password) +} + +// AMQPlainAuth is similar to PlainAuth +type AMQPlainAuth struct { + Username string + Password string +} + +// Mechanism returns "AMQPLAIN" +func (auth *AMQPlainAuth) Mechanism() string { + return "AMQPLAIN" +} + +// Response returns the null character delimited encoding for the SASL PLAIN Mechanism. 
+func (auth *AMQPlainAuth) Response() string { + return fmt.Sprintf("LOGIN:%sPASSWORD:%s", auth.Username, auth.Password) +} + +// Finds the first mechanism preferred by the client that the server supports. +func pickSASLMechanism(client []Authentication, serverMechanisms []string) (auth Authentication, ok bool) { + for _, auth = range client { + for _, mech := range serverMechanisms { + if auth.Mechanism() == mech { + return auth, true + } + } + } + + return +} diff --git a/vendor/github.com/streadway/amqp/certs.sh b/vendor/github.com/streadway/amqp/certs.sh new file mode 100644 index 000000000..834f42242 --- /dev/null +++ b/vendor/github.com/streadway/amqp/certs.sh @@ -0,0 +1,159 @@ +#!/bin/sh +# +# Creates the CA, server and client certs to be used by tls_test.go +# http://www.rabbitmq.com/ssl.html +# +# Copy stdout into the const section of tls_test.go or use for RabbitMQ +# +root=$PWD/certs + +if [ -f $root/ca/serial ]; then + echo >&2 "Previous installation found" + echo >&2 "Remove $root/ca and rerun to overwrite" + exit 1 +fi + +mkdir -p $root/ca/private +mkdir -p $root/ca/certs +mkdir -p $root/server +mkdir -p $root/client + +cd $root/ca + +chmod 700 private +touch index.txt +echo 'unique_subject = no' > index.txt.attr +echo '01' > serial +echo >openssl.cnf ' +[ ca ] +default_ca = testca + +[ testca ] +dir = . 
+certificate = $dir/cacert.pem +database = $dir/index.txt +new_certs_dir = $dir/certs +private_key = $dir/private/cakey.pem +serial = $dir/serial + +default_crl_days = 7 +default_days = 3650 +default_md = sha1 + +policy = testca_policy +x509_extensions = certificate_extensions + +[ testca_policy ] +commonName = supplied +stateOrProvinceName = optional +countryName = optional +emailAddress = optional +organizationName = optional +organizationalUnitName = optional + +[ certificate_extensions ] +basicConstraints = CA:false + +[ req ] +default_bits = 2048 +default_keyfile = ./private/cakey.pem +default_md = sha1 +prompt = yes +distinguished_name = root_ca_distinguished_name +x509_extensions = root_ca_extensions + +[ root_ca_distinguished_name ] +commonName = hostname + +[ root_ca_extensions ] +basicConstraints = CA:true +keyUsage = keyCertSign, cRLSign + +[ client_ca_extensions ] +basicConstraints = CA:false +keyUsage = digitalSignature +extendedKeyUsage = 1.3.6.1.5.5.7.3.2 + +[ server_ca_extensions ] +basicConstraints = CA:false +keyUsage = keyEncipherment +extendedKeyUsage = 1.3.6.1.5.5.7.3.1 +subjectAltName = @alt_names + +[ alt_names ] +IP.1 = 127.0.0.1 +' + +openssl req \ + -x509 \ + -nodes \ + -config openssl.cnf \ + -newkey rsa:2048 \ + -days 3650 \ + -subj "/CN=MyTestCA/" \ + -out cacert.pem \ + -outform PEM + +openssl x509 \ + -in cacert.pem \ + -out cacert.cer \ + -outform DER + +openssl genrsa -out $root/server/key.pem 2048 +openssl genrsa -out $root/client/key.pem 2048 + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=server/" \ + -key $root/server/key.pem \ + -out $root/server/req.pem \ + -outform PEM + +openssl req \ + -new \ + -nodes \ + -config openssl.cnf \ + -subj "/CN=127.0.0.1/O=client/" \ + -key $root/client/key.pem \ + -out $root/client/req.pem \ + -outform PEM + +openssl ca \ + -config openssl.cnf \ + -in $root/server/req.pem \ + -out $root/server/cert.pem \ + -notext \ + -batch \ + -extensions 
server_ca_extensions + +openssl ca \ + -config openssl.cnf \ + -in $root/client/req.pem \ + -out $root/client/cert.pem \ + -notext \ + -batch \ + -extensions client_ca_extensions + +cat <<-END +const caCert = \` +`cat $root/ca/cacert.pem` +\` + +const serverCert = \` +`cat $root/server/cert.pem` +\` + +const serverKey = \` +`cat $root/server/key.pem` +\` + +const clientCert = \` +`cat $root/client/cert.pem` +\` + +const clientKey = \` +`cat $root/client/key.pem` +\` +END diff --git a/vendor/github.com/streadway/amqp/channel.go b/vendor/github.com/streadway/amqp/channel.go new file mode 100644 index 000000000..cd19ce7ee --- /dev/null +++ b/vendor/github.com/streadway/amqp/channel.go @@ -0,0 +1,1593 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "reflect" + "sync" + "sync/atomic" +) + +// 0 1 3 7 size+7 size+8 +// +------+---------+-------------+ +------------+ +-----------+ +// | type | channel | size | | payload | | frame-end | +// +------+---------+-------------+ +------------+ +-----------+ +// octet short long size octets octet +const frameHeaderSize = 1 + 2 + 4 + 1 + +/* +Channel represents an AMQP channel. Used as a context for valid message +exchange. Errors on methods with this Channel as a receiver means this channel +should be discarded and a new channel established. + +*/ +type Channel struct { + destructor sync.Once + m sync.Mutex // struct field mutex + confirmM sync.Mutex // publisher confirms state mutex + notifyM sync.RWMutex + + connection *Connection + + rpc chan message + consumers *consumers + + id uint16 + + // closed is set to 1 when the channel has been closed - see Channel.send() + closed int32 + + // true when we will never notify again + noNotify bool + + // Channel and Connection exceptions will be broadcast on these listeners. 
+ closes []chan *Error + + // Listeners for active=true flow control. When true is sent to a listener, + // publishing should pause until false is sent to listeners. + flows []chan bool + + // Listeners for returned publishings for unroutable messages on mandatory + // publishings or undeliverable messages on immediate publishings. + returns []chan Return + + // Listeners for when the server notifies the client that + // a consumer has been cancelled. + cancels []chan string + + // Allocated when in confirm mode in order to track publish counter and order confirms + confirms *confirms + confirming bool + + // Selects on any errors from shutdown during RPC + errors chan *Error + + // State machine that manages frame order, must only be mutated by the connection + recv func(*Channel, frame) error + + // Current state for frame re-assembly, only mutated from recv + message messageWithContent + header *headerFrame + body []byte +} + +// Constructs a new channel with the given framing rules +func newChannel(c *Connection, id uint16) *Channel { + return &Channel{ + connection: c, + id: id, + rpc: make(chan message), + consumers: makeConsumers(), + confirms: newConfirms(), + recv: (*Channel).recvMethod, + errors: make(chan *Error, 1), + } +} + +// shutdown is called by Connection after the channel has been removed from the +// connection registry. 
+func (ch *Channel) shutdown(e *Error) { + ch.destructor.Do(func() { + ch.m.Lock() + defer ch.m.Unlock() + + // Grab an exclusive lock for the notify channels + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + // Broadcast abnormal shutdown + if e != nil { + for _, c := range ch.closes { + c <- e + } + } + + // Signal that from now on, Channel.send() should call + // Channel.sendClosed() + atomic.StoreInt32(&ch.closed, 1) + + // Notify RPC if we're selecting + if e != nil { + ch.errors <- e + } + + ch.consumers.close() + + for _, c := range ch.closes { + close(c) + } + + for _, c := range ch.flows { + close(c) + } + + for _, c := range ch.returns { + close(c) + } + + for _, c := range ch.cancels { + close(c) + } + + // Set the slices to nil to prevent the dispatch() range from sending on + // the now closed channels after we release the notifyM mutex + ch.flows = nil + ch.closes = nil + ch.returns = nil + ch.cancels = nil + + if ch.confirms != nil { + ch.confirms.Close() + } + + close(ch.errors) + ch.noNotify = true + }) +} + +// send calls Channel.sendOpen() during normal operation. +// +// After the channel has been closed, send calls Channel.sendClosed(), ensuring +// only 'channel.close' is sent to the server. +func (ch *Channel) send(msg message) (err error) { + // If the channel is closed, use Channel.sendClosed() + if atomic.LoadInt32(&ch.closed) == 1 { + return ch.sendClosed(msg) + } + + return ch.sendOpen(msg) +} + +func (ch *Channel) open() error { + return ch.call(&channelOpen{}, &channelOpenOk{}) +} + +// Performs a request/response call for when the message is not NoWait and is +// specified as Synchronous. 
+func (ch *Channel) call(req message, res ...message) error { + if err := ch.send(req); err != nil { + return err + } + + if req.wait() { + select { + case e, ok := <-ch.errors: + if ok { + return e + } + return ErrClosed + + case msg := <-ch.rpc: + if msg != nil { + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // RPC channel has been closed without an error, likely due to a hard + // error on the Connection. This indicates we have already been + // shutdown and if were waiting, will have returned from the errors chan. + return ErrClosed + } + } + + return nil +} + +func (ch *Channel) sendClosed(msg message) (err error) { + // After a 'channel.close' is sent or received the only valid response is + // channel.close-ok + if _, ok := msg.(*channelCloseOk); ok { + return ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return ErrClosed +} + +func (ch *Channel) sendOpen(msg message) (err error) { + if content, ok := msg.(messageWithContent); ok { + props, body := content.getContent() + class, _ := content.id() + + // catch client max frame size==0 and server max frame size==0 + // set size to length of what we're trying to publish + var size int + if ch.connection.Config.FrameSize > 0 { + size = ch.connection.Config.FrameSize - frameHeaderSize + } else { + size = len(body) + } + + if err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: content, + }); err != nil { + return + } + + if err = ch.connection.send(&headerFrame{ + ChannelId: ch.id, + ClassId: class, + Size: uint64(len(body)), + Properties: props, + }); err != nil { + return + } + + // chunk body into size (max frame size - frame header size) + for i, j := 0, size; i < len(body); i, j = j, j+size { + if j > len(body) { + j = len(body) + } + + if err = 
ch.connection.send(&bodyFrame{ + ChannelId: ch.id, + Body: body[i:j], + }); err != nil { + return + } + } + } else { + err = ch.connection.send(&methodFrame{ + ChannelId: ch.id, + Method: msg, + }) + } + + return +} + +// Eventually called via the state machine from the connection's reader +// goroutine, so assumes serialized access. +func (ch *Channel) dispatch(msg message) { + switch m := msg.(type) { + case *channelClose: + // lock before sending connection.close-ok + // to avoid unexpected interleaving with basic.publish frames if + // publishing is happening concurrently + ch.m.Lock() + ch.send(&channelCloseOk{}) + ch.m.Unlock() + ch.connection.closeChannel(ch, newError(m.ReplyCode, m.ReplyText)) + + case *channelFlow: + ch.notifyM.RLock() + for _, c := range ch.flows { + c <- m.Active + } + ch.notifyM.RUnlock() + ch.send(&channelFlowOk{Active: m.Active}) + + case *basicCancel: + ch.notifyM.RLock() + for _, c := range ch.cancels { + c <- m.ConsumerTag + } + ch.notifyM.RUnlock() + ch.consumers.cancel(m.ConsumerTag) + + case *basicReturn: + ret := newReturn(*m) + ch.notifyM.RLock() + for _, c := range ch.returns { + c <- *ret + } + ch.notifyM.RUnlock() + + case *basicAck: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, true}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, true}) + } + } + + case *basicNack: + if ch.confirming { + if m.Multiple { + ch.confirms.Multiple(Confirmation{m.DeliveryTag, false}) + } else { + ch.confirms.One(Confirmation{m.DeliveryTag, false}) + } + } + + case *basicDeliver: + ch.consumers.send(m.ConsumerTag, newDelivery(ch, m)) + // TODO log failed consumer and close channel, this can happen when + // deliveries are in flight and a no-wait cancel has happened + + default: + ch.rpc <- msg + } +} + +func (ch *Channel) transition(f func(*Channel, frame) error) error { + ch.recv = f + return nil +} + +func (ch *Channel) recvMethod(f frame) error { + switch frame := f.(type) { + case 
*methodFrame: + if msg, ok := frame.Method.(messageWithContent); ok { + ch.body = make([]byte, 0) + ch.message = msg + return ch.transition((*Channel).recvHeader) + } + + ch.dispatch(frame.Method) // termination state + return ch.transition((*Channel).recvMethod) + + case *headerFrame: + // drop + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + // drop + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +func (ch *Channel) recvHeader(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // start collecting if we expect body frames + ch.header = frame + + if frame.Size == 0 { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + return ch.transition((*Channel).recvContent) + + case *bodyFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + } + + panic("unexpected frame type") +} + +// state after method + header and before the length +// defined by the header has been reached +func (ch *Channel) recvContent(f frame) error { + switch frame := f.(type) { + case *methodFrame: + // interrupt content and handle method + return ch.recvMethod(f) + + case *headerFrame: + // drop and reset + return ch.transition((*Channel).recvMethod) + + case *bodyFrame: + if cap(ch.body) == 0 { + ch.body = make([]byte, 0, ch.header.Size) + } + ch.body = append(ch.body, frame.Body...) + + if uint64(len(ch.body)) >= ch.header.Size { + ch.message.setContent(ch.header.Properties, ch.body) + ch.dispatch(ch.message) // termination state + return ch.transition((*Channel).recvMethod) + } + + return ch.transition((*Channel).recvContent) + } + + panic("unexpected frame type") +} + +/* +Close initiate a clean channel closure by sending a close message with the error +code set to '200'. 
+ +It is safe to call this method multiple times. + +*/ +func (ch *Channel) Close() error { + defer ch.connection.closeChannel(ch, nil) + return ch.call( + &channelClose{ReplyCode: replySuccess}, + &channelCloseOk{}, + ) +} + +/* +NotifyClose registers a listener for when the server sends a channel or +connection exception in the form of a Connection.Close or Channel.Close method. +Connection exceptions will be broadcast to all open channels and all channels +will be closed, where channel exceptions will only be broadcast to listeners to +this channel. + +The chan provided will be closed when the Channel is closed and on a +graceful close, no error will be sent. + +*/ +func (ch *Channel) NotifyClose(c chan *Error) chan *Error { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.closes = append(ch.closes, c) + } + + return c +} + +/* +NotifyFlow registers a listener for basic.flow methods sent by the server. +When `false` is sent on one of the listener channels, all publishers should +pause until a `true` is sent. + +The server may ask the producer to pause or restart the flow of Publishings +sent by on a channel. This is a simple flow-control mechanism that a server can +use to avoid overflowing its queues or otherwise finding itself receiving more +messages than it can process. Note that this method is not intended for window +control. It does not affect contents returned by basic.get-ok methods. + +When a new channel is opened, it is active (flow is active). Some +applications assume that channels are inactive until started. To emulate +this behavior a client MAY open the channel, then pause it. + +Publishers should respond to a flow messages as rapidly as possible and the +server may disconnect over producing channels that do not respect these +messages. + +basic.flow-ok methods will always be returned to the server regardless of +the number of listeners there are. 
+ +To control the flow of deliveries from the server, use the Channel.Flow() +method instead. + +Note: RabbitMQ will rather use TCP pushback on the network connection instead +of sending basic.flow. This means that if a single channel is producing too +much on the same connection, all channels using that connection will suffer, +including acknowledgments from deliveries. Use different Connections if you +desire to interleave consumers and producers in the same process to avoid your +basic.ack messages from getting rate limited with your basic.publish messages. + +*/ +func (ch *Channel) NotifyFlow(c chan bool) chan bool { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.flows = append(ch.flows, c) + } + + return c +} + +/* +NotifyReturn registers a listener for basic.return methods. These can be sent +from the server when a publish is undeliverable either from the mandatory or +immediate flags. + +A return struct has a copy of the Publishing along with some error +information about why the publishing failed. + +*/ +func (ch *Channel) NotifyReturn(c chan Return) chan Return { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.returns = append(ch.returns, c) + } + + return c +} + +/* +NotifyCancel registers a listener for basic.cancel methods. These can be sent +from the server when a queue is deleted or when consuming from a mirrored queue +where the master has just failed (and was moved to another node). + +The subscription tag is returned to the listener. + +*/ +func (ch *Channel) NotifyCancel(c chan string) chan string { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(c) + } else { + ch.cancels = append(ch.cancels, c) + } + + return c +} + +/* +NotifyConfirm calls NotifyPublish and starts a goroutine sending +ordered Ack and Nack DeliveryTag to the respective channels. + +For strict ordering, use NotifyPublish instead. 
+*/ +func (ch *Channel) NotifyConfirm(ack, nack chan uint64) (chan uint64, chan uint64) { + confirms := ch.NotifyPublish(make(chan Confirmation, cap(ack)+cap(nack))) + + go func() { + for c := range confirms { + if c.Ack { + ack <- c.DeliveryTag + } else { + nack <- c.DeliveryTag + } + } + close(ack) + if nack != ack { + close(nack) + } + }() + + return ack, nack +} + +/* +NotifyPublish registers a listener for reliable publishing. Receives from this +chan for every publish after Channel.Confirm will be in order starting with +DeliveryTag 1. + +There will be one and only one Confirmation Publishing starting with the +delivery tag of 1 and progressing sequentially until the total number of +Publishings have been seen by the server. + +Acknowledgments will be received in the order of delivery from the +NotifyPublish channels even if the server acknowledges them out of order. + +The listener chan will be closed when the Channel is closed. + +The capacity of the chan Confirmation must be at least as large as the +number of outstanding publishings. Not having enough buffered chans will +create a deadlock if you attempt to perform other operations on the Connection +or Channel while confirms are in-flight. + +It's advisable to wait for all Confirmations to arrive before calling +Channel.Close() or Connection.Close(). + +*/ +func (ch *Channel) NotifyPublish(confirm chan Confirmation) chan Confirmation { + ch.notifyM.Lock() + defer ch.notifyM.Unlock() + + if ch.noNotify { + close(confirm) + } else { + ch.confirms.Listen(confirm) + } + + return confirm + +} + +/* +Qos controls how many messages or how many bytes the server will try to keep on +the network for consumers before receiving delivery acks. The intent of Qos is +to make sure the network buffers stay full between the server and client. + +With a prefetch count greater than zero, the server will deliver that many +messages to consumers before acknowledgments are received. 
The server ignores +this option when consumers are started with noAck because no acknowledgments +are expected or sent. + +With a prefetch size greater than zero, the server will try to keep at least +that many bytes of deliveries flushed to the network before receiving +acknowledgments from the consumers. This option is ignored when consumers are +started with noAck. + +When global is true, these Qos settings apply to all existing and future +consumers on all channels on the same connection. When false, the Channel.Qos +settings will apply to all existing and future consumers on this channel. + +Please see the RabbitMQ Consumer Prefetch documentation for an explanation of +how the global flag is implemented in RabbitMQ, as it differs from the +AMQP 0.9.1 specification in that global Qos settings are limited in scope to +channels, not connections (https://www.rabbitmq.com/consumer-prefetch.html). + +To get round-robin behavior between consumers consuming from the same queue on +different connections, set the prefetch count to 1, and the next available +message on the server will be delivered to the next available consumer. + +If your consumer work time is reasonably consistent and not much greater +than two times your network round trip time, you will see significant +throughput improvements starting with a prefetch count of 2 or slightly +greater as described by benchmarks on RabbitMQ. + +http://www.rabbitmq.com/blog/2012/04/25/rabbitmq-performance-measurements-part-2/ +*/ +func (ch *Channel) Qos(prefetchCount, prefetchSize int, global bool) error { + return ch.call( + &basicQos{ + PrefetchCount: uint16(prefetchCount), + PrefetchSize: uint32(prefetchSize), + Global: global, + }, + &basicQosOk{}, + ) +} + +/* +Cancel stops deliveries to the consumer chan established in Channel.Consume and +identified by consumer. + +Only use this method to cleanly stop receiving deliveries from the server and +cleanly shut down the consumer chan identified by this tag. 
Using this method +and waiting for remaining messages to flush from the consumer chan will ensure +all messages received on the network will be delivered to the receiver of your +consumer chan. + +Continue consuming from the chan Delivery provided by Channel.Consume until the +chan closes. + +When noWait is true, do not wait for the server to acknowledge the cancel. +Only use this when you are certain there are no deliveries in flight that +require an acknowledgment, otherwise they will arrive and be dropped in the +client without an ack, and will not be redelivered to other consumers. + +*/ +func (ch *Channel) Cancel(consumer string, noWait bool) error { + req := &basicCancel{ + ConsumerTag: consumer, + NoWait: noWait, + } + res := &basicCancelOk{} + + if err := ch.call(req, res); err != nil { + return err + } + + if req.wait() { + ch.consumers.cancel(res.ConsumerTag) + } else { + // Potentially could drop deliveries in flight + ch.consumers.cancel(consumer) + } + + return nil +} + +/* +QueueDeclare declares a queue to hold messages and deliver to consumers. +Declaring creates a queue if it doesn't already exist, or ensures that an +existing queue matches the same parameters. + +Every queue declared gets a default binding to the empty exchange "" which has +the type "direct" with the routing key matching the queue's name. With this +default binding, it is possible to publish messages that route directly to +this queue by publishing to "" with the routing key of the queue name. + + QueueDeclare("alerts", true, false, false, false, nil) + Publish("", "alerts", false, false, Publishing{Body: []byte("...")}) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alerts -> "" -> alerts -> alerts + +The queue name may be empty, in which case the server will generate a unique name +which will be returned in the Name field of Queue struct. 
+ +Durable and Non-Auto-Deleted queues will survive server restarts and remain +when there are no remaining consumers or bindings. Persistent publishings will +be restored in this queue on server restart. These queues are only able to be +bound to durable exchanges. + +Non-Durable and Auto-Deleted queues will not be redeclared on server restart +and will be deleted by the server after a short time when the last consumer is +canceled or the last consumer's channel is closed. Queues with this lifetime +can also be deleted normally with QueueDelete. These durable queues can only +be bound to non-durable exchanges. + +Non-Durable and Non-Auto-Deleted queues will remain declared as long as the +server is running regardless of how many consumers. This lifetime is useful +for temporary topologies that may have long delays between consumer activity. +These queues can only be bound to non-durable exchanges. + +Durable and Auto-Deleted queues will be restored on server restart, but without +active consumers will not survive and be removed. This Lifetime is unlikely +to be useful. + +Exclusive queues are only accessible by the connection that declares them and +will be deleted when the connection closes. Channels on other connections +will receive an error when attempting to declare, bind, consume, purge or +delete a queue with the same name. + +When noWait is true, the queue will assume to be declared on the server. A +channel exception will arrive if the conditions are met for existing queues +or attempting to modify an existing queue from a different connection. + +When the error return value is not nil, you can assume the queue could not be +declared with these parameters, and the channel will be closed. 
+ +*/ +func (ch *Channel) QueueDeclare(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: false, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* + +QueueDeclarePassive is functionally and parametrically equivalent to +QueueDeclare, except that it sets the "passive" attribute to true. A passive +queue is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent queue will cause RabbitMQ to throw an exception. This function +can be used to test for the existence of a queue. + +*/ +func (ch *Channel) QueueDeclarePassive(name string, durable, autoDelete, exclusive, noWait bool, args Table) (Queue, error) { + if err := args.Validate(); err != nil { + return Queue{}, err + } + + req := &queueDeclare{ + Queue: name, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &queueDeclareOk{} + + if err := ch.call(req, res); err != nil { + return Queue{}, err + } + + if req.wait() { + return Queue{ + Name: res.Queue, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + }, nil + } + + return Queue{Name: name}, nil +} + +/* +QueueInspect passively declares a queue by name to inspect the current message +count and consumer count. + +Use this method to check how many messages ready for delivery reside in the queue, +how many consumers are receiving deliveries, and whether a queue by this +name already exists. 
+ +If the queue by this name exists, use Channel.QueueDeclare check if it is +declared with specific parameters. + +If a queue by this name does not exist, an error will be returned and the +channel will be closed. + +*/ +func (ch *Channel) QueueInspect(name string) (Queue, error) { + req := &queueDeclare{ + Queue: name, + Passive: true, + } + res := &queueDeclareOk{} + + err := ch.call(req, res) + + state := Queue{ + Name: name, + Messages: int(res.MessageCount), + Consumers: int(res.ConsumerCount), + } + + return state, err +} + +/* +QueueBind binds an exchange to a queue so that publishings to the exchange will +be routed to the queue when the publishing routing key matches the binding +routing key. + + QueueBind("pagers", "alert", "log", false, nil) + QueueBind("emails", "info", "log", false, nil) + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> log ----> alert --> pagers + key: info ---> log ----> info ---> emails + key: debug --> log (none) (dropped) + +If a binding with the same key and arguments already exists between the +exchange and queue, the attempt to rebind will be ignored and the existing +binding will be retained. + +In the case that multiple bindings may cause the message to be routed to the +same queue, the server will only route the publishing once. This is possible +with topic exchanges. + + QueueBind("pagers", "alert", "amq.topic", false, nil) + QueueBind("emails", "info", "amq.topic", false, nil) + QueueBind("emails", "#", "amq.topic", false, nil) // match everything + + Delivery Exchange Key Queue + ----------------------------------------------- + key: alert --> amq.topic ----> alert --> pagers + key: info ---> amq.topic ----> # ------> emails + \---> info ---/ + key: debug --> amq.topic ----> # ------> emails + +It is only possible to bind a durable queue to a durable exchange regardless of +whether the queue or exchange is auto-deleted. 
Bindings between durable queues
+and exchanges will also be restored on server restart.
+
+If the binding could not complete, an error will be returned and the channel
+will be closed.
+
+When noWait is false and the queue could not be bound, the channel will be
+closed with an error.
+
+*/
+func (ch *Channel) QueueBind(name, key, exchange string, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return ch.call(
+		&queueBind{
+			Queue:      name,
+			Exchange:   exchange,
+			RoutingKey: key,
+			NoWait:     noWait,
+			Arguments:  args,
+		},
+		&queueBindOk{},
+	)
+}
+
+/*
+QueueUnbind removes a binding between an exchange and queue matching the key and
+arguments.
+
+It is possible to send an empty string for the exchange name which means to
+unbind the queue from the default exchange.
+
+*/
+func (ch *Channel) QueueUnbind(name, key, exchange string, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return ch.call(
+		&queueUnbind{
+			Queue:      name,
+			Exchange:   exchange,
+			RoutingKey: key,
+			Arguments:  args,
+		},
+		&queueUnbindOk{},
+	)
+}
+
+/*
+QueuePurge removes all messages from the named queue which are not waiting to
+be acknowledged. Messages that have been delivered but have not yet been
+acknowledged will not be removed.
+
+When successful, returns the number of messages purged.
+
+If noWait is true, do not wait for the server response and the number of
+messages purged will not be meaningful.
+*/
+func (ch *Channel) QueuePurge(name string, noWait bool) (int, error) {
+	req := &queuePurge{
+		Queue:  name,
+		NoWait: noWait,
+	}
+	res := &queuePurgeOk{}
+
+	err := ch.call(req, res)
+
+	return int(res.MessageCount), err
+}
+
+/*
+QueueDelete removes the queue from the server including all bindings then
+purges the messages based on server configuration, returning the number of
+messages purged.
+
+When ifUnused is true, the queue will not be deleted if there are any
+consumers on the queue. 
If there are consumers, an error will be returned and +the channel will be closed. + +When ifEmpty is true, the queue will not be deleted if there are any messages +remaining on the queue. If there are messages, an error will be returned and +the channel will be closed. + +When noWait is true, the queue will be deleted without waiting for a response +from the server. The purged message count will not be meaningful. If the queue +could not be deleted, a channel exception will be raised and the channel will +be closed. + +*/ +func (ch *Channel) QueueDelete(name string, ifUnused, ifEmpty, noWait bool) (int, error) { + req := &queueDelete{ + Queue: name, + IfUnused: ifUnused, + IfEmpty: ifEmpty, + NoWait: noWait, + } + res := &queueDeleteOk{} + + err := ch.call(req, res) + + return int(res.MessageCount), err +} + +/* +Consume immediately starts delivering queued messages. + +Begin receiving on the returned chan Delivery before any other operation on the +Connection or Channel. + +Continues deliveries to the returned chan Delivery until Channel.Cancel, +Connection.Close, Channel.Close, or an AMQP exception occurs. Consumers must +range over the chan to ensure all deliveries are received. Unreceived +deliveries will block all methods on the same connection. + +All deliveries in AMQP must be acknowledged. It is expected of the consumer to +call Delivery.Ack after it has successfully processed the delivery. If the +consumer is cancelled or the channel or connection is closed any unacknowledged +deliveries will be requeued at the end of the same queue. + +The consumer is identified by a string that is unique and scoped for all +consumers on this channel. If you wish to eventually cancel the consumer, use +the same non-empty identifier in Channel.Cancel. An empty string will cause +the library to generate a unique identity. 
The consumer identity will be +included in every Delivery in the ConsumerTag field + +When autoAck (also known as noAck) is true, the server will acknowledge +deliveries to this consumer prior to writing the delivery to the network. When +autoAck is true, the consumer should not call Delivery.Ack. Automatically +acknowledging deliveries means that some deliveries may get lost if the +consumer is unable to process them after the server delivers them. +See http://www.rabbitmq.com/confirms.html for more details. + +When exclusive is true, the server will ensure that this is the sole consumer +from this queue. When exclusive is false, the server will fairly distribute +deliveries across multiple consumers. + +The noLocal flag is not supported by RabbitMQ. + +It's advisable to use separate connections for +Channel.Publish and Channel.Consume so not to have TCP pushback on publishing +affect the ability to consume messages, so this parameter is here mostly for +completeness. + +When noWait is true, do not wait for the server to confirm the request and +immediately begin deliveries. If it is not possible to consume, a channel +exception will be raised and the channel will be closed. + +Optional arguments can be provided that have specific semantics for the queue +or server. + +Inflight messages, limited by Channel.Qos will be buffered until received from +the returned chan. + +When the Channel or Connection is closed, all buffered and inflight messages will +be dropped. + +When the consumer tag is cancelled, all inflight messages will be delivered until +the returned chan is closed. + +*/ +func (ch *Channel) Consume(queue, consumer string, autoAck, exclusive, noLocal, noWait bool, args Table) (<-chan Delivery, error) { + // When we return from ch.call, there may be a delivery already for the + // consumer that hasn't been added to the consumer hash yet. Because of + // this, we never rely on the server picking a consumer tag for us. 
+ + if err := args.Validate(); err != nil { + return nil, err + } + + if consumer == "" { + consumer = uniqueConsumerTag() + } + + req := &basicConsume{ + Queue: queue, + ConsumerTag: consumer, + NoLocal: noLocal, + NoAck: autoAck, + Exclusive: exclusive, + NoWait: noWait, + Arguments: args, + } + res := &basicConsumeOk{} + + deliveries := make(chan Delivery) + + ch.consumers.add(consumer, deliveries) + + if err := ch.call(req, res); err != nil { + ch.consumers.cancel(consumer) + return nil, err + } + + return (<-chan Delivery)(deliveries), nil +} + +/* +ExchangeDeclare declares an exchange on the server. If the exchange does not +already exist, the server will create it. If the exchange exists, the server +verifies that it is of the provided type, durability and auto-delete flags. + +Errors returned from this method will close the channel. + +Exchange names starting with "amq." are reserved for pre-declared and +standardized exchanges. The client MAY declare an exchange starting with +"amq." if the passive option is set, or the exchange already exists. Names can +consist of a non-empty sequence of letters, digits, hyphen, underscore, +period, or colon. + +Each exchange belongs to one of a set of exchange kinds/types implemented by +the server. The exchange types define the functionality of the exchange - i.e. +how messages are routed through it. Once an exchange is declared, its type +cannot be changed. The common types are "direct", "fanout", "topic" and +"headers". + +Durable and Non-Auto-Deleted exchanges will survive server restarts and remain +declared when there are no remaining bindings. This is the best lifetime for +long-lived exchange configurations like stable routes and default exchanges. + +Non-Durable and Auto-Deleted exchanges will be deleted when there are no +remaining bindings and not restored on server restart. 
This lifetime is
+useful for temporary topologies that should not pollute the virtual host on
+failure or after the consumers have completed.
+
+Non-Durable and Non-Auto-deleted exchanges will remain as long as the server is
+running including when there are no remaining bindings. This is useful for
+temporary topologies that may have long delays between bindings.
+
+Durable and Auto-Deleted exchanges will survive server restarts and will be
+removed before and after server restarts when there are no remaining bindings.
+These exchanges are useful for robust temporary topologies or when you require
+binding durable queues to auto-deleted exchanges.
+
+Note: RabbitMQ declares the default exchange types like 'amq.fanout' as
+durable, so queues that bind to these pre-declared exchanges must also be
+durable.
+
+Exchanges declared as `internal` do not accept publishings. Internal
+exchanges are useful when you wish to implement inter-exchange topologies
+that should not be exposed to users of the broker.
+
+When noWait is true, declare without waiting for a confirmation from the server.
+The channel may be closed as a result of an error. Add a NotifyClose listener
+to respond to any exceptions.
+
+Optional amqp.Table of arguments that are specific to the server's implementation of
+the exchange can be sent for exchange types that require extra parameters.
+*/
+func (ch *Channel) ExchangeDeclare(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error {
+	if err := args.Validate(); err != nil {
+		return err
+	}
+
+	return ch.call(
+		&exchangeDeclare{
+			Exchange:   name,
+			Type:       kind,
+			Passive:    false,
+			Durable:    durable,
+			AutoDelete: autoDelete,
+			Internal:   internal,
+			NoWait:     noWait,
+			Arguments:  args,
+		},
+		&exchangeDeclareOk{},
+	)
+}
+
+/*
+
+ExchangeDeclarePassive is functionally and parametrically equivalent to
+ExchangeDeclare, except that it sets the "passive" attribute to true. 
A passive +exchange is assumed by RabbitMQ to already exist, and attempting to connect to a +non-existent exchange will cause RabbitMQ to throw an exception. This function +can be used to detect the existence of an exchange. + +*/ +func (ch *Channel) ExchangeDeclarePassive(name, kind string, durable, autoDelete, internal, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeDeclare{ + Exchange: name, + Type: kind, + Passive: true, + Durable: durable, + AutoDelete: autoDelete, + Internal: internal, + NoWait: noWait, + Arguments: args, + }, + &exchangeDeclareOk{}, + ) +} + +/* +ExchangeDelete removes the named exchange from the server. When an exchange is +deleted all queue bindings on the exchange are also deleted. If this exchange +does not exist, the channel will be closed with an error. + +When ifUnused is true, the server will only delete the exchange if it has no queue +bindings. If the exchange has queue bindings the server does not delete it +but close the channel with an exception instead. Set this to true if you are +not the sole owner of the exchange. + +When noWait is true, do not wait for a server confirmation that the exchange has +been deleted. Failing to delete the channel could close the channel. Add a +NotifyClose listener to respond to these channel exceptions. +*/ +func (ch *Channel) ExchangeDelete(name string, ifUnused, noWait bool) error { + return ch.call( + &exchangeDelete{ + Exchange: name, + IfUnused: ifUnused, + NoWait: noWait, + }, + &exchangeDeleteOk{}, + ) +} + +/* +ExchangeBind binds an exchange to another exchange to create inter-exchange +routing topologies on the server. This can decouple the private topology and +routing exchanges from exchanges intended solely for publishing endpoints. + +Binding two exchanges with identical arguments will not create duplicate +bindings. + +Binding one exchange to another with multiple bindings will only deliver a +message once. 
For example if you bind your exchange to `amq.fanout` with two +different binding keys, only a single message will be delivered to your +exchange even though multiple bindings will match. + +Given a message delivered to the source exchange, the message will be forwarded +to the destination exchange when the routing key is matched. + + ExchangeBind("sell", "MSFT", "trade", false, nil) + ExchangeBind("buy", "AAPL", "trade", false, nil) + + Delivery Source Key Destination + example exchange exchange + ----------------------------------------------- + key: AAPL --> trade ----> MSFT sell + \---> AAPL --> buy + +When noWait is true, do not wait for the server to confirm the binding. If any +error occurs the channel will be closed. Add a listener to NotifyClose to +handle these errors. + +Optional arguments specific to the exchanges bound can also be specified. +*/ +func (ch *Channel) ExchangeBind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeBind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeBindOk{}, + ) +} + +/* +ExchangeUnbind unbinds the destination exchange from the source exchange on the +server by removing the routing key between them. This is the inverse of +ExchangeBind. If the binding does not currently exist, an error will be +returned. + +When noWait is true, do not wait for the server to confirm the deletion of the +binding. If any error occurs the channel will be closed. Add a listener to +NotifyClose to handle these errors. + +Optional arguments that are specific to the type of exchanges bound can also be +provided. These must match the same arguments specified in ExchangeBind to +identify the binding. 
+*/ +func (ch *Channel) ExchangeUnbind(destination, key, source string, noWait bool, args Table) error { + if err := args.Validate(); err != nil { + return err + } + + return ch.call( + &exchangeUnbind{ + Destination: destination, + Source: source, + RoutingKey: key, + NoWait: noWait, + Arguments: args, + }, + &exchangeUnbindOk{}, + ) +} + +/* +Publish sends a Publishing from the client to an exchange on the server. + +When you want a single message to be delivered to a single queue, you can +publish to the default exchange with the routingKey of the queue name. This is +because every declared queue gets an implicit route to the default exchange. + +Since publishings are asynchronous, any undeliverable message will get returned +by the server. Add a listener with Channel.NotifyReturn to handle any +undeliverable message when calling publish with either the mandatory or +immediate parameters as true. + +Publishings can be undeliverable when the mandatory flag is true and no queue is +bound that matches the routing key, or when the immediate flag is true and no +consumer on the matched queue is ready to accept the delivery. + +This can return an error when the channel, connection or socket is closed. The +error or lack of an error does not indicate whether the server has received this +publishing. + +It is possible for publishing to not reach the broker if the underlying socket +is shut down without pending publishing packets being flushed from the kernel +buffers. The easy way of making it probable that all publishings reach the +server is to always call Connection.Close before terminating your publishing +application. The way to ensure that all publishings reach the server is to add +a listener to Channel.NotifyPublish and put the channel in confirm mode with +Channel.Confirm. Publishing delivery tags and their corresponding +confirmations start at 1. Exit when all publishings are confirmed. 
+ +When Publish does not return an error and the channel is in confirm mode, the +internal counter for DeliveryTags with the first confirmation starts at 1. + +*/ +func (ch *Channel) Publish(exchange, key string, mandatory, immediate bool, msg Publishing) error { + if err := msg.Headers.Validate(); err != nil { + return err + } + + ch.m.Lock() + defer ch.m.Unlock() + + if err := ch.send(&basicPublish{ + Exchange: exchange, + RoutingKey: key, + Mandatory: mandatory, + Immediate: immediate, + Body: msg.Body, + Properties: properties{ + Headers: msg.Headers, + ContentType: msg.ContentType, + ContentEncoding: msg.ContentEncoding, + DeliveryMode: msg.DeliveryMode, + Priority: msg.Priority, + CorrelationId: msg.CorrelationId, + ReplyTo: msg.ReplyTo, + Expiration: msg.Expiration, + MessageId: msg.MessageId, + Timestamp: msg.Timestamp, + Type: msg.Type, + UserId: msg.UserId, + AppId: msg.AppId, + }, + }); err != nil { + return err + } + + if ch.confirming { + ch.confirms.Publish() + } + + return nil +} + +/* +Get synchronously receives a single Delivery from the head of a queue from the +server to the client. In almost all cases, using Channel.Consume will be +preferred. + +If there was a delivery waiting on the queue and that delivery was received, the +second return value will be true. If there was no delivery waiting or an error +occurred, the ok bool will be false. + +All deliveries must be acknowledged including those from Channel.Get. Call +Delivery.Ack on the returned delivery when you have fully processed this +delivery. + +When autoAck is true, the server will automatically acknowledge this message so +you don't have to. But if you are unable to fully process this message before +the channel or connection is closed, the message will not get requeued. 
+ +*/ +func (ch *Channel) Get(queue string, autoAck bool) (msg Delivery, ok bool, err error) { + req := &basicGet{Queue: queue, NoAck: autoAck} + res := &basicGetOk{} + empty := &basicGetEmpty{} + + if err := ch.call(req, res, empty); err != nil { + return Delivery{}, false, err + } + + if res.DeliveryTag > 0 { + return *(newDelivery(ch, res)), true, nil + } + + return Delivery{}, false, nil +} + +/* +Tx puts the channel into transaction mode on the server. All publishings and +acknowledgments following this method will be atomically committed or rolled +back for a single queue. Call either Channel.TxCommit or Channel.TxRollback to +leave a this transaction and immediately start a new transaction. + +The atomicity across multiple queues is not defined as queue declarations and +bindings are not included in the transaction. + +The behavior of publishings that are delivered as mandatory or immediate while +the channel is in a transaction is not defined. + +Once a channel has been put into transaction mode, it cannot be taken out of +transaction mode. Use a different channel for non-transactional semantics. + +*/ +func (ch *Channel) Tx() error { + return ch.call( + &txSelect{}, + &txSelectOk{}, + ) +} + +/* +TxCommit atomically commits all publishings and acknowledgments for a single +queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxCommit() error { + return ch.call( + &txCommit{}, + &txCommitOk{}, + ) +} + +/* +TxRollback atomically rolls back all publishings and acknowledgments for a +single queue and immediately start a new transaction. + +Calling this method without having called Channel.Tx is an error. + +*/ +func (ch *Channel) TxRollback() error { + return ch.call( + &txRollback{}, + &txRollbackOk{}, + ) +} + +/* +Flow pauses the delivery of messages to consumers on this channel. 
Channels +are opened with flow control active, to open a channel with paused +deliveries immediately call this method with `false` after calling +Connection.Channel. + +When active is `false`, this method asks the server to temporarily pause deliveries +until called again with active as `true`. + +Channel.Get methods will not be affected by flow control. + +This method is not intended to act as window control. Use Channel.Qos to limit +the number of unacknowledged messages or bytes in flight instead. + +The server may also send us flow methods to throttle our publishings. A well +behaving publishing client should add a listener with Channel.NotifyFlow and +pause its publishings when `false` is sent on that channel. + +Note: RabbitMQ prefers to use TCP push back to control flow for all channels on +a connection, so under high volume scenarios, it's wise to open separate +Connections for publishings and deliveries. + +*/ +func (ch *Channel) Flow(active bool) error { + return ch.call( + &channelFlow{Active: active}, + &channelFlowOk{}, + ) +} + +/* +Confirm puts this channel into confirm mode so that the client can ensure all +publishings have successfully been received by the server. After entering this +mode, the server will send a basic.ack or basic.nack message with the deliver +tag set to a 1 based incremental index corresponding to every publishing +received after the this method returns. + +Add a listener to Channel.NotifyPublish to respond to the Confirmations. If +Channel.NotifyPublish is not called, the Confirmations will be silently +ignored. + +The order of acknowledgments is not bound to the order of deliveries. + +Ack and Nack confirmations will arrive at some point in the future. + +Unroutable mandatory or immediate messages are acknowledged immediately after +any Channel.NotifyReturn listeners have been notified. 
Other messages are +acknowledged when all queues that should have the message routed to them have +either received acknowledgment of delivery or have enqueued the message, +persisting the message if necessary. + +When noWait is true, the client will not wait for a response. A channel +exception could occur if the server does not support this method. + +*/ +func (ch *Channel) Confirm(noWait bool) error { + if err := ch.call( + &confirmSelect{Nowait: noWait}, + &confirmSelectOk{}, + ); err != nil { + return err + } + + ch.confirmM.Lock() + ch.confirming = true + ch.confirmM.Unlock() + + return nil +} + +/* +Recover redelivers all unacknowledged deliveries on this channel. + +When requeue is false, messages will be redelivered to the original consumer. + +When requeue is true, messages will be redelivered to any available consumer, +potentially including the original. + +If the deliveries cannot be recovered, an error will be returned and the channel +will be closed. + +Note: this method is not implemented on RabbitMQ, use Delivery.Nack instead +*/ +func (ch *Channel) Recover(requeue bool) error { + return ch.call( + &basicRecover{Requeue: requeue}, + &basicRecoverOk{}, + ) +} + +/* +Ack acknowledges a delivery by its delivery tag when having been consumed with +Channel.Consume or Channel.Get. + +Ack acknowledges all message received prior to the delivery tag when multiple +is true. + +See also Delivery.Ack +*/ +func (ch *Channel) Ack(tag uint64, multiple bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicAck{ + DeliveryTag: tag, + Multiple: multiple, + }) +} + +/* +Nack negatively acknowledges a delivery by its delivery tag. Prefer this +method to notify the server that you were not able to process this delivery and +it must be redelivered or dropped. 
+ +See also Delivery.Nack +*/ +func (ch *Channel) Nack(tag uint64, multiple bool, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicNack{ + DeliveryTag: tag, + Multiple: multiple, + Requeue: requeue, + }) +} + +/* +Reject negatively acknowledges a delivery by its delivery tag. Prefer Nack +over Reject when communicating with a RabbitMQ server because you can Nack +multiple messages, reducing the amount of protocol messages to exchange. + +See also Delivery.Reject +*/ +func (ch *Channel) Reject(tag uint64, requeue bool) error { + ch.m.Lock() + defer ch.m.Unlock() + + return ch.send(&basicReject{ + DeliveryTag: tag, + Requeue: requeue, + }) +} diff --git a/vendor/github.com/streadway/amqp/confirms.go b/vendor/github.com/streadway/amqp/confirms.go new file mode 100644 index 000000000..06cbaa711 --- /dev/null +++ b/vendor/github.com/streadway/amqp/confirms.go @@ -0,0 +1,94 @@ +package amqp + +import "sync" + +// confirms resequences and notifies one or multiple publisher confirmation listeners +type confirms struct { + m sync.Mutex + listeners []chan Confirmation + sequencer map[uint64]Confirmation + published uint64 + expecting uint64 +} + +// newConfirms allocates a confirms +func newConfirms() *confirms { + return &confirms{ + sequencer: map[uint64]Confirmation{}, + published: 0, + expecting: 1, + } +} + +func (c *confirms) Listen(l chan Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + c.listeners = append(c.listeners, l) +} + +// publish increments the publishing counter +func (c *confirms) Publish() uint64 { + c.m.Lock() + defer c.m.Unlock() + + c.published++ + return c.published +} + +// confirm confirms one publishing, increments the expecting delivery tag, and +// removes bookkeeping for that delivery tag. 
+func (c *confirms) confirm(confirmation Confirmation) { + delete(c.sequencer, c.expecting) + c.expecting++ + for _, l := range c.listeners { + l <- confirmation + } +} + +// resequence confirms any out of order delivered confirmations +func (c *confirms) resequence() { + for c.expecting <= c.published { + sequenced, found := c.sequencer[c.expecting] + if !found { + return + } + c.confirm(sequenced) + } +} + +// one confirms one publishing and all following in the publishing sequence +func (c *confirms) One(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + if c.expecting == confirmed.DeliveryTag { + c.confirm(confirmed) + } else { + c.sequencer[confirmed.DeliveryTag] = confirmed + } + c.resequence() +} + +// multiple confirms all publishings up until the delivery tag +func (c *confirms) Multiple(confirmed Confirmation) { + c.m.Lock() + defer c.m.Unlock() + + for c.expecting <= confirmed.DeliveryTag { + c.confirm(Confirmation{c.expecting, confirmed.Ack}) + } + c.resequence() +} + +// Close closes all listeners, discarding any out of sequence confirmations +func (c *confirms) Close() error { + c.m.Lock() + defer c.m.Unlock() + + for _, l := range c.listeners { + close(l) + } + c.listeners = nil + return nil +} diff --git a/vendor/github.com/streadway/amqp/connection.go b/vendor/github.com/streadway/amqp/connection.go new file mode 100644 index 000000000..252852e80 --- /dev/null +++ b/vendor/github.com/streadway/amqp/connection.go @@ -0,0 +1,852 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "crypto/tls" + "io" + "net" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + maxChannelMax = (2 << 15) - 1 + + defaultHeartbeat = 10 * time.Second + defaultConnectionTimeout = 30 * time.Second + defaultProduct = "https://github.com/streadway/amqp" + defaultVersion = "β" + // Safer default that makes channel leaks a lot easier to spot + // before they create operational headaches. See https://github.com/rabbitmq/rabbitmq-server/issues/1593. + defaultChannelMax = (2 << 10) - 1 + defaultLocale = "en_US" +) + +// Config is used in DialConfig and Open to specify the desired tuning +// parameters used during a connection open handshake. The negotiated tuning +// will be stored in the returned connection's Config field. +type Config struct { + // The SASL mechanisms to try in the client request, and the successful + // mechanism used on the Connection object. + // If SASL is nil, PlainAuth from the URL is used. + SASL []Authentication + + // Vhost specifies the namespace of permissions, exchanges, queues and + // bindings on the server. Dial sets this to the path parsed from the URL. + Vhost string + + ChannelMax int // 0 max channels means 2^16 - 1 + FrameSize int // 0 max bytes means unlimited + Heartbeat time.Duration // less than 1s uses the server's interval + + // TLSClientConfig specifies the client configuration of the TLS connection + // when establishing a tls transport. + // If the URL uses an amqps scheme, then an empty tls.Config with the + // ServerName from the URL is used. + TLSClientConfig *tls.Config + + // Properties is table of properties that the client advertises to the server. + // This is an optional setting - if the application does not set this, + // the underlying library will use a generic set of client properties. 
+ Properties Table + + // Connection locale that we expect to always be en_US + // Even though servers must return it as per the AMQP 0-9-1 spec, + // we are not aware of it being used other than to satisfy the spec requirements + Locale string + + // Dial returns a net.Conn prepared for a TLS handshake with TSLClientConfig, + // then an AMQP connection handshake. + // If Dial is nil, net.DialTimeout with a 30s connection and 30s deadline is + // used during TLS and AMQP handshaking. + Dial func(network, addr string) (net.Conn, error) +} + +// Connection manages the serialization and deserialization of frames from IO +// and dispatches the frames to the appropriate channel. All RPC methods and +// asynchronous Publishing, Delivery, Ack, Nack and Return messages are +// multiplexed on this channel. There must always be active receivers for +// every asynchronous message on this connection. +type Connection struct { + destructor sync.Once // shutdown once + sendM sync.Mutex // conn writer mutex + m sync.Mutex // struct field mutex + + conn io.ReadWriteCloser + + rpc chan message + writer *writer + sends chan time.Time // timestamps of each frame sent + deadlines chan readDeadliner // heartbeater updates read deadlines + + allocator *allocator // id generator valid after openTune + channels map[uint16]*Channel + + noNotify bool // true when we will never notify again + closes []chan *Error + blocks []chan Blocking + + errors chan *Error + + Config Config // The negotiated Config after connection.open + + Major int // Server's major version + Minor int // Server's minor version + Properties Table // Server properties + Locales []string // Server locales + + closed int32 // Will be 1 if the connection is closed, 0 otherwise. 
Should only be accessed as atomic +} + +type readDeadliner interface { + SetReadDeadline(time.Time) error +} + +// DefaultDial establishes a connection when config.Dial is not provided +func DefaultDial(connectionTimeout time.Duration) func(network, addr string) (net.Conn, error) { + return func(network, addr string) (net.Conn, error) { + conn, err := net.DialTimeout(network, addr, connectionTimeout) + if err != nil { + return nil, err + } + + // Heartbeating hasn't started yet, don't stall forever on a dead server. + // A deadline is set for TLS and AMQP handshaking. After AMQP is established, + // the deadline is cleared in openComplete. + if err := conn.SetDeadline(time.Now().Add(connectionTimeout)); err != nil { + return nil, err + } + + return conn, nil + } +} + +// Dial accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the handshake deadline to 30 seconds. After handshake, +// deadlines are cleared. +// +// Dial uses the zero value of tls.Config when it encounters an amqps:// +// scheme. It is equivalent to calling DialTLS(amqp, nil). +func Dial(url string) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + Locale: defaultLocale, + }) +} + +// DialTLS accepts a string in the AMQP URI format and returns a new Connection +// over TCP using PlainAuth. Defaults to a server heartbeat interval of 10 +// seconds and sets the initial read deadline to 30 seconds. +// +// DialTLS uses the provided tls.Config when encountering an amqps:// scheme. +func DialTLS(url string, amqps *tls.Config) (*Connection, error) { + return DialConfig(url, Config{ + Heartbeat: defaultHeartbeat, + TLSClientConfig: amqps, + Locale: defaultLocale, + }) +} + +// DialConfig accepts a string in the AMQP URI format and a configuration for +// the transport and connection setup, returning a new Connection. 
Defaults to +// a server heartbeat interval of 10 seconds and sets the initial read deadline +// to 30 seconds. +func DialConfig(url string, config Config) (*Connection, error) { + var err error + var conn net.Conn + + uri, err := ParseURI(url) + if err != nil { + return nil, err + } + + if config.SASL == nil { + config.SASL = []Authentication{uri.PlainAuth()} + } + + if config.Vhost == "" { + config.Vhost = uri.Vhost + } + + addr := net.JoinHostPort(uri.Host, strconv.FormatInt(int64(uri.Port), 10)) + + dialer := config.Dial + if dialer == nil { + dialer = DefaultDial(defaultConnectionTimeout) + } + + conn, err = dialer("tcp", addr) + if err != nil { + return nil, err + } + + if uri.Scheme == "amqps" { + if config.TLSClientConfig == nil { + config.TLSClientConfig = new(tls.Config) + } + + // If ServerName has not been specified in TLSClientConfig, + // set it to the URI host used for this connection. + if config.TLSClientConfig.ServerName == "" { + config.TLSClientConfig.ServerName = uri.Host + } + + client := tls.Client(conn, config.TLSClientConfig) + if err := client.Handshake(); err != nil { + + conn.Close() + return nil, err + } + + conn = client + } + + return Open(conn, config) +} + +/* +Open accepts an already established connection, or other io.ReadWriteCloser as +a transport. Use this method if you have established a TLS connection or wish +to use your own custom transport. + +*/ +func Open(conn io.ReadWriteCloser, config Config) (*Connection, error) { + c := &Connection{ + conn: conn, + writer: &writer{bufio.NewWriter(conn)}, + channels: make(map[uint16]*Channel), + rpc: make(chan message), + sends: make(chan time.Time), + errors: make(chan *Error, 1), + deadlines: make(chan readDeadliner, 1), + } + go c.reader(conn) + return c, c.open(config) +} + +/* +LocalAddr returns the local TCP peer address, or ":0" (the zero value of net.TCPAddr) +as a fallback default value if the underlying transport does not support LocalAddr(). 
+*/ +func (c *Connection) LocalAddr() net.Addr { + if conn, ok := c.conn.(interface { + LocalAddr() net.Addr + }); ok { + return conn.LocalAddr() + } + return &net.TCPAddr{} +} + +// ConnectionState returns basic TLS details of the underlying transport. +// Returns a zero value when the underlying connection does not implement +// ConnectionState() tls.ConnectionState. +func (c *Connection) ConnectionState() tls.ConnectionState { + if conn, ok := c.conn.(interface { + ConnectionState() tls.ConnectionState + }); ok { + return conn.ConnectionState() + } + return tls.ConnectionState{} +} + +/* +NotifyClose registers a listener for close events either initiated by an error +accompanying a connection.close method or by a normal shutdown. + +On normal shutdowns, the chan will be closed. + +To reconnect after a transport or protocol error, register a listener here and +re-run your setup process. + +*/ +func (c *Connection) NotifyClose(receiver chan *Error) chan *Error { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.closes = append(c.closes, receiver) + } + + return receiver +} + +/* +NotifyBlocked registers a listener for RabbitMQ specific TCP flow control +method extensions connection.blocked and connection.unblocked. Flow control is +active with a reason when Blocking.Blocked is true. When a Connection is +blocked, all methods will block across all connections until server resources +become free again. + +This optional extension is supported by the server when the +"connection.blocked" server capability key is true. + +*/ +func (c *Connection) NotifyBlocked(receiver chan Blocking) chan Blocking { + c.m.Lock() + defer c.m.Unlock() + + if c.noNotify { + close(receiver) + } else { + c.blocks = append(c.blocks, receiver) + } + + return receiver +} + +/* +Close requests and waits for the response to close the AMQP connection. 
+ +It's advisable to use this message when publishing to ensure all kernel buffers +have been flushed on the server and client before exiting. + +An error indicates that server may not have received this request to close but +the connection should be treated as closed regardless. + +After returning from this call, all resources associated with this connection, +including the underlying io, Channels, Notify listeners and Channel consumers +will also be closed. +*/ +func (c *Connection) Close() error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(nil) + return c.call( + &connectionClose{ + ReplyCode: replySuccess, + ReplyText: "kthxbai", + }, + &connectionCloseOk{}, + ) +} + +func (c *Connection) closeWith(err *Error) error { + if c.IsClosed() { + return ErrClosed + } + + defer c.shutdown(err) + return c.call( + &connectionClose{ + ReplyCode: uint16(err.Code), + ReplyText: err.Reason, + }, + &connectionCloseOk{}, + ) +} + +// IsClosed returns true if the connection is marked as closed, otherwise false +// is returned. 
+func (c *Connection) IsClosed() bool { + return (atomic.LoadInt32(&c.closed) == 1) +} + +func (c *Connection) send(f frame) error { + if c.IsClosed() { + return ErrClosed + } + + c.sendM.Lock() + err := c.writer.WriteFrame(f) + c.sendM.Unlock() + + if err != nil { + // shutdown could be re-entrant from signaling notify chans + go c.shutdown(&Error{ + Code: FrameError, + Reason: err.Error(), + }) + } else { + // Broadcast we sent a frame, reducing heartbeats, only + // if there is something that can receive - like a non-reentrant + // call or if the heartbeater isn't running + select { + case c.sends <- time.Now(): + default: + } + } + + return err +} + +func (c *Connection) shutdown(err *Error) { + atomic.StoreInt32(&c.closed, 1) + + c.destructor.Do(func() { + c.m.Lock() + defer c.m.Unlock() + + if err != nil { + for _, c := range c.closes { + c <- err + } + } + + if err != nil { + c.errors <- err + } + // Shutdown handler goroutine can still receive the result. + close(c.errors) + + for _, c := range c.closes { + close(c) + } + + for _, c := range c.blocks { + close(c) + } + + // Shutdown the channel, but do not use closeChannel() as it calls + // releaseChannel() which requires the connection lock. + // + // Ranging over c.channels and calling releaseChannel() that mutates + // c.channels is racy - see commit 6063341 for an example. 
+ for _, ch := range c.channels { + ch.shutdown(err) + } + + c.conn.Close() + + c.channels = map[uint16]*Channel{} + c.allocator = newAllocator(1, c.Config.ChannelMax) + c.noNotify = true + }) +} + +// All methods sent to the connection channel should be synchronous so we +// can handle them directly without a framing component +func (c *Connection) demux(f frame) { + if f.channel() == 0 { + c.dispatch0(f) + } else { + c.dispatchN(f) + } +} + +func (c *Connection) dispatch0(f frame) { + switch mf := f.(type) { + case *methodFrame: + switch m := mf.Method.(type) { + case *connectionClose: + // Send immediately as shutdown will close our side of the writer. + c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionCloseOk{}, + }) + + c.shutdown(newError(m.ReplyCode, m.ReplyText)) + case *connectionBlocked: + for _, c := range c.blocks { + c <- Blocking{Active: true, Reason: m.Reason} + } + case *connectionUnblocked: + for _, c := range c.blocks { + c <- Blocking{Active: false} + } + default: + c.rpc <- m + } + case *heartbeatFrame: + // kthx - all reads reset our deadline. so we can drop this + default: + // lolwat - channel0 only responds to methods and heartbeats + c.closeWith(ErrUnexpectedFrame) + } +} + +func (c *Connection) dispatchN(f frame) { + c.m.Lock() + channel := c.channels[f.channel()] + c.m.Unlock() + + if channel != nil { + channel.recv(channel, f) + } else { + c.dispatchClosed(f) + } +} + +// section 2.3.7: "When a peer decides to close a channel or connection, it +// sends a Close method. The receiving peer MUST respond to a Close with a +// Close-Ok, and then both parties can close their channel or connection. Note +// that if peers ignore Close, deadlock can happen when both peers send Close +// at the same time." +// +// When we don't have a channel, so we must respond with close-ok on a close +// method. This can happen between a channel exception on an asynchronous +// method like basic.publish and a synchronous close with channel.close. 
+// In that case, we'll get both a channel.close and channel.close-ok in any +// order. +func (c *Connection) dispatchClosed(f frame) { + // Only consider method frames, drop content/header frames + if mf, ok := f.(*methodFrame); ok { + switch mf.Method.(type) { + case *channelClose: + c.send(&methodFrame{ + ChannelId: f.channel(), + Method: &channelCloseOk{}, + }) + case *channelCloseOk: + // we are already closed, so do nothing + default: + // unexpected method on closed channel + c.closeWith(ErrClosed) + } + } +} + +// Reads each frame off the IO and hand off to the connection object that +// will demux the streams and dispatch to one of the opened channels or +// handle on channel 0 (the connection channel). +func (c *Connection) reader(r io.Reader) { + buf := bufio.NewReader(r) + frames := &reader{buf} + conn, haveDeadliner := r.(readDeadliner) + + for { + frame, err := frames.ReadFrame() + + if err != nil { + c.shutdown(&Error{Code: FrameError, Reason: err.Error()}) + return + } + + c.demux(frame) + + if haveDeadliner { + select { + case c.deadlines <- conn: + default: + // On c.Close() c.heartbeater() might exit just before c.deadlines <- conn is called. + // Which results in this goroutine being stuck forever. 
+ } + } + } +} + +// Ensures that at least one frame is being sent at the tuned interval with a +// jitter tolerance of 1s +func (c *Connection) heartbeater(interval time.Duration, done chan *Error) { + const maxServerHeartbeatsInFlight = 3 + + var sendTicks <-chan time.Time + if interval > 0 { + ticker := time.NewTicker(interval) + defer ticker.Stop() + sendTicks = ticker.C + } + + lastSent := time.Now() + + for { + select { + case at, stillSending := <-c.sends: + // When actively sending, depend on sent frames to reset server timer + if stillSending { + lastSent = at + } else { + return + } + + case at := <-sendTicks: + // When idle, fill the space with a heartbeat frame + if at.Sub(lastSent) > interval-time.Second { + if err := c.send(&heartbeatFrame{}); err != nil { + // send heartbeats even after close/closeOk so we + // tick until the connection starts erroring + return + } + } + + case conn := <-c.deadlines: + // When reading, reset our side of the deadline, if we've negotiated one with + // a deadline that covers at least 2 server heartbeats + if interval > 0 { + conn.SetReadDeadline(time.Now().Add(maxServerHeartbeatsInFlight * interval)) + } + + case <-done: + return + } + } +} + +// Convenience method to inspect the Connection.Properties["capabilities"] +// Table for server identified capabilities like "basic.ack" or +// "confirm.select". +func (c *Connection) isCapable(featureName string) bool { + capabilities, _ := c.Properties["capabilities"].(Table) + hasFeature, _ := capabilities[featureName].(bool) + return hasFeature +} + +// allocateChannel records but does not open a new channel with a unique id. 
+// This method is the initial part of the channel lifecycle and paired with +// releaseChannel +func (c *Connection) allocateChannel() (*Channel, error) { + c.m.Lock() + defer c.m.Unlock() + + if c.IsClosed() { + return nil, ErrClosed + } + + id, ok := c.allocator.next() + if !ok { + return nil, ErrChannelMax + } + + ch := newChannel(c, uint16(id)) + c.channels[uint16(id)] = ch + + return ch, nil +} + +// releaseChannel removes a channel from the registry as the final part of the +// channel lifecycle +func (c *Connection) releaseChannel(id uint16) { + c.m.Lock() + defer c.m.Unlock() + + delete(c.channels, id) + c.allocator.release(int(id)) +} + +// openChannel allocates and opens a channel, must be paired with closeChannel +func (c *Connection) openChannel() (*Channel, error) { + ch, err := c.allocateChannel() + if err != nil { + return nil, err + } + + if err := ch.open(); err != nil { + c.releaseChannel(ch.id) + return nil, err + } + return ch, nil +} + +// closeChannel releases and initiates a shutdown of the channel. All channel +// closures should be initiated here for proper channel lifecycle management on +// this connection. +func (c *Connection) closeChannel(ch *Channel, e *Error) { + ch.shutdown(e) + c.releaseChannel(ch.id) +} + +/* +Channel opens a unique, concurrent server channel to process the bulk of AMQP +messages. Any error from methods on this receiver will render the receiver +invalid and a new Channel should be opened. 
+ +*/ +func (c *Connection) Channel() (*Channel, error) { + return c.openChannel() +} + +func (c *Connection) call(req message, res ...message) error { + // Special case for when the protocol header frame is sent insted of a + // request method + if req != nil { + if err := c.send(&methodFrame{ChannelId: 0, Method: req}); err != nil { + return err + } + } + + select { + case err, ok := <-c.errors: + if !ok { + return ErrClosed + } + return err + + case msg := <-c.rpc: + // Try to match one of the result types + for _, try := range res { + if reflect.TypeOf(msg) == reflect.TypeOf(try) { + // *res = *msg + vres := reflect.ValueOf(try).Elem() + vmsg := reflect.ValueOf(msg).Elem() + vres.Set(vmsg) + return nil + } + } + return ErrCommandInvalid + } + // unreachable +} + +// Connection = open-Connection *use-Connection close-Connection +// open-Connection = C:protocol-header +// S:START C:START-OK +// *challenge +// S:TUNE C:TUNE-OK +// C:OPEN S:OPEN-OK +// challenge = S:SECURE C:SECURE-OK +// use-Connection = *channel +// close-Connection = C:CLOSE S:CLOSE-OK +// / S:CLOSE C:CLOSE-OK +func (c *Connection) open(config Config) error { + if err := c.send(&protocolHeader{}); err != nil { + return err + } + + return c.openStart(config) +} + +func (c *Connection) openStart(config Config) error { + start := &connectionStart{} + + if err := c.call(nil, start); err != nil { + return err + } + + c.Major = int(start.VersionMajor) + c.Minor = int(start.VersionMinor) + c.Properties = Table(start.ServerProperties) + c.Locales = strings.Split(start.Locales, " ") + + // eventually support challenge/response here by also responding to + // connectionSecure. 
+ auth, ok := pickSASLMechanism(config.SASL, strings.Split(start.Mechanisms, " ")) + if !ok { + return ErrSASL + } + + // Save this mechanism off as the one we chose + c.Config.SASL = []Authentication{auth} + + // Set the connection locale to client locale + c.Config.Locale = config.Locale + + return c.openTune(config, auth) +} + +func (c *Connection) openTune(config Config, auth Authentication) error { + if len(config.Properties) == 0 { + config.Properties = Table{ + "product": defaultProduct, + "version": defaultVersion, + } + } + + config.Properties["capabilities"] = Table{ + "connection.blocked": true, + "consumer_cancel_notify": true, + } + + ok := &connectionStartOk{ + ClientProperties: config.Properties, + Mechanism: auth.Mechanism(), + Response: auth.Response(), + Locale: config.Locale, + } + tune := &connectionTune{} + + if err := c.call(ok, tune); err != nil { + // per spec, a connection can only be closed when it has been opened + // so at this point, we know it's an auth error, but the socket + // was closed instead. Return a meaningful error. + return ErrCredentials + } + + // When the server and client both use default 0, then the max channel is + // only limited by uint16. + c.Config.ChannelMax = pick(config.ChannelMax, int(tune.ChannelMax)) + if c.Config.ChannelMax == 0 { + c.Config.ChannelMax = defaultChannelMax + } + c.Config.ChannelMax = min(c.Config.ChannelMax, maxChannelMax) + + // Frame size includes headers and end byte (len(payload)+8), even if + // this is less than FrameMinSize, use what the server sends because the + // alternative is to stop the handshake here. 
+ c.Config.FrameSize = pick(config.FrameSize, int(tune.FrameMax)) + + // Save this off for resetDeadline() + c.Config.Heartbeat = time.Second * time.Duration(pick( + int(config.Heartbeat/time.Second), + int(tune.Heartbeat))) + + // "The client should start sending heartbeats after receiving a + // Connection.Tune method" + go c.heartbeater(c.Config.Heartbeat, c.NotifyClose(make(chan *Error, 1))) + + if err := c.send(&methodFrame{ + ChannelId: 0, + Method: &connectionTuneOk{ + ChannelMax: uint16(c.Config.ChannelMax), + FrameMax: uint32(c.Config.FrameSize), + Heartbeat: uint16(c.Config.Heartbeat / time.Second), + }, + }); err != nil { + return err + } + + return c.openVhost(config) +} + +func (c *Connection) openVhost(config Config) error { + req := &connectionOpen{VirtualHost: config.Vhost} + res := &connectionOpenOk{} + + if err := c.call(req, res); err != nil { + // Cannot be closed yet, but we know it's a vhost problem + return ErrVhost + } + + c.Config.Vhost = config.Vhost + + return c.openComplete() +} + +// openComplete performs any final Connection initialization dependent on the +// connection handshake and clears any state needed for TLS and AMQP handshaking. +func (c *Connection) openComplete() error { + // We clear the deadlines and let the heartbeater reset the read deadline if requested. + // RabbitMQ uses TCP flow control at this point for pushback so Writes can + // intentionally block. 
+ if deadliner, ok := c.conn.(interface { + SetDeadline(time.Time) error + }); ok { + _ = deadliner.SetDeadline(time.Time{}) + } + + c.allocator = newAllocator(1, c.Config.ChannelMax) + return nil +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func pick(client, server int) int { + if client == 0 || server == 0 { + return max(client, server) + } + return min(client, server) +} diff --git a/vendor/github.com/streadway/amqp/consumers.go b/vendor/github.com/streadway/amqp/consumers.go new file mode 100644 index 000000000..887ac7494 --- /dev/null +++ b/vendor/github.com/streadway/amqp/consumers.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "os" + "strconv" + "sync" + "sync/atomic" +) + +var consumerSeq uint64 + +const consumerTagLengthMax = 0xFF // see writeShortstr + +func uniqueConsumerTag() string { + return commandNameBasedUniqueConsumerTag(os.Args[0]) +} + +func commandNameBasedUniqueConsumerTag(commandName string) string { + tagPrefix := "ctag-" + tagInfix := commandName + tagSuffix := "-" + strconv.FormatUint(atomic.AddUint64(&consumerSeq, 1), 10) + + if len(tagPrefix)+len(tagInfix)+len(tagSuffix) > consumerTagLengthMax { + tagInfix = "streadway/amqp" + } + + return tagPrefix + tagInfix + tagSuffix +} + +type consumerBuffers map[string]chan *Delivery + +// Concurrent type that manages the consumerTag -> +// ingress consumerBuffer mapping +type consumers struct { + sync.WaitGroup // one for buffer + closed chan struct{} // signal buffer + + sync.Mutex // protects below + chans consumerBuffers +} + +func makeConsumers() *consumers { + return &consumers{ + closed: make(chan struct{}), + chans: make(consumerBuffers), + } +} + +func (subs 
*consumers) buffer(in chan *Delivery, out chan Delivery) { + defer close(out) + defer subs.Done() + + var inflight = in + var queue []*Delivery + + for delivery := range in { + queue = append(queue, delivery) + + for len(queue) > 0 { + select { + case <-subs.closed: + // closed before drained, drop in-flight + return + + case delivery, consuming := <-inflight: + if consuming { + queue = append(queue, delivery) + } else { + inflight = nil + } + + case out <- *queue[0]: + queue = queue[1:] + } + } + } +} + +// On key conflict, close the previous channel. +func (subs *consumers) add(tag string, consumer chan Delivery) { + subs.Lock() + defer subs.Unlock() + + if prev, found := subs.chans[tag]; found { + close(prev) + } + + in := make(chan *Delivery) + subs.chans[tag] = in + + subs.Add(1) + go subs.buffer(in, consumer) +} + +func (subs *consumers) cancel(tag string) (found bool) { + subs.Lock() + defer subs.Unlock() + + ch, found := subs.chans[tag] + + if found { + delete(subs.chans, tag) + close(ch) + } + + return found +} + +func (subs *consumers) close() { + subs.Lock() + defer subs.Unlock() + + close(subs.closed) + + for tag, ch := range subs.chans { + delete(subs.chans, tag) + close(ch) + } + + subs.Wait() +} + +// Sends a delivery to a the consumer identified by `tag`. +// If unbuffered channels are used for Consume this method +// could block all deliveries until the consumer +// receives on the other end of the channel. +func (subs *consumers) send(tag string, msg *Delivery) bool { + subs.Lock() + defer subs.Unlock() + + buffer, found := subs.chans[tag] + if found { + buffer <- msg + } + + return found +} diff --git a/vendor/github.com/streadway/amqp/delivery.go b/vendor/github.com/streadway/amqp/delivery.go new file mode 100644 index 000000000..724126442 --- /dev/null +++ b/vendor/github.com/streadway/amqp/delivery.go @@ -0,0 +1,173 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "time" +) + +var errDeliveryNotInitialized = errors.New("delivery not initialized") + +// Acknowledger notifies the server of successful or failed consumption of +// deliveries via identifier found in the Delivery.DeliveryTag field. +// +// Applications can provide mock implementations in tests of Delivery handlers. +type Acknowledger interface { + Ack(tag uint64, multiple bool) error + Nack(tag uint64, multiple bool, requeue bool) error + Reject(tag uint64, requeue bool) error +} + +// Delivery captures the fields for a previously delivered message resident in +// a queue to be delivered by the server to a consumer from Channel.Consume or +// Channel.Get. +type Delivery struct { + Acknowledger Acknowledger // the channel from which this delivery arrived + + Headers Table // Application or header exchange table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user - should be authenticated user + AppId string // application use - creating application id + + // Valid only with Channel.Consume + ConsumerTag string + + // Valid only with Channel.Get + MessageCount uint32 + + DeliveryTag uint64 + Redelivered bool + Exchange string // 
basic.publish exchange + RoutingKey string // basic.publish routing key + + Body []byte +} + +func newDelivery(channel *Channel, msg messageWithContent) *Delivery { + props, body := msg.getContent() + + delivery := Delivery{ + Acknowledger: channel, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } + + // Properties for the delivery types + switch m := msg.(type) { + case *basicDeliver: + delivery.ConsumerTag = m.ConsumerTag + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + + case *basicGetOk: + delivery.MessageCount = m.MessageCount + delivery.DeliveryTag = m.DeliveryTag + delivery.Redelivered = m.Redelivered + delivery.Exchange = m.Exchange + delivery.RoutingKey = m.RoutingKey + } + + return &delivery +} + +/* +Ack delegates an acknowledgement through the Acknowledger interface that the +client or server has finished work on a delivery. + +All deliveries in AMQP must be acknowledged. If you called Channel.Consume +with autoAck true then the server will be automatically ack each message and +this method should not be called. Otherwise, you must call Delivery.Ack after +you have successfully processed this delivery. + +When multiple is true, this delivery and all prior unacknowledged deliveries +on the same channel will be acknowledged. This is useful for batch processing +of deliveries. + +An error will indicate that the acknowledge could not be delivered to the +channel it was sent from. 
+ +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Ack(multiple bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Ack(d.DeliveryTag, multiple) +} + +/* +Reject delegates a negative acknowledgement through the Acknowledger interface. + +When requeue is true, queue this message to be delivered to a consumer on a +different channel. When requeue is false or the server is unable to queue this +message, it will be dropped. + +If you are batch processing deliveries, and your server supports it, prefer +Delivery.Nack. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. +*/ +func (d Delivery) Reject(requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Reject(d.DeliveryTag, requeue) +} + +/* +Nack negatively acknowledges the delivery of message(s) identified by the +delivery tag from either the client or server. + +When multiple is true, nack messages up to and including delivered messages up +until the delivery tag delivered on the same channel. + +When requeue is true, request the server to deliver this message to a different +consumer. If it is not possible or requeue is false, the message will be +dropped or delivered to a server configured dead-letter queue. + +This method must not be used to select or requeue messages the client wishes +not to handle, rather it is to inform the server that the client is incapable +of handling this message at this time. + +Either Delivery.Ack, Delivery.Reject or Delivery.Nack must be called for every +delivery that is not automatically acknowledged. 
+*/ +func (d Delivery) Nack(multiple, requeue bool) error { + if d.Acknowledger == nil { + return errDeliveryNotInitialized + } + return d.Acknowledger.Nack(d.DeliveryTag, multiple, requeue) +} diff --git a/vendor/github.com/streadway/amqp/doc.go b/vendor/github.com/streadway/amqp/doc.go new file mode 100644 index 000000000..ee69c5b38 --- /dev/null +++ b/vendor/github.com/streadway/amqp/doc.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* +Package amqp is an AMQP 0.9.1 client with RabbitMQ extensions + +Understand the AMQP 0.9.1 messaging model by reviewing these links first. Much +of the terminology in this library directly relates to AMQP concepts. + + Resources + + http://www.rabbitmq.com/tutorials/amqp-concepts.html + http://www.rabbitmq.com/getstarted.html + http://www.rabbitmq.com/amqp-0-9-1-reference.html + +Design + +Most other broker clients publish to queues, but in AMQP, clients publish +Exchanges instead. AMQP is programmable, meaning that both the producers and +consumers agree on the configuration of the broker, instead of requiring an +operator or system configuration that declares the logical topology in the +broker. The routing between producers and consumer queues is via Bindings. +These bindings form the logical topology of the broker. + +In this library, a message sent from publisher is called a "Publishing" and a +message received to a consumer is called a "Delivery". The fields of +Publishings and Deliveries are close but not exact mappings to the underlying +wire format to maintain stronger types. Many other libraries will combine +message properties with message headers. 
In this library, the message well +known properties are strongly typed fields on the Publishings and Deliveries, +whereas the user defined headers are in the Headers field. + +The method naming closely matches the protocol's method name with positional +parameters mapping to named protocol message fields. The motivation here is to +present a comprehensive view over all possible interactions with the server. + +Generally, methods that map to protocol methods of the "basic" class will be +elided in this interface, and "select" methods of various channel mode selectors +will be elided for example Channel.Confirm and Channel.Tx. + +The library is intentionally designed to be synchronous, where responses for +each protocol message are required to be received in an RPC manner. Some +methods have a noWait parameter like Channel.QueueDeclare, and some methods are +asynchronous like Channel.Publish. The error values should still be checked for +these methods as they will indicate IO failures like when the underlying +connection closes. + +Asynchronous Events + +Clients of this library may be interested in receiving some of the protocol +messages other than Deliveries like basic.ack methods while a channel is in +confirm mode. + +The Notify* methods with Connection and Channel receivers model the pattern of +asynchronous events like closes due to exceptions, or messages that are sent out +of band from an RPC call like basic.ack or basic.flow. + +Any asynchronous events, including Deliveries and Publishings must always have +a receiver until the corresponding chans are closed. Without asynchronous +receivers, the synchronous methods will block. + +Use Case + +It's important as a client to an AMQP topology to ensure the state of the +broker matches your expectations. For both publish and consume use cases, +make sure you declare the queues, exchanges and bindings you expect to exist +prior to calling Channel.Publish or Channel.Consume. 
+ + // Connections start with amqp.Dial() typically from a command line argument + // or environment variable. + connection, err := amqp.Dial(os.Getenv("AMQP_URL")) + + // To cleanly shutdown by flushing kernel buffers, make sure to close and + // wait for the response. + defer connection.Close() + + // Most operations happen on a channel. If any error is returned on a + // channel, the channel will no longer be valid, throw it away and try with + // a different channel. If you use many channels, it's useful for the + // server to + channel, err := connection.Channel() + + // Declare your topology here, if it doesn't exist, it will be created, if + // it existed already and is not what you expect, then that's considered an + // error. + + // Use your connection on this topology with either Publish or Consume, or + // inspect your queues with QueueInspect. It's unwise to mix Publish and + // Consume to let TCP do its job well. + +SSL/TLS - Secure connections + +When Dial encounters an amqps:// scheme, it will use the zero value of a +tls.Config. This will only perform server certificate and host verification. + +Use DialTLS when you wish to provide a client certificate (recommended), +include a private certificate authority's certificate in the cert chain for +server validity, or run insecure by not verifying the server certificate dial +your own connection. DialTLS will use the provided tls.Config when it +encounters an amqps:// scheme and will dial a plain connection when it +encounters an amqp:// scheme. 
+ +SSL/TLS in RabbitMQ is documented here: http://www.rabbitmq.com/ssl.html + +*/ +package amqp diff --git a/vendor/github.com/streadway/amqp/fuzz.go b/vendor/github.com/streadway/amqp/fuzz.go new file mode 100644 index 000000000..16e626ce7 --- /dev/null +++ b/vendor/github.com/streadway/amqp/fuzz.go @@ -0,0 +1,17 @@ +// +build gofuzz + +package amqp + +import "bytes" + +func Fuzz(data []byte) int { + r := reader{bytes.NewReader(data)} + frame, err := r.ReadFrame() + if err != nil { + if frame != nil { + panic("frame is not nil") + } + return 0 + } + return 1 +} diff --git a/vendor/github.com/streadway/amqp/gen.sh b/vendor/github.com/streadway/amqp/gen.sh new file mode 100644 index 000000000..d46e19bd8 --- /dev/null +++ b/vendor/github.com/streadway/amqp/gen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +go run spec/gen.go < spec/amqp0-9-1.stripped.extended.xml | gofmt > spec091.go diff --git a/vendor/github.com/streadway/amqp/pre-commit b/vendor/github.com/streadway/amqp/pre-commit new file mode 100644 index 000000000..371553007 --- /dev/null +++ b/vendor/github.com/streadway/amqp/pre-commit @@ -0,0 +1,67 @@ +#!/bin/sh + +LATEST_STABLE_SUPPORTED_GO_VERSION="1.11" + +main() { + if local_go_version_is_latest_stable + then + run_gofmt + run_golint + run_govet + fi + run_unit_tests +} + +local_go_version_is_latest_stable() { + go version | grep -q $LATEST_STABLE_SUPPORTED_GO_VERSION +} + +log_error() { + echo "$*" 1>&2 +} + +run_gofmt() { + GOFMT_FILES=$(gofmt -l .) + if [ -n "$GOFMT_FILES" ] + then + log_error "gofmt failed for the following files: +$GOFMT_FILES + +please run 'gofmt -w .' on your changes before committing." + exit 1 + fi +} + +run_golint() { + GOLINT_ERRORS=$(golint ./... | grep -v "Id should be") + if [ -n "$GOLINT_ERRORS" ] + then + log_error "golint failed for the following reasons: +$GOLINT_ERRORS + +please run 'golint ./...' on your changes before committing." 
+ exit 1 + fi +} + +run_govet() { + GOVET_ERRORS=$(go tool vet ./*.go 2>&1) + if [ -n "$GOVET_ERRORS" ] + then + log_error "go vet failed for the following reasons: +$GOVET_ERRORS + +please run 'go tool vet ./*.go' on your changes before committing." + exit 1 + fi +} + +run_unit_tests() { + if [ -z "$NOTEST" ] + then + log_error 'Running short tests...' + env AMQP_URL= go test -short + fi +} + +main diff --git a/vendor/github.com/streadway/amqp/read.go b/vendor/github.com/streadway/amqp/read.go new file mode 100644 index 000000000..3aa0b3381 --- /dev/null +++ b/vendor/github.com/streadway/amqp/read.go @@ -0,0 +1,456 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bytes" + "encoding/binary" + "errors" + "io" + "time" +) + +/* +Reads a frame from an input stream and returns an interface that can be cast into +one of the following: + + methodFrame + PropertiesFrame + bodyFrame + heartbeatFrame + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a +'frame-end' octet that detects malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or + +“gathering reads” to avoid doing three separate system calls to read a frame. 
+*/ +func (r *reader) ReadFrame() (frame frame, err error) { + var scratch [7]byte + + if _, err = io.ReadFull(r.r, scratch[:7]); err != nil { + return + } + + typ := uint8(scratch[0]) + channel := binary.BigEndian.Uint16(scratch[1:3]) + size := binary.BigEndian.Uint32(scratch[3:7]) + + switch typ { + case frameMethod: + if frame, err = r.parseMethodFrame(channel, size); err != nil { + return + } + + case frameHeader: + if frame, err = r.parseHeaderFrame(channel, size); err != nil { + return + } + + case frameBody: + if frame, err = r.parseBodyFrame(channel, size); err != nil { + return nil, err + } + + case frameHeartbeat: + if frame, err = r.parseHeartbeatFrame(channel, size); err != nil { + return + } + + default: + return nil, ErrFrame + } + + if _, err = io.ReadFull(r.r, scratch[:1]); err != nil { + return nil, err + } + + if scratch[0] != frameEnd { + return nil, ErrFrame + } + + return +} + +func readShortstr(r io.Reader) (v string, err error) { + var length uint8 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readLongstr(r io.Reader) (v string, err error) { + var length uint32 + if err = binary.Read(r, binary.BigEndian, &length); err != nil { + return + } + + // slices can't be longer than max int32 value + if length > (^uint32(0) >> 1) { + return + } + + bytes := make([]byte, length) + if _, err = io.ReadFull(r, bytes); err != nil { + return + } + return string(bytes), nil +} + +func readDecimal(r io.Reader) (v Decimal, err error) { + if err = binary.Read(r, binary.BigEndian, &v.Scale); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &v.Value); err != nil { + return + } + return +} + +func readFloat32(r io.Reader) (v float32, err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readFloat64(r io.Reader) (v float64, 
err error) { + if err = binary.Read(r, binary.BigEndian, &v); err != nil { + return + } + return +} + +func readTimestamp(r io.Reader) (v time.Time, err error) { + var sec int64 + if err = binary.Read(r, binary.BigEndian, &sec); err != nil { + return + } + return time.Unix(sec, 0), nil +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func readField(r io.Reader) (v interface{}, err error) { + var typ byte + if err = binary.Read(r, binary.BigEndian, &typ); err != nil { + return + } + + switch typ { + case 't': + var value uint8 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return (value != 0), nil + + case 'b': + var value [1]byte + if _, err = io.ReadFull(r, value[0:1]); err != nil { + return + } + return value[0], nil + + case 's': + var value int16 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'I': + var value int32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'l': + var value int64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'f': + var value float32 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'd': + var value float64 + if err = binary.Read(r, binary.BigEndian, &value); err != nil { + return + } + return value, nil + + case 'D': + return readDecimal(r) + + case 'S': + return readLongstr(r) + + case 'A': + return readArray(r) + + case 'T': + return readTimestamp(r) + + case 'F': + return readTable(r) + + case 'x': + var len int32 + if err = binary.Read(r, binary.BigEndian, &len); err != nil { + return nil, err + } + + value := make([]byte, len) + if _, err = io.ReadFull(r, value); err != nil { + return nil, err + } + return value, err + 
+ case 'V': + return nil, nil + } + + return nil, ErrSyntax +} + +/* + Field tables are long strings that contain packed name-value pairs. The + name-value pairs are encoded as short string defining the name, and octet + defining the values type and then the value itself. The valid field types for + tables are an extension of the native integer, bit, string, and timestamp + types, and are shown in the grammar. Multi-octet integer fields are always + held in network byte order. +*/ +func readTable(r io.Reader) (table Table, err error) { + var nested bytes.Buffer + var str string + + if str, err = readLongstr(r); err != nil { + return + } + + nested.Write([]byte(str)) + + table = make(Table) + + for nested.Len() > 0 { + var key string + var value interface{} + + if key, err = readShortstr(&nested); err != nil { + return + } + + if value, err = readField(&nested); err != nil { + return + } + + table[key] = value + } + + return +} + +func readArray(r io.Reader) ([]interface{}, error) { + var ( + size uint32 + err error + ) + + if err = binary.Read(r, binary.BigEndian, &size); err != nil { + return nil, err + } + + var ( + lim = &io.LimitedReader{R: r, N: int64(size)} + arr = []interface{}{} + field interface{} + ) + + for { + if field, err = readField(lim); err != nil { + if err == io.EOF { + break + } + return nil, err + } + arr = append(arr, field) + } + + return arr, nil +} + +// Checks if this bit mask matches the flags bitset +func hasProperty(mask uint16, prop int) bool { + return int(mask)&prop > 0 +} + +func (r *reader) parseHeaderFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &headerFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.ClassId); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.weight); err != nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &hf.Size); err != nil { + return + } + + var flags uint16 + + if err = binary.Read(r.r, binary.BigEndian, 
&flags); err != nil { + return + } + + if hasProperty(flags, flagContentType) { + if hf.Properties.ContentType, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagContentEncoding) { + if hf.Properties.ContentEncoding, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagHeaders) { + if hf.Properties.Headers, err = readTable(r.r); err != nil { + return + } + } + if hasProperty(flags, flagDeliveryMode) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(flags, flagPriority) { + if err = binary.Read(r.r, binary.BigEndian, &hf.Properties.Priority); err != nil { + return + } + } + if hasProperty(flags, flagCorrelationId) { + if hf.Properties.CorrelationId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReplyTo) { + if hf.Properties.ReplyTo, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagExpiration) { + if hf.Properties.Expiration, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagMessageId) { + if hf.Properties.MessageId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagTimestamp) { + if hf.Properties.Timestamp, err = readTimestamp(r.r); err != nil { + return + } + } + if hasProperty(flags, flagType) { + if hf.Properties.Type, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagUserId) { + if hf.Properties.UserId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagAppId) { + if hf.Properties.AppId, err = readShortstr(r.r); err != nil { + return + } + } + if hasProperty(flags, flagReserved1) { + if hf.Properties.reserved1, err = readShortstr(r.r); err != nil { + return + } + } + + return hf, nil +} + +func (r *reader) parseBodyFrame(channel uint16, size uint32) (frame frame, err error) { + bf := &bodyFrame{ + ChannelId: channel, + Body: 
make([]byte, size), + } + + if _, err = io.ReadFull(r.r, bf.Body); err != nil { + return nil, err + } + + return bf, nil +} + +var errHeartbeatPayload = errors.New("Heartbeats should not have a payload") + +func (r *reader) parseHeartbeatFrame(channel uint16, size uint32) (frame frame, err error) { + hf := &heartbeatFrame{ + ChannelId: channel, + } + + if size > 0 { + return nil, errHeartbeatPayload + } + + return hf, nil +} diff --git a/vendor/github.com/streadway/amqp/return.go b/vendor/github.com/streadway/amqp/return.go new file mode 100644 index 000000000..10dcedb2c --- /dev/null +++ b/vendor/github.com/streadway/amqp/return.go @@ -0,0 +1,64 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "time" +) + +// Return captures a flattened struct of fields returned by the server when a +// Publishing is unable to be delivered either due to the `mandatory` flag set +// and no route found, or `immediate` flag set and no free consumer. 
+type Return struct { + ReplyCode uint16 // reason + ReplyText string // description + Exchange string // basic.publish exchange + RoutingKey string // basic.publish routing key + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - non-persistent (1) or persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + + Body []byte +} + +func newReturn(msg basicReturn) *Return { + props, body := msg.getContent() + + return &Return{ + ReplyCode: msg.ReplyCode, + ReplyText: msg.ReplyText, + Exchange: msg.Exchange, + RoutingKey: msg.RoutingKey, + + Headers: props.Headers, + ContentType: props.ContentType, + ContentEncoding: props.ContentEncoding, + DeliveryMode: props.DeliveryMode, + Priority: props.Priority, + CorrelationId: props.CorrelationId, + ReplyTo: props.ReplyTo, + Expiration: props.Expiration, + MessageId: props.MessageId, + Timestamp: props.Timestamp, + Type: props.Type, + UserId: props.UserId, + AppId: props.AppId, + + Body: body, + } +} diff --git a/vendor/github.com/streadway/amqp/spec091.go b/vendor/github.com/streadway/amqp/spec091.go new file mode 100644 index 000000000..cd53ebe74 --- /dev/null +++ b/vendor/github.com/streadway/amqp/spec091.go @@ -0,0 +1,3306 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +/* GENERATED FILE - DO NOT EDIT */ +/* Rebuild from the spec/gen.go tool */ + +package amqp + +import ( + "encoding/binary" + "fmt" + "io" +) + +// Error codes that can be sent from the server during a connection or +// channel exception or used by the client to indicate a class of error like +// ErrCredentials. The text of the error is likely more interesting than +// these constants. +const ( + frameMethod = 1 + frameHeader = 2 + frameBody = 3 + frameHeartbeat = 8 + frameMinSize = 4096 + frameEnd = 206 + replySuccess = 200 + ContentTooLarge = 311 + NoRoute = 312 + NoConsumers = 313 + ConnectionForced = 320 + InvalidPath = 402 + AccessRefused = 403 + NotFound = 404 + ResourceLocked = 405 + PreconditionFailed = 406 + FrameError = 501 + SyntaxError = 502 + CommandInvalid = 503 + ChannelError = 504 + UnexpectedFrame = 505 + ResourceError = 506 + NotAllowed = 530 + NotImplemented = 540 + InternalError = 541 +) + +func isSoftExceptionCode(code int) bool { + switch code { + case 311: + return true + case 312: + return true + case 313: + return true + case 403: + return true + case 404: + return true + case 405: + return true + case 406: + return true + + } + return false +} + +type connectionStart struct { + VersionMajor byte + VersionMinor byte + ServerProperties Table + Mechanisms string + Locales string +} + +func (msg *connectionStart) id() (uint16, uint16) { + return 10, 10 +} + +func (msg *connectionStart) wait() bool { + return true +} + +func (msg *connectionStart) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.VersionMajor); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.VersionMinor); err != nil { + return + } + + if err = writeTable(w, msg.ServerProperties); err != nil { + return + } + + if err = writeLongstr(w, 
msg.Mechanisms); err != nil { + return + } + if err = writeLongstr(w, msg.Locales); err != nil { + return + } + + return +} + +func (msg *connectionStart) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.VersionMajor); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.VersionMinor); err != nil { + return + } + + if msg.ServerProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanisms, err = readLongstr(r); err != nil { + return + } + if msg.Locales, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionStartOk struct { + ClientProperties Table + Mechanism string + Response string + Locale string +} + +func (msg *connectionStartOk) id() (uint16, uint16) { + return 10, 11 +} + +func (msg *connectionStartOk) wait() bool { + return true +} + +func (msg *connectionStartOk) write(w io.Writer) (err error) { + + if err = writeTable(w, msg.ClientProperties); err != nil { + return + } + + if err = writeShortstr(w, msg.Mechanism); err != nil { + return + } + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + if err = writeShortstr(w, msg.Locale); err != nil { + return + } + + return +} + +func (msg *connectionStartOk) read(r io.Reader) (err error) { + + if msg.ClientProperties, err = readTable(r); err != nil { + return + } + + if msg.Mechanism, err = readShortstr(r); err != nil { + return + } + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + if msg.Locale, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionSecure struct { + Challenge string +} + +func (msg *connectionSecure) id() (uint16, uint16) { + return 10, 20 +} + +func (msg *connectionSecure) wait() bool { + return true +} + +func (msg *connectionSecure) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Challenge); err != nil { + return + } + + return +} + +func (msg *connectionSecure) read(r io.Reader) (err error) { + + if 
msg.Challenge, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionSecureOk struct { + Response string +} + +func (msg *connectionSecureOk) id() (uint16, uint16) { + return 10, 21 +} + +func (msg *connectionSecureOk) wait() bool { + return true +} + +func (msg *connectionSecureOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.Response); err != nil { + return + } + + return +} + +func (msg *connectionSecureOk) read(r io.Reader) (err error) { + + if msg.Response, err = readLongstr(r); err != nil { + return + } + + return +} + +type connectionTune struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTune) id() (uint16, uint16) { + return 10, 30 +} + +func (msg *connectionTune) wait() bool { + return true +} + +func (msg *connectionTune) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTune) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionTuneOk struct { + ChannelMax uint16 + FrameMax uint32 + Heartbeat uint16 +} + +func (msg *connectionTuneOk) id() (uint16, uint16) { + return 10, 31 +} + +func (msg *connectionTuneOk) wait() bool { + return true +} + +func (msg *connectionTuneOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ChannelMax); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.FrameMax); err != nil { + return + } + + if err = binary.Write(w, 
binary.BigEndian, msg.Heartbeat); err != nil { + return + } + + return +} + +func (msg *connectionTuneOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ChannelMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.FrameMax); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.Heartbeat); err != nil { + return + } + + return +} + +type connectionOpen struct { + VirtualHost string + reserved1 string + reserved2 bool +} + +func (msg *connectionOpen) id() (uint16, uint16) { + return 10, 40 +} + +func (msg *connectionOpen) wait() bool { + return true +} + +func (msg *connectionOpen) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.VirtualHost); err != nil { + return + } + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + if msg.reserved2 { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *connectionOpen) read(r io.Reader) (err error) { + var bits byte + + if msg.VirtualHost, err = readShortstr(r); err != nil { + return + } + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.reserved2 = (bits&(1<<0) > 0) + + return +} + +type connectionOpenOk struct { + reserved1 string +} + +func (msg *connectionOpenOk) id() (uint16, uint16) { + return 10, 41 +} + +func (msg *connectionOpenOk) wait() bool { + return true +} + +func (msg *connectionOpenOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *connectionOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *connectionClose) id() 
(uint16, uint16) { + return 10, 50 +} + +func (msg *connectionClose) wait() bool { + return true +} + +func (msg *connectionClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *connectionClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type connectionCloseOk struct { +} + +func (msg *connectionCloseOk) id() (uint16, uint16) { + return 10, 51 +} + +func (msg *connectionCloseOk) wait() bool { + return true +} + +func (msg *connectionCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *connectionCloseOk) read(r io.Reader) (err error) { + + return +} + +type connectionBlocked struct { + Reason string +} + +func (msg *connectionBlocked) id() (uint16, uint16) { + return 10, 60 +} + +func (msg *connectionBlocked) wait() bool { + return false +} + +func (msg *connectionBlocked) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Reason); err != nil { + return + } + + return +} + +func (msg *connectionBlocked) read(r io.Reader) (err error) { + + if msg.Reason, err = readShortstr(r); err != nil { + return + } + + return +} + +type connectionUnblocked struct { +} + +func (msg *connectionUnblocked) id() (uint16, uint16) { + return 10, 61 +} + +func (msg *connectionUnblocked) wait() bool { + return false +} + +func (msg *connectionUnblocked) write(w io.Writer) (err 
error) { + + return +} + +func (msg *connectionUnblocked) read(r io.Reader) (err error) { + + return +} + +type channelOpen struct { + reserved1 string +} + +func (msg *channelOpen) id() (uint16, uint16) { + return 20, 10 +} + +func (msg *channelOpen) wait() bool { + return true +} + +func (msg *channelOpen) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpen) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type channelOpenOk struct { + reserved1 string +} + +func (msg *channelOpenOk) id() (uint16, uint16) { + return 20, 11 +} + +func (msg *channelOpenOk) wait() bool { + return true +} + +func (msg *channelOpenOk) write(w io.Writer) (err error) { + + if err = writeLongstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *channelOpenOk) read(r io.Reader) (err error) { + + if msg.reserved1, err = readLongstr(r); err != nil { + return + } + + return +} + +type channelFlow struct { + Active bool +} + +func (msg *channelFlow) id() (uint16, uint16) { + return 20, 20 +} + +func (msg *channelFlow) wait() bool { + return true +} + +func (msg *channelFlow) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *channelFlow) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelFlowOk struct { + Active bool +} + +func (msg *channelFlowOk) id() (uint16, uint16) { + return 20, 21 +} + +func (msg *channelFlowOk) wait() bool { + return false +} + +func (msg *channelFlowOk) write(w io.Writer) (err error) { + var bits byte + + if msg.Active { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != 
nil { + return + } + + return +} + +func (msg *channelFlowOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Active = (bits&(1<<0) > 0) + + return +} + +type channelClose struct { + ReplyCode uint16 + ReplyText string + ClassId uint16 + MethodId uint16 +} + +func (msg *channelClose) id() (uint16, uint16) { + return 20, 40 +} + +func (msg *channelClose) wait() bool { + return true +} + +func (msg *channelClose) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.ClassId); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.MethodId); err != nil { + return + } + + return +} + +func (msg *channelClose) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.ClassId); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.MethodId); err != nil { + return + } + + return +} + +type channelCloseOk struct { +} + +func (msg *channelCloseOk) id() (uint16, uint16) { + return 20, 41 +} + +func (msg *channelCloseOk) wait() bool { + return true +} + +func (msg *channelCloseOk) write(w io.Writer) (err error) { + + return +} + +func (msg *channelCloseOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDeclare struct { + reserved1 uint16 + Exchange string + Type string + Passive bool + Durable bool + AutoDelete bool + Internal bool + NoWait bool + Arguments Table +} + +func (msg *exchangeDeclare) id() (uint16, uint16) { + return 40, 10 +} + +func (msg *exchangeDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDeclare) write(w io.Writer) (err 
error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.Type); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.AutoDelete { + bits |= 1 << 2 + } + + if msg.Internal { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.Type, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.AutoDelete = (bits&(1<<2) > 0) + msg.Internal = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeDeclareOk struct { +} + +func (msg *exchangeDeclareOk) id() (uint16, uint16) { + return 40, 11 +} + +func (msg *exchangeDeclareOk) wait() bool { + return true +} + +func (msg *exchangeDeclareOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeclareOk) read(r io.Reader) (err error) { + + return +} + +type exchangeDelete struct { + reserved1 uint16 + Exchange string + IfUnused bool + NoWait bool +} + +func (msg *exchangeDelete) id() (uint16, uint16) { + return 40, 20 +} + +func (msg *exchangeDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != 
nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.NoWait { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *exchangeDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.NoWait = (bits&(1<<1) > 0) + + return +} + +type exchangeDeleteOk struct { +} + +func (msg *exchangeDeleteOk) id() (uint16, uint16) { + return 40, 21 +} + +func (msg *exchangeDeleteOk) wait() bool { + return true +} + +func (msg *exchangeDeleteOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeDeleteOk) read(r io.Reader) (err error) { + + return +} + +type exchangeBind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeBind) id() (uint16, uint16) { + return 40, 30 +} + +func (msg *exchangeBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, 
&msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeBindOk struct { +} + +func (msg *exchangeBindOk) id() (uint16, uint16) { + return 40, 31 +} + +func (msg *exchangeBindOk) wait() bool { + return true +} + +func (msg *exchangeBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeBindOk) read(r io.Reader) (err error) { + + return +} + +type exchangeUnbind struct { + reserved1 uint16 + Destination string + Source string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *exchangeUnbind) id() (uint16, uint16) { + return 40, 40 +} + +func (msg *exchangeUnbind) wait() bool { + return true && !msg.NoWait +} + +func (msg *exchangeUnbind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Destination); err != nil { + return + } + if err = writeShortstr(w, msg.Source); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *exchangeUnbind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Destination, err = readShortstr(r); err != nil { + return + } + if msg.Source, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err 
!= nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type exchangeUnbindOk struct { +} + +func (msg *exchangeUnbindOk) id() (uint16, uint16) { + return 40, 51 +} + +func (msg *exchangeUnbindOk) wait() bool { + return true +} + +func (msg *exchangeUnbindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *exchangeUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queueDeclare struct { + reserved1 uint16 + Queue string + Passive bool + Durable bool + Exclusive bool + AutoDelete bool + NoWait bool + Arguments Table +} + +func (msg *queueDeclare) id() (uint16, uint16) { + return 50, 10 +} + +func (msg *queueDeclare) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDeclare) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.Passive { + bits |= 1 << 0 + } + + if msg.Durable { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.AutoDelete { + bits |= 1 << 3 + } + + if msg.NoWait { + bits |= 1 << 4 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueDeclare) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Passive = (bits&(1<<0) > 0) + msg.Durable = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.AutoDelete = (bits&(1<<3) > 0) + msg.NoWait = (bits&(1<<4) > 0) + + if msg.Arguments, err = readTable(r); 
err != nil { + return + } + + return +} + +type queueDeclareOk struct { + Queue string + MessageCount uint32 + ConsumerCount uint32 +} + +func (msg *queueDeclareOk) id() (uint16, uint16) { + return 50, 11 +} + +func (msg *queueDeclareOk) wait() bool { + return true +} + +func (msg *queueDeclareOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + if err = binary.Write(w, binary.BigEndian, msg.ConsumerCount); err != nil { + return + } + + return +} + +func (msg *queueDeclareOk) read(r io.Reader) (err error) { + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + if err = binary.Read(r, binary.BigEndian, &msg.ConsumerCount); err != nil { + return + } + + return +} + +type queueBind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + NoWait bool + Arguments Table +} + +func (msg *queueBind) id() (uint16, uint16) { + return 50, 20 +} + +func (msg *queueBind) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueBind) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueBind) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } 
+ if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueBindOk struct { +} + +func (msg *queueBindOk) id() (uint16, uint16) { + return 50, 21 +} + +func (msg *queueBindOk) wait() bool { + return true +} + +func (msg *queueBindOk) write(w io.Writer) (err error) { + + return +} + +func (msg *queueBindOk) read(r io.Reader) (err error) { + + return +} + +type queueUnbind struct { + reserved1 uint16 + Queue string + Exchange string + RoutingKey string + Arguments Table +} + +func (msg *queueUnbind) id() (uint16, uint16) { + return 50, 50 +} + +func (msg *queueUnbind) wait() bool { + return true +} + +func (msg *queueUnbind) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *queueUnbind) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if msg.Arguments, err = readTable(r); err != nil { + return + } + + return +} + +type queueUnbindOk struct { +} + +func (msg *queueUnbindOk) id() (uint16, uint16) { + return 50, 51 +} + +func (msg *queueUnbindOk) wait() bool { + return true +} + +func (msg *queueUnbindOk) write(w io.Writer) (err error) { + + 
return +} + +func (msg *queueUnbindOk) read(r io.Reader) (err error) { + + return +} + +type queuePurge struct { + reserved1 uint16 + Queue string + NoWait bool +} + +func (msg *queuePurge) id() (uint16, uint16) { + return 50, 30 +} + +func (msg *queuePurge) wait() bool { + return true && !msg.NoWait +} + +func (msg *queuePurge) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queuePurge) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type queuePurgeOk struct { + MessageCount uint32 +} + +func (msg *queuePurgeOk) id() (uint16, uint16) { + return 50, 31 +} + +func (msg *queuePurgeOk) wait() bool { + return true +} + +func (msg *queuePurgeOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queuePurgeOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type queueDelete struct { + reserved1 uint16 + Queue string + IfUnused bool + IfEmpty bool + NoWait bool +} + +func (msg *queueDelete) id() (uint16, uint16) { + return 50, 40 +} + +func (msg *queueDelete) wait() bool { + return true && !msg.NoWait +} + +func (msg *queueDelete) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, 
msg.Queue); err != nil { + return + } + + if msg.IfUnused { + bits |= 1 << 0 + } + + if msg.IfEmpty { + bits |= 1 << 1 + } + + if msg.NoWait { + bits |= 1 << 2 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *queueDelete) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.IfUnused = (bits&(1<<0) > 0) + msg.IfEmpty = (bits&(1<<1) > 0) + msg.NoWait = (bits&(1<<2) > 0) + + return +} + +type queueDeleteOk struct { + MessageCount uint32 +} + +func (msg *queueDeleteOk) id() (uint16, uint16) { + return 50, 41 +} + +func (msg *queueDeleteOk) wait() bool { + return true +} + +func (msg *queueDeleteOk) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *queueDeleteOk) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicQos struct { + PrefetchSize uint32 + PrefetchCount uint16 + Global bool +} + +func (msg *basicQos) id() (uint16, uint16) { + return 60, 10 +} + +func (msg *basicQos) wait() bool { + return true +} + +func (msg *basicQos) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchSize); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.PrefetchCount); err != nil { + return + } + + if msg.Global { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicQos) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.PrefetchSize); err != nil { + return + } + + if err = 
binary.Read(r, binary.BigEndian, &msg.PrefetchCount); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Global = (bits&(1<<0) > 0) + + return +} + +type basicQosOk struct { +} + +func (msg *basicQosOk) id() (uint16, uint16) { + return 60, 11 +} + +func (msg *basicQosOk) wait() bool { + return true +} + +func (msg *basicQosOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicQosOk) read(r io.Reader) (err error) { + + return +} + +type basicConsume struct { + reserved1 uint16 + Queue string + ConsumerTag string + NoLocal bool + NoAck bool + Exclusive bool + NoWait bool + Arguments Table +} + +func (msg *basicConsume) id() (uint16, uint16) { + return 60, 20 +} + +func (msg *basicConsume) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicConsume) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoLocal { + bits |= 1 << 0 + } + + if msg.NoAck { + bits |= 1 << 1 + } + + if msg.Exclusive { + bits |= 1 << 2 + } + + if msg.NoWait { + bits |= 1 << 3 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeTable(w, msg.Arguments); err != nil { + return + } + + return +} + +func (msg *basicConsume) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoLocal = (bits&(1<<0) > 0) + msg.NoAck = (bits&(1<<1) > 0) + msg.Exclusive = (bits&(1<<2) > 0) + msg.NoWait = (bits&(1<<3) > 0) + + if msg.Arguments, err 
= readTable(r); err != nil { + return + } + + return +} + +type basicConsumeOk struct { + ConsumerTag string +} + +func (msg *basicConsumeOk) id() (uint16, uint16) { + return 60, 21 +} + +func (msg *basicConsumeOk) wait() bool { + return true +} + +func (msg *basicConsumeOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicConsumeOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicCancel struct { + ConsumerTag string + NoWait bool +} + +func (msg *basicCancel) id() (uint16, uint16) { + return 60, 30 +} + +func (msg *basicCancel) wait() bool { + return true && !msg.NoWait +} + +func (msg *basicCancel) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if msg.NoWait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicCancel) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoWait = (bits&(1<<0) > 0) + + return +} + +type basicCancelOk struct { + ConsumerTag string +} + +func (msg *basicCancelOk) id() (uint16, uint16) { + return 60, 31 +} + +func (msg *basicCancelOk) wait() bool { + return true +} + +func (msg *basicCancelOk) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + return +} + +func (msg *basicCancelOk) read(r io.Reader) (err error) { + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicPublish struct { + reserved1 uint16 + Exchange string + RoutingKey string + Mandatory bool + Immediate bool + Properties properties + Body []byte +} + +func (msg *basicPublish) id() 
(uint16, uint16) { + return 60, 40 +} + +func (msg *basicPublish) wait() bool { + return false +} + +func (msg *basicPublish) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicPublish) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicPublish) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if msg.Mandatory { + bits |= 1 << 0 + } + + if msg.Immediate { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicPublish) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Mandatory = (bits&(1<<0) > 0) + msg.Immediate = (bits&(1<<1) > 0) + + return +} + +type basicReturn struct { + ReplyCode uint16 + ReplyText string + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicReturn) id() (uint16, uint16) { + return 60, 50 +} + +func (msg *basicReturn) wait() bool { + return false +} + +func (msg *basicReturn) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicReturn) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicReturn) write(w io.Writer) (err error) { + + if err = binary.Write(w, binary.BigEndian, msg.ReplyCode); err != nil { + return + } + + if err = writeShortstr(w, msg.ReplyText); err != nil { + return + } + if err = 
writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicReturn) read(r io.Reader) (err error) { + + if err = binary.Read(r, binary.BigEndian, &msg.ReplyCode); err != nil { + return + } + + if msg.ReplyText, err = readShortstr(r); err != nil { + return + } + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicDeliver struct { + ConsumerTag string + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + Properties properties + Body []byte +} + +func (msg *basicDeliver) id() (uint16, uint16) { + return 60, 60 +} + +func (msg *basicDeliver) wait() bool { + return false +} + +func (msg *basicDeliver) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicDeliver) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicDeliver) write(w io.Writer) (err error) { + var bits byte + + if err = writeShortstr(w, msg.ConsumerTag); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + return +} + +func (msg *basicDeliver) read(r io.Reader) (err error) { + var bits byte + + if msg.ConsumerTag, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if 
msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicGet struct { + reserved1 uint16 + Queue string + NoAck bool +} + +func (msg *basicGet) id() (uint16, uint16) { + return 60, 70 +} + +func (msg *basicGet) wait() bool { + return true +} + +func (msg *basicGet) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.reserved1); err != nil { + return + } + + if err = writeShortstr(w, msg.Queue); err != nil { + return + } + + if msg.NoAck { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicGet) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.reserved1); err != nil { + return + } + + if msg.Queue, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.NoAck = (bits&(1<<0) > 0) + + return +} + +type basicGetOk struct { + DeliveryTag uint64 + Redelivered bool + Exchange string + RoutingKey string + MessageCount uint32 + Properties properties + Body []byte +} + +func (msg *basicGetOk) id() (uint16, uint16) { + return 60, 71 +} + +func (msg *basicGetOk) wait() bool { + return true +} + +func (msg *basicGetOk) getContent() (properties, []byte) { + return msg.Properties, msg.Body +} + +func (msg *basicGetOk) setContent(props properties, body []byte) { + msg.Properties, msg.Body = props, body +} + +func (msg *basicGetOk) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Redelivered { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + if err = writeShortstr(w, msg.Exchange); err != nil { + return + } + if err = writeShortstr(w, msg.RoutingKey); err != nil { + return + } + + if err = binary.Write(w, binary.BigEndian, 
msg.MessageCount); err != nil { + return + } + + return +} + +func (msg *basicGetOk) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Redelivered = (bits&(1<<0) > 0) + + if msg.Exchange, err = readShortstr(r); err != nil { + return + } + if msg.RoutingKey, err = readShortstr(r); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &msg.MessageCount); err != nil { + return + } + + return +} + +type basicGetEmpty struct { + reserved1 string +} + +func (msg *basicGetEmpty) id() (uint16, uint16) { + return 60, 72 +} + +func (msg *basicGetEmpty) wait() bool { + return true +} + +func (msg *basicGetEmpty) write(w io.Writer) (err error) { + + if err = writeShortstr(w, msg.reserved1); err != nil { + return + } + + return +} + +func (msg *basicGetEmpty) read(r io.Reader) (err error) { + + if msg.reserved1, err = readShortstr(r); err != nil { + return + } + + return +} + +type basicAck struct { + DeliveryTag uint64 + Multiple bool +} + +func (msg *basicAck) id() (uint16, uint16) { + return 60, 80 +} + +func (msg *basicAck) wait() bool { + return false +} + +func (msg *basicAck) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicAck) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + + return +} + +type basicReject struct { + DeliveryTag uint64 + Requeue bool +} + +func (msg *basicReject) id() (uint16, uint16) { + return 60, 90 +} + +func (msg 
*basicReject) wait() bool { + return false +} + +func (msg *basicReject) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicReject) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverAsync struct { + Requeue bool +} + +func (msg *basicRecoverAsync) id() (uint16, uint16) { + return 60, 100 +} + +func (msg *basicRecoverAsync) wait() bool { + return false +} + +func (msg *basicRecoverAsync) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecoverAsync) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecover struct { + Requeue bool +} + +func (msg *basicRecover) id() (uint16, uint16) { + return 60, 110 +} + +func (msg *basicRecover) wait() bool { + return true +} + +func (msg *basicRecover) write(w io.Writer) (err error) { + var bits byte + + if msg.Requeue { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicRecover) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Requeue = (bits&(1<<0) > 0) + + return +} + +type basicRecoverOk struct { +} + +func (msg *basicRecoverOk) id() (uint16, uint16) { + return 60, 111 +} + +func (msg *basicRecoverOk) 
wait() bool { + return true +} + +func (msg *basicRecoverOk) write(w io.Writer) (err error) { + + return +} + +func (msg *basicRecoverOk) read(r io.Reader) (err error) { + + return +} + +type basicNack struct { + DeliveryTag uint64 + Multiple bool + Requeue bool +} + +func (msg *basicNack) id() (uint16, uint16) { + return 60, 120 +} + +func (msg *basicNack) wait() bool { + return false +} + +func (msg *basicNack) write(w io.Writer) (err error) { + var bits byte + + if err = binary.Write(w, binary.BigEndian, msg.DeliveryTag); err != nil { + return + } + + if msg.Multiple { + bits |= 1 << 0 + } + + if msg.Requeue { + bits |= 1 << 1 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *basicNack) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &msg.DeliveryTag); err != nil { + return + } + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Multiple = (bits&(1<<0) > 0) + msg.Requeue = (bits&(1<<1) > 0) + + return +} + +type txSelect struct { +} + +func (msg *txSelect) id() (uint16, uint16) { + return 90, 10 +} + +func (msg *txSelect) wait() bool { + return true +} + +func (msg *txSelect) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelect) read(r io.Reader) (err error) { + + return +} + +type txSelectOk struct { +} + +func (msg *txSelectOk) id() (uint16, uint16) { + return 90, 11 +} + +func (msg *txSelectOk) wait() bool { + return true +} + +func (msg *txSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txSelectOk) read(r io.Reader) (err error) { + + return +} + +type txCommit struct { +} + +func (msg *txCommit) id() (uint16, uint16) { + return 90, 20 +} + +func (msg *txCommit) wait() bool { + return true +} + +func (msg *txCommit) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommit) read(r io.Reader) (err error) { + + return +} + +type txCommitOk struct { +} + +func (msg 
*txCommitOk) id() (uint16, uint16) { + return 90, 21 +} + +func (msg *txCommitOk) wait() bool { + return true +} + +func (msg *txCommitOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txCommitOk) read(r io.Reader) (err error) { + + return +} + +type txRollback struct { +} + +func (msg *txRollback) id() (uint16, uint16) { + return 90, 30 +} + +func (msg *txRollback) wait() bool { + return true +} + +func (msg *txRollback) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollback) read(r io.Reader) (err error) { + + return +} + +type txRollbackOk struct { +} + +func (msg *txRollbackOk) id() (uint16, uint16) { + return 90, 31 +} + +func (msg *txRollbackOk) wait() bool { + return true +} + +func (msg *txRollbackOk) write(w io.Writer) (err error) { + + return +} + +func (msg *txRollbackOk) read(r io.Reader) (err error) { + + return +} + +type confirmSelect struct { + Nowait bool +} + +func (msg *confirmSelect) id() (uint16, uint16) { + return 85, 10 +} + +func (msg *confirmSelect) wait() bool { + return true +} + +func (msg *confirmSelect) write(w io.Writer) (err error) { + var bits byte + + if msg.Nowait { + bits |= 1 << 0 + } + + if err = binary.Write(w, binary.BigEndian, bits); err != nil { + return + } + + return +} + +func (msg *confirmSelect) read(r io.Reader) (err error) { + var bits byte + + if err = binary.Read(r, binary.BigEndian, &bits); err != nil { + return + } + msg.Nowait = (bits&(1<<0) > 0) + + return +} + +type confirmSelectOk struct { +} + +func (msg *confirmSelectOk) id() (uint16, uint16) { + return 85, 11 +} + +func (msg *confirmSelectOk) wait() bool { + return true +} + +func (msg *confirmSelectOk) write(w io.Writer) (err error) { + + return +} + +func (msg *confirmSelectOk) read(r io.Reader) (err error) { + + return +} + +func (r *reader) parseMethodFrame(channel uint16, size uint32) (f frame, err error) { + mf := &methodFrame{ + ChannelId: channel, + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.ClassId); err 
!= nil { + return + } + + if err = binary.Read(r.r, binary.BigEndian, &mf.MethodId); err != nil { + return + } + + switch mf.ClassId { + + case 10: // connection + switch mf.MethodId { + + case 10: // connection start + //fmt.Println("NextMethod: class:10 method:10") + method := &connectionStart{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // connection start-ok + //fmt.Println("NextMethod: class:10 method:11") + method := &connectionStartOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // connection secure + //fmt.Println("NextMethod: class:10 method:20") + method := &connectionSecure{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // connection secure-ok + //fmt.Println("NextMethod: class:10 method:21") + method := &connectionSecureOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // connection tune + //fmt.Println("NextMethod: class:10 method:30") + method := &connectionTune{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // connection tune-ok + //fmt.Println("NextMethod: class:10 method:31") + method := &connectionTuneOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // connection open + //fmt.Println("NextMethod: class:10 method:40") + method := &connectionOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // connection open-ok + //fmt.Println("NextMethod: class:10 method:41") + method := &connectionOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // connection close + //fmt.Println("NextMethod: class:10 method:50") + method := &connectionClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // connection close-ok + //fmt.Println("NextMethod: class:10 method:51") + 
method := &connectionCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // connection blocked + //fmt.Println("NextMethod: class:10 method:60") + method := &connectionBlocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 61: // connection unblocked + //fmt.Println("NextMethod: class:10 method:61") + method := &connectionUnblocked{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 20: // channel + switch mf.MethodId { + + case 10: // channel open + //fmt.Println("NextMethod: class:20 method:10") + method := &channelOpen{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // channel open-ok + //fmt.Println("NextMethod: class:20 method:11") + method := &channelOpenOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // channel flow + //fmt.Println("NextMethod: class:20 method:20") + method := &channelFlow{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // channel flow-ok + //fmt.Println("NextMethod: class:20 method:21") + method := &channelFlowOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // channel close + //fmt.Println("NextMethod: class:20 method:40") + method := &channelClose{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // channel close-ok + //fmt.Println("NextMethod: class:20 method:41") + method := &channelCloseOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 40: // exchange + switch mf.MethodId { + + case 10: // exchange declare + 
//fmt.Println("NextMethod: class:40 method:10") + method := &exchangeDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // exchange declare-ok + //fmt.Println("NextMethod: class:40 method:11") + method := &exchangeDeclareOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // exchange delete + //fmt.Println("NextMethod: class:40 method:20") + method := &exchangeDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // exchange delete-ok + //fmt.Println("NextMethod: class:40 method:21") + method := &exchangeDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // exchange bind + //fmt.Println("NextMethod: class:40 method:30") + method := &exchangeBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // exchange bind-ok + //fmt.Println("NextMethod: class:40 method:31") + method := &exchangeBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // exchange unbind + //fmt.Println("NextMethod: class:40 method:40") + method := &exchangeUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // exchange unbind-ok + //fmt.Println("NextMethod: class:40 method:51") + method := &exchangeUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 50: // queue + switch mf.MethodId { + + case 10: // queue declare + //fmt.Println("NextMethod: class:50 method:10") + method := &queueDeclare{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // queue declare-ok + //fmt.Println("NextMethod: class:50 method:11") + method := &queueDeclareOk{} + if err = method.read(r.r); err != nil { + return + 
} + mf.Method = method + + case 20: // queue bind + //fmt.Println("NextMethod: class:50 method:20") + method := &queueBind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // queue bind-ok + //fmt.Println("NextMethod: class:50 method:21") + method := &queueBindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // queue unbind + //fmt.Println("NextMethod: class:50 method:50") + method := &queueUnbind{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 51: // queue unbind-ok + //fmt.Println("NextMethod: class:50 method:51") + method := &queueUnbindOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // queue purge + //fmt.Println("NextMethod: class:50 method:30") + method := &queuePurge{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // queue purge-ok + //fmt.Println("NextMethod: class:50 method:31") + method := &queuePurgeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // queue delete + //fmt.Println("NextMethod: class:50 method:40") + method := &queueDelete{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 41: // queue delete-ok + //fmt.Println("NextMethod: class:50 method:41") + method := &queueDeleteOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 60: // basic + switch mf.MethodId { + + case 10: // basic qos + //fmt.Println("NextMethod: class:60 method:10") + method := &basicQos{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // basic qos-ok + //fmt.Println("NextMethod: class:60 method:11") + method := &basicQosOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method 
= method + + case 20: // basic consume + //fmt.Println("NextMethod: class:60 method:20") + method := &basicConsume{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // basic consume-ok + //fmt.Println("NextMethod: class:60 method:21") + method := &basicConsumeOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // basic cancel + //fmt.Println("NextMethod: class:60 method:30") + method := &basicCancel{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 31: // basic cancel-ok + //fmt.Println("NextMethod: class:60 method:31") + method := &basicCancelOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 40: // basic publish + //fmt.Println("NextMethod: class:60 method:40") + method := &basicPublish{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 50: // basic return + //fmt.Println("NextMethod: class:60 method:50") + method := &basicReturn{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 60: // basic deliver + //fmt.Println("NextMethod: class:60 method:60") + method := &basicDeliver{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 70: // basic get + //fmt.Println("NextMethod: class:60 method:70") + method := &basicGet{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 71: // basic get-ok + //fmt.Println("NextMethod: class:60 method:71") + method := &basicGetOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 72: // basic get-empty + //fmt.Println("NextMethod: class:60 method:72") + method := &basicGetEmpty{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 80: // basic ack + //fmt.Println("NextMethod: class:60 method:80") + method := &basicAck{} + if err = method.read(r.r); err != nil { + return + } 
+ mf.Method = method + + case 90: // basic reject + //fmt.Println("NextMethod: class:60 method:90") + method := &basicReject{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 100: // basic recover-async + //fmt.Println("NextMethod: class:60 method:100") + method := &basicRecoverAsync{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 110: // basic recover + //fmt.Println("NextMethod: class:60 method:110") + method := &basicRecover{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 111: // basic recover-ok + //fmt.Println("NextMethod: class:60 method:111") + method := &basicRecoverOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 120: // basic nack + //fmt.Println("NextMethod: class:60 method:120") + method := &basicNack{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 90: // tx + switch mf.MethodId { + + case 10: // tx select + //fmt.Println("NextMethod: class:90 method:10") + method := &txSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // tx select-ok + //fmt.Println("NextMethod: class:90 method:11") + method := &txSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 20: // tx commit + //fmt.Println("NextMethod: class:90 method:20") + method := &txCommit{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 21: // tx commit-ok + //fmt.Println("NextMethod: class:90 method:21") + method := &txCommitOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 30: // tx rollback + //fmt.Println("NextMethod: class:90 method:30") + method := &txRollback{} + if err = method.read(r.r); err != nil { + return + } + 
mf.Method = method + + case 31: // tx rollback-ok + //fmt.Println("NextMethod: class:90 method:31") + method := &txRollbackOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + case 85: // confirm + switch mf.MethodId { + + case 10: // confirm select + //fmt.Println("NextMethod: class:85 method:10") + method := &confirmSelect{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + case 11: // confirm select-ok + //fmt.Println("NextMethod: class:85 method:11") + method := &confirmSelectOk{} + if err = method.read(r.r); err != nil { + return + } + mf.Method = method + + default: + return nil, fmt.Errorf("Bad method frame, unknown method %d for class %d", mf.MethodId, mf.ClassId) + } + + default: + return nil, fmt.Errorf("Bad method frame, unknown class %d", mf.ClassId) + } + + return mf, nil +} diff --git a/vendor/github.com/streadway/amqp/types.go b/vendor/github.com/streadway/amqp/types.go new file mode 100644 index 000000000..83bd92f97 --- /dev/null +++ b/vendor/github.com/streadway/amqp/types.go @@ -0,0 +1,428 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "fmt" + "io" + "time" +) + +// Constants for standard AMQP 0-9-1 exchange types. +const ( + ExchangeDirect = "direct" + ExchangeFanout = "fanout" + ExchangeTopic = "topic" + ExchangeHeaders = "headers" +) + +var ( + // ErrClosed is returned when the channel or connection is not open + ErrClosed = &Error{Code: ChannelError, Reason: "channel/connection is not open"} + + // ErrChannelMax is returned when Connection.Channel has been called enough + // times that all channel IDs have been exhausted in the client or the + // server. 
+ ErrChannelMax = &Error{Code: ChannelError, Reason: "channel id space exhausted"} + + // ErrSASL is returned from Dial when the authentication mechanism could not + // be negoated. + ErrSASL = &Error{Code: AccessRefused, Reason: "SASL could not negotiate a shared mechanism"} + + // ErrCredentials is returned when the authenticated client is not authorized + // to any vhost. + ErrCredentials = &Error{Code: AccessRefused, Reason: "username or password not allowed"} + + // ErrVhost is returned when the authenticated user is not permitted to + // access the requested Vhost. + ErrVhost = &Error{Code: AccessRefused, Reason: "no access to this vhost"} + + // ErrSyntax is hard protocol error, indicating an unsupported protocol, + // implementation or encoding. + ErrSyntax = &Error{Code: SyntaxError, Reason: "invalid field or value inside of a frame"} + + // ErrFrame is returned when the protocol frame cannot be read from the + // server, indicating an unsupported protocol or unsupported frame type. + ErrFrame = &Error{Code: FrameError, Reason: "frame could not be parsed"} + + // ErrCommandInvalid is returned when the server sends an unexpected response + // to this requested message type. This indicates a bug in this client. + ErrCommandInvalid = &Error{Code: CommandInvalid, Reason: "unexpected command received"} + + // ErrUnexpectedFrame is returned when something other than a method or + // heartbeat frame is delivered to the Connection, indicating a bug in the + // client. + ErrUnexpectedFrame = &Error{Code: UnexpectedFrame, Reason: "unexpected frame received"} + + // ErrFieldType is returned when writing a message containing a Go type unsupported by AMQP. + ErrFieldType = &Error{Code: SyntaxError, Reason: "unsupported table field type"} +) + +// Error captures the code and reason a channel or connection has been closed +// by the server. 
+type Error struct { + Code int // constant code from the specification + Reason string // description of the error + Server bool // true when initiated from the server, false when from this library + Recover bool // true when this error can be recovered by retrying later or with different parameters +} + +func newError(code uint16, text string) *Error { + return &Error{ + Code: int(code), + Reason: text, + Recover: isSoftExceptionCode(int(code)), + Server: true, + } +} + +func (e Error) Error() string { + return fmt.Sprintf("Exception (%d) Reason: %q", e.Code, e.Reason) +} + +// Used by header frames to capture routing and header information +type properties struct { + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + Headers Table // Application or header exchange table + DeliveryMode uint8 // queue implementation use - Transient (1) or Persistent (2) + Priority uint8 // queue implementation use - 0 to 9 + CorrelationId string // application use - correlation identifier + ReplyTo string // application use - address to to reply to (ex: RPC) + Expiration string // implementation use - message expiration spec + MessageId string // application use - message identifier + Timestamp time.Time // application use - message timestamp + Type string // application use - message type name + UserId string // application use - creating user id + AppId string // application use - creating application + reserved1 string // was cluster-id - process for buffer consumption +} + +// DeliveryMode. Transient means higher throughput but messages will not be +// restored on broker restart. The delivery mode of publishings is unrelated +// to the durability of the queues they reside on. Transient messages will +// not be restored to durable queues, persistent messages will be restored to +// durable queues and lost on non-durable queues during server restart. +// +// This remains typed as uint8 to match Publishing.DeliveryMode. 
Other +// delivery modes specific to custom queue implementations are not enumerated +// here. +const ( + Transient uint8 = 1 + Persistent uint8 = 2 +) + +// The property flags are an array of bits that indicate the presence or +// absence of each property value in sequence. The bits are ordered from most +// high to low - bit 15 indicates the first property. +const ( + flagContentType = 0x8000 + flagContentEncoding = 0x4000 + flagHeaders = 0x2000 + flagDeliveryMode = 0x1000 + flagPriority = 0x0800 + flagCorrelationId = 0x0400 + flagReplyTo = 0x0200 + flagExpiration = 0x0100 + flagMessageId = 0x0080 + flagTimestamp = 0x0040 + flagType = 0x0020 + flagUserId = 0x0010 + flagAppId = 0x0008 + flagReserved1 = 0x0004 +) + +// Queue captures the current server state of the queue on the server returned +// from Channel.QueueDeclare or Channel.QueueInspect. +type Queue struct { + Name string // server confirmed or generated name + Messages int // count of messages not awaiting acknowledgment + Consumers int // number of consumers receiving deliveries +} + +// Publishing captures the client message sent to the server. The fields +// outside of the Headers table included in this struct mirror the underlying +// fields in the content frame. They use native types for convenience and +// efficiency. +type Publishing struct { + // Application or exchange specific fields, + // the headers exchange will inspect this field. 
+ Headers Table + + // Properties + ContentType string // MIME content type + ContentEncoding string // MIME content encoding + DeliveryMode uint8 // Transient (0 or 1) or Persistent (2) + Priority uint8 // 0 to 9 + CorrelationId string // correlation identifier + ReplyTo string // address to to reply to (ex: RPC) + Expiration string // message expiration spec + MessageId string // message identifier + Timestamp time.Time // message timestamp + Type string // message type name + UserId string // creating user id - ex: "guest" + AppId string // creating application id + + // The application specific payload of the message + Body []byte +} + +// Blocking notifies the server's TCP flow control of the Connection. When a +// server hits a memory or disk alarm it will block all connections until the +// resources are reclaimed. Use NotifyBlock on the Connection to receive these +// events. +type Blocking struct { + Active bool // TCP pushback active/inactive on server + Reason string // Server reason for activation +} + +// Confirmation notifies the acknowledgment or negative acknowledgement of a +// publishing identified by its delivery tag. Use NotifyPublish on the Channel +// to consume these events. +type Confirmation struct { + DeliveryTag uint64 // A 1 based counter of publishings from when the channel was put in Confirm mode + Ack bool // True when the server successfully received the publishing +} + +// Decimal matches the AMQP decimal type. Scale is the number of decimal +// digits Scale == 2, Value == 12345, Decimal == 123.45 +type Decimal struct { + Scale uint8 + Value int32 +} + +// Table stores user supplied fields of the following types: +// +// bool +// byte +// float32 +// float64 +// int +// int16 +// int32 +// int64 +// nil +// string +// time.Time +// amqp.Decimal +// amqp.Table +// []byte +// []interface{} - containing above types +// +// Functions taking a table will immediately fail when the table contains a +// value of an unsupported type. 
+// +// The caller must be specific in which precision of integer it wishes to +// encode. +// +// Use a type assertion when reading values from a table for type conversion. +// +// RabbitMQ expects int32 for integer values. +// +type Table map[string]interface{} + +func validateField(f interface{}) error { + switch fv := f.(type) { + case nil, bool, byte, int, int16, int32, int64, float32, float64, string, []byte, Decimal, time.Time: + return nil + + case []interface{}: + for _, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("in array %s", err) + } + } + return nil + + case Table: + for k, v := range fv { + if err := validateField(v); err != nil { + return fmt.Errorf("table field %q %s", k, err) + } + } + return nil + } + + return fmt.Errorf("value %T not supported", f) +} + +// Validate returns and error if any Go types in the table are incompatible with AMQP types. +func (t Table) Validate() error { + return validateField(t) +} + +// Heap interface for maintaining delivery tags +type tagSet []uint64 + +func (set tagSet) Len() int { return len(set) } +func (set tagSet) Less(i, j int) bool { return (set)[i] < (set)[j] } +func (set tagSet) Swap(i, j int) { (set)[i], (set)[j] = (set)[j], (set)[i] } +func (set *tagSet) Push(tag interface{}) { *set = append(*set, tag.(uint64)) } +func (set *tagSet) Pop() interface{} { + val := (*set)[len(*set)-1] + *set = (*set)[:len(*set)-1] + return val +} + +type message interface { + id() (uint16, uint16) + wait() bool + read(io.Reader) error + write(io.Writer) error +} + +type messageWithContent interface { + message + getContent() (properties, []byte) + setContent(properties, []byte) +} + +/* +The base interface implemented as: + +2.3.5 frame Details + +All frames consist of a header (7 octets), a payload of arbitrary size, and a 'frame-end' octet that detects +malformed frames: + + 0 1 3 7 size+7 size+8 + +------+---------+-------------+ +------------+ +-----------+ + | type | channel | size | | 
payload | | frame-end | + +------+---------+-------------+ +------------+ +-----------+ + octet short long size octets octet + +To read a frame, we: + + 1. Read the header and check the frame type and channel. + 2. Depending on the frame type, we read the payload and process it. + 3. Read the frame end octet. + +In realistic implementations where performance is a concern, we would use +“read-ahead buffering” or “gathering reads” to avoid doing three separate +system calls to read a frame. + +*/ +type frame interface { + write(io.Writer) error + channel() uint16 +} + +type reader struct { + r io.Reader +} + +type writer struct { + w io.Writer +} + +// Implements the frame interface for Connection RPC +type protocolHeader struct{} + +func (protocolHeader) write(w io.Writer) error { + _, err := w.Write([]byte{'A', 'M', 'Q', 'P', 0, 0, 9, 1}) + return err +} + +func (protocolHeader) channel() uint16 { + panic("only valid as initial handshake") +} + +/* +Method frames carry the high-level protocol commands (which we call "methods"). +One method frame carries one command. The method frame payload has this format: + + 0 2 4 + +----------+-----------+-------------- - - + | class-id | method-id | arguments... + +----------+-----------+-------------- - - + short short ... + +To process a method frame, we: + 1. Read the method frame payload. + 2. Unpack it into a structure. A given method always has the same structure, + so we can unpack the method rapidly. 3. Check that the method is allowed in + the current context. + 4. Check that the method arguments are valid. + 5. Execute the method. + +Method frame bodies are constructed as a list of AMQP data fields (bits, +integers, strings and string tables). The marshalling code is trivially +generated directly from the protocol specifications, and can be very rapid. 
+*/ +type methodFrame struct { + ChannelId uint16 + ClassId uint16 + MethodId uint16 + Method message +} + +func (f *methodFrame) channel() uint16 { return f.ChannelId } + +/* +Heartbeating is a technique designed to undo one of TCP/IP's features, namely +its ability to recover from a broken physical connection by closing only after +a quite long time-out. In some scenarios we need to know very rapidly if a +peer is disconnected or not responding for other reasons (e.g. it is looping). +Since heartbeating can be done at a low level, we implement this as a special +type of frame that peers exchange at the transport level, rather than as a +class method. +*/ +type heartbeatFrame struct { + ChannelId uint16 +} + +func (f *heartbeatFrame) channel() uint16 { return f.ChannelId } + +/* +Certain methods (such as Basic.Publish, Basic.Deliver, etc.) are formally +defined as carrying content. When a peer sends such a method frame, it always +follows it with a content header and zero or more content body frames. + +A content header frame has this format: + + 0 2 4 12 14 + +----------+--------+-----------+----------------+------------- - - + | class-id | weight | body size | property flags | property list... + +----------+--------+-----------+----------------+------------- - - + short short long long short remainder... + +We place content body in distinct frames (rather than including it in the +method) so that AMQP may support "zero copy" techniques in which content is +never marshalled or encoded. We place the content properties in their own +frame so that recipients can selectively discard contents they do not want to +process +*/ +type headerFrame struct { + ChannelId uint16 + ClassId uint16 + weight uint16 + Size uint64 + Properties properties +} + +func (f *headerFrame) channel() uint16 { return f.ChannelId } + +/* +Content is the application data we carry from client-to-client via the AMQP +server. 
Content is, roughly speaking, a set of properties plus a binary data +part. The set of allowed properties are defined by the Basic class, and these +form the "content header frame". The data can be any size, and MAY be broken +into several (or many) chunks, each forming a "content body frame". + +Looking at the frames for a specific channel, as they pass on the wire, we +might see something like this: + + [method] + [method] [header] [body] [body] + [method] + ... +*/ +type bodyFrame struct { + ChannelId uint16 + Body []byte +} + +func (f *bodyFrame) channel() uint16 { return f.ChannelId } diff --git a/vendor/github.com/streadway/amqp/uri.go b/vendor/github.com/streadway/amqp/uri.go new file mode 100644 index 000000000..e58471549 --- /dev/null +++ b/vendor/github.com/streadway/amqp/uri.go @@ -0,0 +1,176 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "errors" + "net" + "net/url" + "strconv" + "strings" +) + +var errURIScheme = errors.New("AMQP scheme must be either 'amqp://' or 'amqps://'") +var errURIWhitespace = errors.New("URI must not contain whitespace") + +var schemePorts = map[string]int{ + "amqp": 5672, + "amqps": 5671, +} + +var defaultURI = URI{ + Scheme: "amqp", + Host: "localhost", + Port: 5672, + Username: "guest", + Password: "guest", + Vhost: "/", +} + +// URI represents a parsed AMQP URI string. +type URI struct { + Scheme string + Host string + Port int + Username string + Password string + Vhost string +} + +// ParseURI attempts to parse the given AMQP URI according to the spec. +// See http://www.rabbitmq.com/uri-spec.html. 
+// +// Default values for the fields are: +// +// Scheme: amqp +// Host: localhost +// Port: 5672 +// Username: guest +// Password: guest +// Vhost: / +// +func ParseURI(uri string) (URI, error) { + builder := defaultURI + + if strings.Contains(uri, " ") == true { + return builder, errURIWhitespace + } + + u, err := url.Parse(uri) + if err != nil { + return builder, err + } + + defaultPort, okScheme := schemePorts[u.Scheme] + + if okScheme { + builder.Scheme = u.Scheme + } else { + return builder, errURIScheme + } + + host := u.Hostname() + port := u.Port() + + if host != "" { + builder.Host = host + } + + if port != "" { + port32, err := strconv.ParseInt(port, 10, 32) + if err != nil { + return builder, err + } + builder.Port = int(port32) + } else { + builder.Port = defaultPort + } + + if u.User != nil { + builder.Username = u.User.Username() + if password, ok := u.User.Password(); ok { + builder.Password = password + } + } + + if u.Path != "" { + if strings.HasPrefix(u.Path, "/") { + if u.Host == "" && strings.HasPrefix(u.Path, "///") { + // net/url doesn't handle local context authorities and leaves that up + // to the scheme handler. In our case, we translate amqp:/// into the + // default host and whatever the vhost should be + if len(u.Path) > 3 { + builder.Vhost = u.Path[3:] + } + } else if len(u.Path) > 1 { + builder.Vhost = u.Path[1:] + } + } else { + builder.Vhost = u.Path + } + } + + return builder, nil +} + +// PlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. +func (uri URI) PlainAuth() *PlainAuth { + return &PlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +// AMQPlainAuth returns a PlainAuth structure based on the parsed URI's +// Username and Password fields. 
+func (uri URI) AMQPlainAuth() *AMQPlainAuth { + return &AMQPlainAuth{ + Username: uri.Username, + Password: uri.Password, + } +} + +func (uri URI) String() string { + authority, err := url.Parse("") + if err != nil { + return err.Error() + } + + authority.Scheme = uri.Scheme + + if uri.Username != defaultURI.Username || uri.Password != defaultURI.Password { + authority.User = url.User(uri.Username) + + if uri.Password != defaultURI.Password { + authority.User = url.UserPassword(uri.Username, uri.Password) + } + } + + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + + if defaultPort, found := schemePorts[uri.Scheme]; !found || defaultPort != uri.Port { + authority.Host = net.JoinHostPort(uri.Host, strconv.Itoa(uri.Port)) + } else { + // JoinHostPort() automatically add brackets to the host if it's + // an IPv6 address. + // + // If not port is specified, JoinHostPort() return an IP address in the + // form of "[::1]:", so we use TrimSuffix() to remove the extra ":". + authority.Host = strings.TrimSuffix(net.JoinHostPort(uri.Host, ""), ":") + } + + if uri.Vhost != defaultURI.Vhost { + // Make sure net/url does not double escape, e.g. + // "%2F" does not become "%252F". + authority.Path = uri.Vhost + authority.RawPath = url.QueryEscape(uri.Vhost) + } else { + authority.Path = "/" + } + + return authority.String() +} diff --git a/vendor/github.com/streadway/amqp/write.go b/vendor/github.com/streadway/amqp/write.go new file mode 100644 index 000000000..94a46d115 --- /dev/null +++ b/vendor/github.com/streadway/amqp/write.go @@ -0,0 +1,416 @@ +// Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+// Source code and contact info at http://github.com/streadway/amqp + +package amqp + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "io" + "math" + "time" +) + +func (w *writer) WriteFrame(frame frame) (err error) { + if err = frame.write(w.w); err != nil { + return + } + + if buf, ok := w.w.(*bufio.Writer); ok { + err = buf.Flush() + } + + return +} + +func (f *methodFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + + if f.Method == nil { + return errors.New("malformed frame: missing method") + } + + class, method := f.Method.id() + + if err = binary.Write(&payload, binary.BigEndian, class); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, method); err != nil { + return + } + + if err = f.Method.write(&payload); err != nil { + return + } + + return writeFrame(w, frameMethod, f.ChannelId, payload.Bytes()) +} + +// Heartbeat +// +// Payload is empty +func (f *heartbeatFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameHeartbeat, f.ChannelId, []byte{}) +} + +// CONTENT HEADER +// 0 2 4 12 14 +// +----------+--------+-----------+----------------+------------- - - +// | class-id | weight | body size | property flags | property list... +// +----------+--------+-----------+----------------+------------- - - +// short short long long short remainder... +// +func (f *headerFrame) write(w io.Writer) (err error) { + var payload bytes.Buffer + var zeroTime time.Time + + if err = binary.Write(&payload, binary.BigEndian, f.ClassId); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.weight); err != nil { + return + } + + if err = binary.Write(&payload, binary.BigEndian, f.Size); err != nil { + return + } + + // First pass will build the mask to be serialized, second pass will serialize + // each of the fields that appear in the mask. 
+ + var mask uint16 + + if len(f.Properties.ContentType) > 0 { + mask = mask | flagContentType + } + if len(f.Properties.ContentEncoding) > 0 { + mask = mask | flagContentEncoding + } + if f.Properties.Headers != nil && len(f.Properties.Headers) > 0 { + mask = mask | flagHeaders + } + if f.Properties.DeliveryMode > 0 { + mask = mask | flagDeliveryMode + } + if f.Properties.Priority > 0 { + mask = mask | flagPriority + } + if len(f.Properties.CorrelationId) > 0 { + mask = mask | flagCorrelationId + } + if len(f.Properties.ReplyTo) > 0 { + mask = mask | flagReplyTo + } + if len(f.Properties.Expiration) > 0 { + mask = mask | flagExpiration + } + if len(f.Properties.MessageId) > 0 { + mask = mask | flagMessageId + } + if f.Properties.Timestamp != zeroTime { + mask = mask | flagTimestamp + } + if len(f.Properties.Type) > 0 { + mask = mask | flagType + } + if len(f.Properties.UserId) > 0 { + mask = mask | flagUserId + } + if len(f.Properties.AppId) > 0 { + mask = mask | flagAppId + } + + if err = binary.Write(&payload, binary.BigEndian, mask); err != nil { + return + } + + if hasProperty(mask, flagContentType) { + if err = writeShortstr(&payload, f.Properties.ContentType); err != nil { + return + } + } + if hasProperty(mask, flagContentEncoding) { + if err = writeShortstr(&payload, f.Properties.ContentEncoding); err != nil { + return + } + } + if hasProperty(mask, flagHeaders) { + if err = writeTable(&payload, f.Properties.Headers); err != nil { + return + } + } + if hasProperty(mask, flagDeliveryMode) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.DeliveryMode); err != nil { + return + } + } + if hasProperty(mask, flagPriority) { + if err = binary.Write(&payload, binary.BigEndian, f.Properties.Priority); err != nil { + return + } + } + if hasProperty(mask, flagCorrelationId) { + if err = writeShortstr(&payload, f.Properties.CorrelationId); err != nil { + return + } + } + if hasProperty(mask, flagReplyTo) { + if err = writeShortstr(&payload, 
f.Properties.ReplyTo); err != nil { + return + } + } + if hasProperty(mask, flagExpiration) { + if err = writeShortstr(&payload, f.Properties.Expiration); err != nil { + return + } + } + if hasProperty(mask, flagMessageId) { + if err = writeShortstr(&payload, f.Properties.MessageId); err != nil { + return + } + } + if hasProperty(mask, flagTimestamp) { + if err = binary.Write(&payload, binary.BigEndian, uint64(f.Properties.Timestamp.Unix())); err != nil { + return + } + } + if hasProperty(mask, flagType) { + if err = writeShortstr(&payload, f.Properties.Type); err != nil { + return + } + } + if hasProperty(mask, flagUserId) { + if err = writeShortstr(&payload, f.Properties.UserId); err != nil { + return + } + } + if hasProperty(mask, flagAppId) { + if err = writeShortstr(&payload, f.Properties.AppId); err != nil { + return + } + } + + return writeFrame(w, frameHeader, f.ChannelId, payload.Bytes()) +} + +// Body +// +// Payload is one byterange from the full body who's size is declared in the +// Header frame +func (f *bodyFrame) write(w io.Writer) (err error) { + return writeFrame(w, frameBody, f.ChannelId, f.Body) +} + +func writeFrame(w io.Writer, typ uint8, channel uint16, payload []byte) (err error) { + end := []byte{frameEnd} + size := uint(len(payload)) + + _, err = w.Write([]byte{ + byte(typ), + byte((channel & 0xff00) >> 8), + byte((channel & 0x00ff) >> 0), + byte((size & 0xff000000) >> 24), + byte((size & 0x00ff0000) >> 16), + byte((size & 0x0000ff00) >> 8), + byte((size & 0x000000ff) >> 0), + }) + + if err != nil { + return + } + + if _, err = w.Write(payload); err != nil { + return + } + + if _, err = w.Write(end); err != nil { + return + } + + return +} + +func writeShortstr(w io.Writer, s string) (err error) { + b := []byte(s) + + var length = uint8(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +func writeLongstr(w io.Writer, s 
string) (err error) { + b := []byte(s) + + var length = uint32(len(b)) + + if err = binary.Write(w, binary.BigEndian, length); err != nil { + return + } + + if _, err = w.Write(b[:length]); err != nil { + return + } + + return +} + +/* +'A': []interface{} +'D': Decimal +'F': Table +'I': int32 +'S': string +'T': time.Time +'V': nil +'b': byte +'d': float64 +'f': float32 +'l': int64 +'s': int16 +'t': bool +'x': []byte +*/ +func writeField(w io.Writer, value interface{}) (err error) { + var buf [9]byte + var enc []byte + + switch v := value.(type) { + case bool: + buf[0] = 't' + if v { + buf[1] = byte(1) + } else { + buf[1] = byte(0) + } + enc = buf[:2] + + case byte: + buf[0] = 'b' + buf[1] = byte(v) + enc = buf[:2] + + case int16: + buf[0] = 's' + binary.BigEndian.PutUint16(buf[1:3], uint16(v)) + enc = buf[:3] + + case int: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int32: + buf[0] = 'I' + binary.BigEndian.PutUint32(buf[1:5], uint32(v)) + enc = buf[:5] + + case int64: + buf[0] = 'l' + binary.BigEndian.PutUint64(buf[1:9], uint64(v)) + enc = buf[:9] + + case float32: + buf[0] = 'f' + binary.BigEndian.PutUint32(buf[1:5], math.Float32bits(v)) + enc = buf[:5] + + case float64: + buf[0] = 'd' + binary.BigEndian.PutUint64(buf[1:9], math.Float64bits(v)) + enc = buf[:9] + + case Decimal: + buf[0] = 'D' + buf[1] = byte(v.Scale) + binary.BigEndian.PutUint32(buf[2:6], uint32(v.Value)) + enc = buf[:6] + + case string: + buf[0] = 'S' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + enc = append(buf[:5], []byte(v)...) 
+ + case []interface{}: // field-array + buf[0] = 'A' + + sec := new(bytes.Buffer) + for _, val := range v { + if err = writeField(sec, val); err != nil { + return + } + } + + binary.BigEndian.PutUint32(buf[1:5], uint32(sec.Len())) + if _, err = w.Write(buf[:5]); err != nil { + return + } + + if _, err = w.Write(sec.Bytes()); err != nil { + return + } + + return + + case time.Time: + buf[0] = 'T' + binary.BigEndian.PutUint64(buf[1:9], uint64(v.Unix())) + enc = buf[:9] + + case Table: + if _, err = w.Write([]byte{'F'}); err != nil { + return + } + return writeTable(w, v) + + case []byte: + buf[0] = 'x' + binary.BigEndian.PutUint32(buf[1:5], uint32(len(v))) + if _, err = w.Write(buf[0:5]); err != nil { + return + } + if _, err = w.Write(v); err != nil { + return + } + return + + case nil: + buf[0] = 'V' + enc = buf[:1] + + default: + return ErrFieldType + } + + _, err = w.Write(enc) + + return +} + +func writeTable(w io.Writer, table Table) (err error) { + var buf bytes.Buffer + + for key, val := range table { + if err = writeShortstr(&buf, key); err != nil { + return + } + if err = writeField(&buf, val); err != nil { + return + } + } + + return writeLongstr(w, string(buf.Bytes())) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 19e106f58..6b8748316 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -337,6 +337,9 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.2.0 ## explicit; go 1.11 github.com/stoewer/go-strcase +# github.com/streadway/amqp v1.1.0 +## explicit; go 1.10 +github.com/streadway/amqp # github.com/stretchr/testify v1.8.3 ## explicit; go 1.20 github.com/stretchr/testify/assert