diff --git a/.cspell-code.json b/.cspell-code.json deleted file mode 100644 index 2e846c2e82..0000000000 --- a/.cspell-code.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "version": "0.2", - "language": "en", - "allowCompoundWords": true, - "dictionaryDefinitions": [ - { - "name": "custom-dictionary", - "path": "./.cspell/custom-dictionary.txt", - "addWords": true - } - ], - "dictionaries": [ - "en", - "custom-words", - "custom-dictionary" - ], - "ignorePaths": [ - "**/package.json", - "**/docs/package-lock.json", - "**/docs/docs/examples/model-training/Stable-Diffusion-Dreambooth/index.md", - "docs/docs/examples/model-training/Training-Tensorflow-Model/index.md", - "./webui/build", - "./webui/node_modules", - "./webui/package.json", - "./webui/package-lock.json", - "./.gitprecommit", - "./webui/tsconfig.json", - "./vendor", - "go.sum", - "go.mod", - "go.work.sum", - "apps" - ], - "ignoreRegExpList": [ - "Urls", - "Email", - "RsaCert", - "SshRsa", - "Base64MultiLine", - "Base64SingleLine", - "CommitHash", - "CommitHashLink", - "CStyleHexValue", - "CSSHexValue", - "SHA", - "HashStrings", - "UnicodeRef", - "UUID", - "/github.com.*/", - "/\\w+{12,}/" - ] -} diff --git a/.cspell/custom-dictionary.txt b/.cspell/custom-dictionary.txt index 4866751ba1..bb8b2c1c1b 100644 --- a/.cspell/custom-dictionary.txt +++ b/.cspell/custom-dictionary.txt @@ -355,6 +355,7 @@ wasmlogs wasmmodels wazero wdbaruni's +simonwo webui wesbos winderresearch @@ -371,3 +372,63 @@ yyyymmddhhmm zarr zerolog zidane +IMDC +kvstore +unmarshalling +Nowf +pkey +machineid +bacerror +Nacked +pqueue +Routez +Connz +Subsz +nuid +Noticef +Warnf +Debugf +Tracef +sresource +Syncer +mathgo +providables +JSONV +Idxs +boltdblib +hclog +THAMTShard +mergo +serde +qdisc +puuid +pkgs +pscbin +rocm +strg +otlploggrpc +yacspin +APITLSCA +APITLSCA +Milli +Errf +doesn +cicd +nvme +fdisk +mdstat +xcom +Fooco +Eventuallyf +Truef +sekret +Equalf +Doesnt +HAMT +dagpb +Berdly +frrist +swaggo +isbadactor +installationid +firstbacalhauimage \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml index 8a7a7ba501..641b6e1d6e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -54,12 +54,6 @@ linters-settings: - shadow lll: line-length: 140 - misspell: - locale: US - ignore-words: - - favour - - cancelled - - cancelling nolintlint: allow-leading-space: true # don't require machine-readable nolint directives (i.e. with no leading space) allow-unused: true # report any unused nolint directives @@ -89,7 +83,6 @@ linters: - govet - ineffassign - lll - - misspell - mnd - nakedret - noctx diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ee7d6b8d4a..ee46828daa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -8,7 +8,7 @@ repos: - id: detect-aws-credentials args: [--allow-missing-credentials] - id: detect-private-key - exclude: testdata/.* + exclude: 'testdata/.*|test-integration/certificates/.*' - id: check-yaml - id: check-json - repo: https://github.com/astral-sh/ruff-pre-commit diff --git a/DESIGN.md b/DESIGN.md index 62d45785aa..c322baa3ee 100644 --- a/DESIGN.md +++ b/DESIGN.md @@ -103,16 +103,11 @@ Ideally, we will also allow much more fine-grained control, specifying location, - She has a file `process.py` which includes the python code necessary to execute in a function called 'downscale()' which takes a file handle to local, processes it, and returns a bytestream. 
- She executes the following command: ``` -ifps job submit -f process.py -r requirements.txt -c QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR +ipfs job submit -f process.py -r requirements.txt -c QmbWqxBEKC3P8tqsKc98xmWNzrzDtRLMiMPL8wBuTGsMnR ``` - This runs the command in a local executor, first installing all the python packages necessary, and then executing them, on the subset of data available on that node. - Once complete, the system returns the CID of the updated dataset that she can download. -- **SCENARIO 3** Want to burst to cloud but cannot move entire dataset in short time - - DHASH CAN YOU HELP FLESH OUT - - **PUSH COMPUTE INTO GENE SEQUENCER** - - **PIPE TO S3** - ## Components to Build - Build an application that listens for jobs over NATS, receives payment somehow, runs the job in {kubernetes, docker, idk}, and returns the result to the use (ideally the 'result' is in the form of an ipfs object and we can just return the hash). @@ -125,13 +120,3 @@ ifps job submit -f process.py -r requirements.txt -c QmbWqxBEKC3P8tqsKc98xmWNzrz Bacalhau means cod (the fish) in Portuguese (where several folks were brainstorming this topic). Compute-Over-Data == Cod == Bacalhau - -## Prior Art / Parallel Projects -* IPFS-FAN - distributed serverless - https://research.protocol.ai/publications/ipfs-fan-a-function-addressable-computation-network/delarocha2021a.pdf -* IPLS : A Framework for Decentralized Federated Learning- https://arxiv.org/pdf/2101.01901v1.pdf -* Interplanetary Distributed Computing (2018) - https://github.com/yenkuanlee/IPDC -* IPTF - IPFS + TensorFlow (2018) - https://github.com/tesserai/iptf -* Lurk -> Run queries over Filecoin Sealed Data (no public paper yet) -* Radix - Nomad based scheduler for IPFS cluster (only) - high level spec doc https://docs.google.com/document/d/18hdYBmDlvusEOQ-iSNIO_IAEOvJVFL1MyAU_B8hON9Q/edit?usp=sharing -* Bringing Arbitrary Compute to Authoritative Data https://queue.acm.org/detail.cfm?id=2645649 -* Manta: a scalable, distributed object store https://github.com/joyent/manta diff --git a/Makefile b/Makefile index e33f0023a5..54da79acc6 100644 --- a/Makefile +++ b/Makefile @@ -185,10 +185,10 @@ release-bacalhau-flyte: resolve-earthly # Target: build ################################################################################ .PHONY: build -build: resolve-earthly build-bacalhau build-plugins +build: resolve-earthly build-bacalhau .PHONY: build-ci -build-ci: build-bacalhau install-plugins +build-ci: build-bacalhau .PHONY: build-dev build-dev: build-ci @@ -208,7 +208,7 @@ build-webui: resolve-earthly ################################################################################ # Target: build-bacalhau ################################################################################ -${BINARY_PATH}: build-bacalhau build-plugins +${BINARY_PATH}: build-bacalhau .PHONY: build-bacalhau build-bacalhau: binary-web binary @@ -306,7 +306,7 @@ images: docker/.pulled # Target: clean ################################################################################ .PHONY: clean -clean: clean-plugins +clean: ${GO} clean ${RM} -r bin/* ${RM} -r webui/build/* @@ -385,14 +385,6 @@ devstack-250: devstack-20: go run . devstack --compute-nodes 20 -.PHONY: devstack-noop -devstack-noop: - go run . devstack --noop - -.PHONY: devstack-noop-100 -devstack-noop-100: - go run . devstack --noop --compute-nodes 100 - .PHONY: devstack-race devstack-race: go run -race . devstack @@ -476,46 +468,6 @@ security: release: build-bacalhau cp bin/bacalhau . 
-ifeq ($(OS),Windows_NT) - detected_OS := Windows -else - detected_OS := $(shell sh -c 'uname 2>/dev/null || echo Unknown') -endif - -# TODO make the plugin path configurable instead of using the bacalhau config path. -BACALHAU_CONFIG_PATH := $(shell echo $$BACALHAU_PATH) -INSTALL_PLUGINS_DEST := $(if $(BACALHAU_CONFIG_PATH),$(BACALHAU_CONFIG_PATH)plugins/,~/.bacalhau/plugins/) - -EXECUTOR_PLUGINS := $(wildcard ./pkg/executor/plugins/executors/*/.) - -# TODO fix install on windows -ifeq ($(detected_OS),Windows) - build-plugins clean-plugins install-plugins: - @echo "Skipping executor plugins on Windows" -else - build-plugins: plugins-build - clean-plugins: plugins-clean - install-plugins: plugins-install - - .PHONY: plugins-build $(EXECUTOR_PLUGINS) - - plugins-build: $(EXECUTOR_PLUGINS) - @echo "Building executor plugins..." - @$(foreach plugin,$(EXECUTOR_PLUGINS),$(MAKE) --no-print-directory -C $(plugin) &&) true - - .PHONY: plugins-clean $(addsuffix .clean,$(EXECUTOR_PLUGINS)) - - plugins-clean: $(addsuffix .clean,$(EXECUTOR_PLUGINS)) - @echo "Cleaning executor plugins..." - @$(foreach plugin,$(addsuffix .clean,$(EXECUTOR_PLUGINS)),$(MAKE) --no-print-directory -C $(basename $(plugin)) clean &&) true - - .PHONY: plugins-install $(addsuffix .install,$(EXECUTOR_PLUGINS)) - - plugins-install: plugins-build $(addsuffix .install,$(EXECUTOR_PLUGINS)) - @echo "Installing executor plugins..." - @$(foreach plugin,$(addsuffix .install,$(EXECUTOR_PLUGINS)),mkdir -p $(INSTALL_PLUGINS_DEST) && cp $(basename $(plugin))/bin/* $(INSTALL_PLUGINS_DEST) &&) true -endif - .PHONY: spellcheck-code -spellcheck-code: ## Runs a spellchecker over all code - MVP just does one file - cspell -c .cspell-code.json lint ./pkg/authn/** +spellcheck-code: + cspell lint -c cspell.yaml --quiet "**/*.{go,js,ts,jsx,tsx,md,yml,yaml,json}" diff --git a/clients/python/docs/OrchestratorApi.md b/clients/python/docs/OrchestratorApi.md index e8f4b2c85b..8303d5e504 100644 --- a/clients/python/docs/OrchestratorApi.md +++ b/clients/python/docs/OrchestratorApi.md @@ -35,7 +35,7 @@ from pprint import pprint api_instance = bacalhau_apiclient.OrchestratorApi() id = 'id_example' # str | ID to get the job for include = 'include_example' # str | Takes history and executions as options. If empty will not include anything else. (optional) -limit = 56 # int | Number of history or exeuctions to fetch. Should be used in conjugation with include (optional) +limit = 56 # int | Number of history or executions to fetch. Should be used in conjunction with include (optional) try: # Returns a job. @@ -47,11 +47,11 @@ ### Parameters -Name | Type | Description | Notes -------------- | ------------- | ------------- | ------------- - **id** | **str**| ID to get the job for | - **include** | **str**| Takes history and executions as options. If empty will not include anything else. | [optional] - **limit** | **int**| Number of history or exeuctions to fetch. Should be used in conjugation with include | [optional] +Name | Type | Description | Notes +------------- | ------------- |--------------------------------------------------------------------------------------| ------------- + **id** | **str**| ID to get the job for | + **include** | **str**| Takes history and executions as options. If empty will not include anything else. | [optional] + **limit** | **int**| Number of history or executions to fetch. 
Should be used in conjunction with include | [optional] ### Return type diff --git a/cmd/cli/agent/alive_test.go b/cmd/cli/agent/alive_test.go index b25b07be67..469c6944d1 100644 --- a/cmd/cli/agent/alive_test.go +++ b/cmd/cli/agent/alive_test.go @@ -30,7 +30,7 @@ func (s *AliveSuite) TestAliveJSONOutput() { aliveInfo := &apimodels.IsAliveResponse{} err = marshaller.JSONUnmarshalWithMax([]byte(out), &aliveInfo) - s.Require().NoError(err, "Could not unmarshall the output into json - %+v", err) + s.Require().NoError(err, "Could not unmarshal the output into json - %+v", err) s.Require().True(aliveInfo.IsReady()) } @@ -40,6 +40,6 @@ func (s *AliveSuite) TestAliveYAMLOutput() { aliveInfo := &apimodels.IsAliveResponse{} err = marshaller.YAMLUnmarshalWithMax([]byte(out), &aliveInfo) - s.Require().NoError(err, "Could not unmarshall the output into yaml - %+v", out) + s.Require().NoError(err, "Could not unmarshal the output into yaml - %+v", out) s.Require().True(aliveInfo.IsReady()) } diff --git a/cmd/cli/agent/node_test.go b/cmd/cli/agent/node_test.go index 8fe7096a2b..5121e9bb65 100644 --- a/cmd/cli/agent/node_test.go +++ b/cmd/cli/agent/node_test.go @@ -28,7 +28,7 @@ func (s *NodeSuite) TestNodeJSONOutput() { nodeInfo := &models.NodeState{} err = marshaller.JSONUnmarshalWithMax([]byte(out), &nodeInfo) - s.Require().NoError(err, "Could not unmarshall the output into json - %+v", out) + s.Require().NoError(err, "Could not unmarshal the output into json - %+v", out) s.Require().Equal(s.Node.ID, nodeInfo.Info.ID(), "Node ID does not match in json.") } @@ -38,6 +38,6 @@ func (s *NodeSuite) TestNodeYAMLOutput() { nodeInfo := &models.NodeState{} err = marshaller.YAMLUnmarshalWithMax([]byte(out), &nodeInfo) - s.Require().NoError(err, "Could not unmarshall the output into yaml - %+v", out) + s.Require().NoError(err, "Could not unmarshal the output into yaml - %+v", out) s.Require().Equal(s.Node.ID, nodeInfo.Info.ID(), "Node ID does not match in yaml.") } diff --git a/cmd/cli/agent/version_test.go b/cmd/cli/agent/version_test.go index e7fe5421ef..8083ebfb36 100644 --- a/cmd/cli/agent/version_test.go +++ b/cmd/cli/agent/version_test.go @@ -45,7 +45,7 @@ func (s *VersionSuite) TestVersionJSONOutput() { expectedVersion := version.Get() printedVersion := &models.BuildVersionInfo{} err = marshaller.JSONUnmarshalWithMax([]byte(out), &printedVersion) - s.Require().NoError(err, "Could not unmarshall the output into json - %+v", out) + s.Require().NoError(err, "Could not unmarshal the output into json - %+v", out) s.Require().Equal(expectedVersion, printedVersion, "Versions do not match in json.") } @@ -58,6 +58,6 @@ func (s *VersionSuite) TestVersionYAMLOutput() { expectedVersion := version.Get() printedVersion := &models.BuildVersionInfo{} err = marshaller.YAMLUnmarshalWithMax([]byte(out), &printedVersion) - s.Require().NoError(err, "Could not unmarshall the output into yaml - %+v", out) + s.Require().NoError(err, "Could not unmarshal the output into yaml - %+v", out) s.Require().Equal(expectedVersion, printedVersion, "Versions do not match in yaml.") } diff --git a/cmd/cli/deprecated/exec.go b/cmd/cli/deprecated/exec.go new file mode 100644 index 0000000000..4fd1832eed --- /dev/null +++ b/cmd/cli/deprecated/exec.go @@ -0,0 +1,17 @@ +package deprecated + +import ( + "github.com/spf13/cobra" +) + +func NewExecCommand() *cobra.Command { + execCmd := &cobra.Command{ + Use: "exec", + Deprecated: "exec was an experimental feature and is no longer supported", + RunE: func(cmd *cobra.Command, cmdArgs []string) error { + return nil + }, + } + + return execCmd +} diff --git a/cmd/cli/docker/docker_run_cli_test.go b/cmd/cli/docker/docker_run_cli_test.go index 9f5f8d2539..42ecf4dd0d 100644 --- a/cmd/cli/docker/docker_run_cli_test.go +++ b/cmd/cli/docker/docker_run_cli_test.go @@ -1,5 +1,7 @@ //go:build unit || !integration +/* spell-checker: disable */ + package docker import ( @@ -431,7 +433,7 @@ func TestJobFlagParsing(t *testing.T) { }, expectedError: false, }, - // TODO(forrest): if/when validtion on the network config is adjusted expect this test to fail. + // TODO(forrest): if/when validation on the network config is adjusted expect this test to fail. { name: "with none network and domains", flags: []string{"--network=none", "--domain=example.com", "--domain=example.io", "image:tag"}, @@ -487,30 +489,30 @@ func TestJobFlagParsing(t *testing.T) { }, { name: "with s3 publisher", - flags: []string{"--publisher=s3://mybucket/mykey", "image:tag"}, + flags: []string{"--publisher=s3://myBucket/myKey", "image:tag"}, assertJob: func(t *testing.T, j *models.Job) { defaultJobAssertions(t, j) task := j.Task() s3publisher, err := publisher_s3.DecodePublisherSpec(task.Publisher) require.NoError(t, err) assert.Equal(t, publisher_s3.PublisherSpec{ - Bucket: "mybucket", - Key: "mykey", + Bucket: "myBucket", + Key: "myKey", }, s3publisher) }, expectedError: false, }, { name: "with s3 publisher with opts", - flags: []string{"-p=s3://mybucket/mykey,opt=region=us-west-2,opt=endpoint=https://s3.custom.com", "image:tag"}, + flags: []string{"-p=s3://myBucket/myKey,opt=region=us-west-2,opt=endpoint=https://s3.custom.com", "image:tag"}, assertJob: func(t *testing.T, j *models.Job) { defaultJobAssertions(t, j) task := j.Task() s3publisher, err := publisher_s3.DecodePublisherSpec(task.Publisher) require.NoError(t, err) assert.Equal(t, publisher_s3.PublisherSpec{ - Bucket: "mybucket", - Key: "mykey", + Bucket: "myBucket", + Key: "myKey", Region: "us-west-2", Endpoint: "https://s3.custom.com", }, s3publisher) @@ -519,15 +521,15 @@ func TestJobFlagParsing(t *testing.T) { }, { name: "with s3 publisher with options", - flags: []string{"-p=s3://mybucket/mykey,option=region=us-west-2,option=endpoint=https://s3.custom.com", "image:tag"}, + flags: []string{"-p=s3://myBucket/myKey,option=region=us-west-2,option=endpoint=https://s3.custom.com", "image:tag"}, assertJob: func(t *testing.T, j *models.Job) { defaultJobAssertions(t, j) task := j.Task() s3publisher, err := publisher_s3.DecodePublisherSpec(task.Publisher) require.NoError(t, err) assert.Equal(t, publisher_s3.PublisherSpec{ - Bucket: "mybucket", - Key: "mykey", + Bucket: "myBucket", + Key: "myKey", Region: "us-west-2", Endpoint: "https://s3.custom.com", }, s3publisher) diff --git a/cmd/cli/docker/docker_run_test.go b/cmd/cli/docker/docker_run_test.go index dd0e603f7a..13c3ece935 100644 --- a/cmd/cli/docker/docker_run_test.go +++ b/cmd/cli/docker/docker_run_test.go @@ -165,12 +165,12 @@ func (s *DockerRunSuite) TestRun_SubmitUrlInputs() { {inputURL: InputURL{url: "https://raw.githubusercontent.com/bacalhau-project/bacalhau/main/main.go", pathInContainer: "/inputs", filename: "main.go", flag: "-i"}}, } - for _, turls := range testURLs { + for _, urls := range testURLs { ctx := context.Background() flagsArray := []string{"docker", "run"} - flagsArray = append(flagsArray, turls.inputURL.flag, turls.inputURL.url) - flagsArray = append(flagsArray, "ubuntu", "cat", fmt.Sprintf("%s/%s", turls.inputURL.pathInContainer, turls.inputURL.filename)) + flagsArray = append(flagsArray, 
urls.inputURL.flag, urls.inputURL.url) + flagsArray = append(flagsArray, "ubuntu", "cat", fmt.Sprintf("%s/%s", urls.inputURL.pathInContainer, urls.inputURL.filename)) _, out, err := s.ExecuteTestCobraCommand(flagsArray...) s.Require().NoError(err, "Error submitting job") @@ -180,8 +180,8 @@ func (s *DockerRunSuite) TestRun_SubmitUrlInputs() { s.Require().Equal(1, len(j.Task().InputSources), "Number of job urls != # of test urls.") urlSpec, err := storage_url.DecodeSpec(j.Task().InputSources[0].Source) s.Require().NoError(err) - s.Require().Equal(turls.inputURL.url, urlSpec.URL, "Test URL not equal to URL from job.") - s.Require().Equal(turls.inputURL.pathInContainer, j.Task().InputSources[0].Target, "Test Path not equal to Path from job.") + s.Require().Equal(urls.inputURL.url, urlSpec.URL, "Test URL not equal to URL from job.") + s.Require().Equal(urls.inputURL.pathInContainer, j.Task().InputSources[0].Target, "Test Path not equal to Path from job.") } } @@ -252,8 +252,8 @@ func (s *DockerRunSuite) TestRun_SubmitWorkdir() { }{ {workdir: "", errorCode: 0}, {workdir: "/", errorCode: 0}, - {workdir: "./mydir", errorCode: 1}, - {workdir: "../mydir", errorCode: 1}, + {workdir: "./myDir", errorCode: 1}, + {workdir: "../myDir", errorCode: 1}, {workdir: "http://foo.com", errorCode: 1}, {workdir: "/foo//", errorCode: 0}, // double forward slash is allowed in unix {workdir: "/foo//bar", errorCode: 0}, diff --git a/cmd/cli/exec/args.go b/cmd/cli/exec/args.go deleted file mode 100644 index b3e2df0a75..0000000000 --- a/cmd/cli/exec/args.go +++ /dev/null @@ -1,79 +0,0 @@ -package exec - -import ( - "fmt" - "strings" - - "github.com/spf13/pflag" -) - -// ExtractUnknownArgs extracts any long-form flags (--something) that are not -// currently configured for this command, they must be flags intended for the -// custom job type. 
-func ExtractUnknownArgs(flags *pflag.FlagSet, args []string) []string { - unknownArgs := []string{} - - for i := 0; i < len(args); i++ { - arg := args[i] - var field *pflag.Flag - - if arg[0] == '-' { - if arg[1] == '-' { - field = flags.Lookup(strings.SplitN(arg[2:], "=", 2)[0]) - } else { - for _, s := range arg[1:] { - field = flags.ShorthandLookup(string(s)) - if field == nil { - break - } - } - } - } else { - continue - } - - if field != nil { - if field.NoOptDefVal == "" && i+1 < len(args) && field.Value.String() == args[i+1] { - i++ - } - continue - } - - // Make sure we allow `--code=.` and `--code .` - if !strings.Contains(arg, "=") { - if i+1 < len(args) { - if args[i+1][0] != '-' { - arg = fmt.Sprintf("%s=%s", arg, args[i+1]) - } - } - } - - if arg == "--" { - continue - } - - unknownArgs = append(unknownArgs, arg) - } - - return unknownArgs -} - -func flagsToMap(flags []string) map[string]string { - m := make(map[string]string) - - for _, flag := range flags { - if flag == "--" { - continue // skip the user escaping the cmd args - } - - flagString := strings.TrimPrefix(flag, "-") - flagString = strings.TrimPrefix(flagString, "-") // just in case there's a second - - parts := strings.SplitN(flagString, "=", 2) - if len(parts) == 2 { - // if the flag has no value, it's probably a standalone bool - m[parts[0]] = parts[1] - } - } - - return m -} diff --git a/cmd/cli/exec/exec.go b/cmd/cli/exec/exec.go deleted file mode 100644 index ea0c9d1ac7..0000000000 --- a/cmd/cli/exec/exec.go +++ /dev/null @@ -1,277 +0,0 @@ -package exec - -import ( - "context" - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/spf13/cobra" - "gopkg.in/alessio/shellescape.v1" - "k8s.io/kubectl/pkg/util/i18n" - - "k8s.io/kubectl/pkg/util/templates" - - "github.com/bacalhau-project/bacalhau/cmd/util" - "github.com/bacalhau-project/bacalhau/cmd/util/flags/cliflags" - "github.com/bacalhau-project/bacalhau/cmd/util/hook" - "github.com/bacalhau-project/bacalhau/cmd/util/printer" - "github.com/bacalhau-project/bacalhau/pkg/lib/template" - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/publicapi/apimodels" - "github.com/bacalhau-project/bacalhau/pkg/publicapi/client/v2" - "github.com/bacalhau-project/bacalhau/pkg/storage/inline" - "github.com/bacalhau-project/bacalhau/pkg/userstrings" -) - -var ( - getLong = templates.LongDesc(i18n.T( - fmt.Sprintf(`Execute a specific job type. - -Allows for the execution of a job type with the given code, -without the need to create a container, or webassembly module. -By specifying the code with the '--code' flag you can ship the code -to the cluster for execution, specified by the remainder of the -command line. See examples below. 
- -Supported job types: - -%s - `, supportedJobTypes()), - )) - - //nolint:lll // Documentation - getExample = templates.Examples(i18n.T(` - # Execute the app.py script with Python - bacalhau exec --code app.py python app.py - - # Run a duckdb query against a CSV file - bacalhau exec -i src=...,dst=/inputs/data.csv duckdb "select * from /inputs/data.csv" -`)) -) - -type ExecOptions struct { - JobSettings *cliflags.JobSettings - TaskSettings *cliflags.TaskSettings - RunTimeSettings *cliflags.RunTimeSettings - Code string -} - -func NewExecOptions() *ExecOptions { - return &ExecOptions{ - JobSettings: cliflags.DefaultJobSettings(), - TaskSettings: cliflags.DefaultTaskSettings(), - RunTimeSettings: cliflags.DefaultRunTimeSettings(), - } -} - -func NewCmd() *cobra.Command { - options := NewExecOptions() - return NewCmdWithOptions(options) -} - -func NewCmdWithOptions(options *ExecOptions) *cobra.Command { - execCmd := &cobra.Command{ - Use: "exec [jobtype]", - Short: "Execute a specific job type", - Long: getLong, - Example: getExample, - Args: cobra.MinimumNArgs(1), - PreRunE: hook.RemoteCmdPreRunHooks, - PostRunE: hook.RemoteCmdPostRunHooks, - FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true}, - RunE: func(cmd *cobra.Command, cmdArgs []string) error { - // Find the unknown arguments from the original args. We only want to find the - // flags that are unknown. We will only support the long form for custom - // job types as we will want to use them as keys in template completions. - unknownArgs := ExtractUnknownArgs(cmd.Flags(), os.Args[1:]) - // initialize a new or open an existing repo merging any config file(s) it contains into cfg. - cfg, err := util.SetupRepoConfig(cmd) - if err != nil { - return fmt.Errorf("failed to setup repo: %w", err) - } - // create an api client - api, err := util.GetAPIClientV2(cmd, cfg) - if err != nil { - return fmt.Errorf("failed to create api client: %w", err) - } - return exec(cmd, cmdArgs, unknownArgs, api, options) - }, - } - - cliflags.RegisterJobFlags(execCmd, options.JobSettings) - cliflags.RegisterTaskFlags(execCmd, options.TaskSettings) - - execCmd.Flags().AddFlagSet(cliflags.NewRunTimeSettingsFlags(options.RunTimeSettings)) - execCmd.Flags().StringVar(&options.Code, "code", "", "Specifies the file, or directory of code to send with the request") - - return execCmd -} - -func exec(cmd *cobra.Command, cmdArgs []string, unknownArgs []string, api client.API, options *ExecOptions) error { - job, err := PrepareJob(cmd, cmdArgs, unknownArgs, options) - if err != nil { - return err - } - - job.Normalize() - err = job.ValidateSubmission() - if err != nil { - return fmt.Errorf("%s: %w", userstrings.JobSpecBad, err) - } - - resp, err := api.Jobs().Put(cmd.Context(), &apimodels.PutJobRequest{ - Job: job, - }) - if err != nil { - return fmt.Errorf("failed request: %w", err) - } - - job.ID = resp.JobID - jobProgressPrinter := printer.NewJobProgressPrinter(api, options.RunTimeSettings) - if err := jobProgressPrinter.PrintJobProgress(cmd.Context(), job, cmd); err != nil { - return fmt.Errorf("failed to print job execution: %w", err) - } - - return nil -} - -// Provides a string to diplay the currently available job types -func supportedJobTypes() string { - tpl, _ := NewTemplateMap(embeddedFiles, "templates") - var sb strings.Builder - for _, s := range tpl.AllTemplates() { - sb.WriteString(fmt.Sprintf(" * %s\n", s)) - } - return sb.String() -} - -//nolint:funlen -func PrepareJob(cmd *cobra.Command, cmdArgs []string, unknownArgs []string, options 
*ExecOptions) (*models.Job, error) { - var err error - var jobType, templateString string - var job *models.Job - - // Determine the job type and lookup the template for that type. If we - // don't have a template, then we don't know how to submit that job type. - jobType = cmdArgs[0] - - for i := range cmdArgs { - // If any parameters were quoted, we should make sure we try and add - // them back in after they were stripped for us. - if strings.Contains(cmdArgs[i], " ") { - cmdArgs[i] = shellescape.Quote(cmdArgs[i]) - } - } - - tpl, err := NewTemplateMap(embeddedFiles, "templates") - if err != nil { - return nil, fmt.Errorf("failed to find supported job types, templates missing") - } - - // Get the template string, or if we can't find one for this type, then - // provide a list of ones we _do_ support. - if templateString, err = tpl.Get(jobType); err != nil { - knownTypes := tpl.AllTemplates() - - supportedTypes := "" - if len(knownTypes) > 0 { - supportedTypes = "\nSupported types:\n" - - for _, kt := range knownTypes { - supportedTypes = supportedTypes + fmt.Sprintf(" * %s\n", kt) - } - } - - return nil, fmt.Errorf("the job type '%s' is not supported."+supportedTypes, jobType) - } - - // Convert the unknown args to a map which we can use to fill in the template - replacements := flagsToMap(unknownArgs) - - parser, err := template.NewParser(template.ParserParams{ - Replacements: replacements, - }) - - if err != nil { - return nil, fmt.Errorf("failed to create %s job when parsing template: %+w", jobType, err) - } - - tplResult, err := parser.ParseBytes([]byte(templateString)) - if err != nil { - return nil, fmt.Errorf("%s: %w", userstrings.JobSpecBad, err) - } - - // tplResult is now a []byte containing json for the job we will eventually submit. - if err = json.Unmarshal(tplResult, &job); err != nil { - return nil, fmt.Errorf("%s: %w", userstrings.JobSpecBad, err) - } - - // Attach the command line arguments that were provided to exec. These are passed through - // to the template as Command/Arguments. e.g. `bacalhau exec python app.py` will set - // Command -> python, and Arguments -> ["app.py"] - job.Tasks[0].Engine.Params["Command"] = jobType - job.Tasks[0].Engine.Params["Arguments"] = cmdArgs[1:] - - // Process --code if anything was specified. In future we may want to try and determine this - // ourselves where it is not specified, but it will likely be dependent on job type. - if options.Code != "" { - if err = addInlineContent(cmd.Context(), options.Code, job); err != nil { - return nil, err - } - } - - job.Labels, err = options.JobSettings.Labels() - job.Task().Publisher = options.TaskSettings.Publisher.Value() - job.Task().ResultPaths = options.TaskSettings.ResultPaths - job.Task().Env = options.TaskSettings.EnvironmentVariables - job.Task().InputSources = options.TaskSettings.InputSources.Values() - if err != nil { - return nil, fmt.Errorf("parsing job labes: %w", err) - } - job.Constraints, err = options.JobSettings.Constraints() - if err != nil { - return nil, fmt.Errorf("parsing job constraints: %w", err) - } - - // Set the execution timeouts - job.Tasks[0].Timeouts = &models.TimeoutConfig{ - TotalTimeout: options.TaskSettings.Timeout, - } - - return job, nil -} - -// addInlineContent will use codeLocation to determine if it is a single file or a -// directory and will attach to the job as an inline attachment. 
-func addInlineContent(ctx context.Context, codeLocation string, job *models.Job) error { - absPath, err := filepath.Abs(codeLocation) - if err != nil { - return err - } - - target := "/code" - - if finfo, err := os.Stat(absPath); err != nil { - return fmt.Errorf("file '%s' not found", codeLocation) - } else { - if !finfo.IsDir() { - target = fmt.Sprintf("/code/%s", finfo.Name()) - } - } - - specConfig, err := inline.NewStorage().Upload(ctx, absPath) - if err != nil { - return fmt.Errorf("failed to attach code '%s' to job submission: %w", codeLocation, err) - } - - job.Tasks[0].InputSources = append(job.Tasks[0].InputSources, &models.InputSource{ - Source: &specConfig, - Alias: "code", - Target: target, - }) - - return nil -} diff --git a/cmd/cli/exec/exec_test.go b/cmd/cli/exec/exec_test.go deleted file mode 100644 index 608d3f05a5..0000000000 --- a/cmd/cli/exec/exec_test.go +++ /dev/null @@ -1,153 +0,0 @@ -//go:build unit || !integration - -package exec_test - -import ( - "testing" - - "github.com/spf13/cobra" - "github.com/stretchr/testify/suite" - - "github.com/bacalhau-project/bacalhau/cmd/cli/exec" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -type ExecSuite struct { - suite.Suite -} - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestExecSuite(t *testing.T) { - suite.Run(t, new(ExecSuite)) -} - -type testCase struct { - name string - cmdLine []string - expectedUnknownArgs []string - expectedErrMsg string - jobCommand string - jobArguments []string - numInlinedAttachments int - numTotalAttachments int -} - -var testcases []testCase = []testCase{ - { - // bacalhau exec ruby -e "puts 'hello'" - name: "no ruby here", - cmdLine: []string{"ruby", "-e", "\"puts 'helllo'\""}, - expectedUnknownArgs: []string{}, - expectedErrMsg: "the job type 'ruby' is not supported", - }, - { - // bacalhau exec python --version=3.10 -- -c "import this" - name: "zen of python", - cmdLine: []string{"python", "--version=3.10", "--", "-c", "import this"}, - expectedUnknownArgs: []string{"--version=3.10", "-c=import this"}, - expectedErrMsg: "", - jobCommand: "python", - jobArguments: []string{"-c", "'import this'"}, - numInlinedAttachments: 0, - numTotalAttachments: 0, - }, - { - // bacalhau exec -i src=http://127.0.0.1/test.csv,dst=/inputs/test.csv python app.py - name: "run a python app", - cmdLine: []string{"-i", "src=http://127.0.0.1/test.csv,dst=/inputs/test.csv", "python", "app.py", "-x"}, - expectedUnknownArgs: []string{"-x"}, - expectedErrMsg: "", - jobCommand: "python", - jobArguments: []string{"app.py"}, - numInlinedAttachments: 0, - numTotalAttachments: 1, - }, - { - // bacalhau exec -i src=http://127.0.0.1/test.csv,dst=/inputs/test.csv python app.py - name: "run a python app with some inputs", - cmdLine: []string{"-i", "src=http://127.0.0.1/test.csv,dst=/inputs/test.csv", "python", "app.py", "/inputs/test.csv"}, - expectedUnknownArgs: []string{}, - expectedErrMsg: "", - jobCommand: "python", - jobArguments: []string{"app.py", "/inputs/test.csv"}, - numInlinedAttachments: 0, - numTotalAttachments: 1, - }, - { - // bacalhau exec -i src=http://127.0.0.1/test.csv,dst=/inputs/test.csv python app.py --code main.go - name: "run a python app with a local file", - cmdLine: []string{"-i", "src=http://127.0.0.1/test.csv,dst=/inputs/test.csv", "python", "app.py", "--code=exec_test.go"}, - expectedUnknownArgs: []string{}, - expectedErrMsg: "", - jobCommand: "python", - jobArguments: []string{"app.py"}, - 
numInlinedAttachments: 1, - numTotalAttachments: 2, - }, - { - // bacalhau exec -i src=http://127.0.0.1/test.csv,dst=/inputs/test.csv duckdb "select * from /inputs/test.csv" - name: "duckdb", - cmdLine: []string{"-i", "src=http://127.0.0.1/test.csv,dst=/inputs/test.csv", "duckdb", "select * from /inputs/test.csv"}, - expectedUnknownArgs: []string{}, - expectedErrMsg: "", - jobCommand: "duckdb", - jobArguments: []string{"'select * from /inputs/test.csv'"}, - numInlinedAttachments: 0, - numTotalAttachments: 1, - }, -} - -func (s *ExecSuite) TestJobPreparation() { - for _, tc := range testcases { - s.Run(tc.name, func() { - options := exec.NewExecOptions() - cmd := exec.NewCmdWithOptions(options) - - testCaseF := s.testFuncForTestCase(tc) - - cmd.PreRunE = nil - cmd.PostRunE = nil - cmd.Run = func(cmd *cobra.Command, cmdArgs []string) { - unknownArgs := exec.ExtractUnknownArgs(cmd.Flags(), tc.cmdLine) - s.Require().Equal(tc.expectedUnknownArgs, unknownArgs) - - job, err := exec.PrepareJob(cmd, cmdArgs, unknownArgs, options) - _ = testCaseF(job, err) - } - - cmd.SetArgs(tc.cmdLine) - cmd.Execute() - }) - } - -} - -func (s *ExecSuite) testFuncForTestCase(tc testCase) func(*models.Job, error) bool { - return func(job *models.Job, err error) bool { - if tc.expectedErrMsg == "" { - s.Require().NoError(err) - } else { - s.Require().Error(err) - s.Require().Contains(err.Error(), tc.expectedErrMsg) - return false - } - - task := job.Task() - - s.Require().Equal(tc.jobCommand, task.Engine.Params["Command"], "command is incorrect") - s.Require().Equal(tc.jobArguments, task.Engine.Params["Arguments"], "arguments are incorrect") - - var inlineCount = 0 - for _, src := range task.InputSources { - if src.Source.Type == "inline" { - inlineCount += 1 - } - } - - s.Require().Equal(tc.numInlinedAttachments, inlineCount, "wrong number of inline attachments") - s.Require().Equal(tc.numTotalAttachments, len(task.InputSources), "wrong number of input sources") - - return true - } -} diff --git a/cmd/cli/exec/templates.go b/cmd/cli/exec/templates.go deleted file mode 100644 index fb2f286acd..0000000000 --- a/cmd/cli/exec/templates.go +++ /dev/null @@ -1,77 +0,0 @@ -package exec - -import ( - "bufio" - "embed" - "fmt" - "io" - "io/fs" - "path" - "path/filepath" - "strings" - - "golang.org/x/exp/maps" -) - -//go:embed templates/*.tpl -var embeddedFiles embed.FS - -func ErrUnknownTemplate(name string) error { - return fmt.Errorf("unknown template specified: %s", name) -} - -type TemplateMap struct { - m map[string]string -} - -func NewTemplateMap(fsys fs.ReadDirFS, tplPath string) (*TemplateMap, error) { - entries, err := fsys.ReadDir(tplPath) - if err != nil { - return nil, err - } - - tpl := &TemplateMap{ - m: make(map[string]string), - } - - for _, entry := range entries { - if entry.IsDir() { - continue - } - - name := nameFromFile(entry.Name()) - - fd, err := fsys.Open(path.Join(tplPath, entry.Name())) - if err != nil { - return nil, err - } - defer fd.Close() - - reader := bufio.NewReader(fd) - data, err := io.ReadAll(reader) - if err != nil { - return nil, err - } - - tpl.m[strings.ToLower(name)] = string(data) - } - - return tpl, nil -} - -func (t *TemplateMap) Get(name string) (string, error) { - tpl, found := t.m[strings.ToLower(name)] - if !found { - return "", ErrUnknownTemplate(name) - } - - return tpl, nil -} - -func (t *TemplateMap) AllTemplates() []string { - return maps.Keys(t.m) -} - -func nameFromFile(filename string) string { - return strings.TrimSuffix(filename, filepath.Ext(filename)) -} diff 
--git a/cmd/cli/exec/templates/duckdb.tpl b/cmd/cli/exec/templates/duckdb.tpl deleted file mode 100644 index 0df78d4ac8..0000000000 --- a/cmd/cli/exec/templates/duckdb.tpl +++ /dev/null @@ -1,15 +0,0 @@ -{ - "Name": "DuckDB", - "Namespace": "default", - "Type": "batch", - "Count": 1, - "Tasks": [ - { - "Name": "execute", - "Engine": { - "Type": "duckdb", - "Params": {} - } - } - ] -} diff --git a/cmd/cli/exec/templates/python.tpl b/cmd/cli/exec/templates/python.tpl deleted file mode 100644 index a930570ab2..0000000000 --- a/cmd/cli/exec/templates/python.tpl +++ /dev/null @@ -1,17 +0,0 @@ -{ - "Name": "Python", - "Namespace": "default", - "Type": "batch", - "Count": 1, - "Tasks": [ - { - "Name": "execute", - "Engine": { - "Type": "python", - "Params": { - "Version": "{{or (index . "version") "3.11"}}" - } - } - } - ] -} diff --git a/cmd/cli/helpers/helpers.go b/cmd/cli/helpers/helpers.go index 1f639a074c..487d2b3d9e 100644 --- a/cmd/cli/helpers/helpers.go +++ b/cmd/cli/helpers/helpers.go @@ -62,7 +62,7 @@ func BuildJobFromFlags( labels, err := jobSettings.Labels() if err != nil { - return nil, fmt.Errorf("receieved invalid job labels: %w", err) + return nil, fmt.Errorf("received invalid job labels: %w", err) } job := &models.Job{ Name: jobSettings.Name(), diff --git a/cmd/cli/root.go b/cmd/cli/root.go index 8df28fa5ee..39c5ac8722 100644 --- a/cmd/cli/root.go +++ b/cmd/cli/root.go @@ -15,7 +15,6 @@ import ( "github.com/bacalhau-project/bacalhau/cmd/cli/deprecated" "github.com/bacalhau-project/bacalhau/cmd/cli/devstack" "github.com/bacalhau-project/bacalhau/cmd/cli/docker" - "github.com/bacalhau-project/bacalhau/cmd/cli/exec" "github.com/bacalhau-project/bacalhau/cmd/cli/job" "github.com/bacalhau-project/bacalhau/cmd/cli/node" "github.com/bacalhau-project/bacalhau/cmd/cli/serve" @@ -101,7 +100,6 @@ func NewRootCmd() *cobra.Command { configcli.NewCmd(), devstack.NewCmd(), docker.NewCmd(), - exec.NewCmd(), job.NewCmd(), node.NewCmd(), serve.NewCmd(), @@ -109,6 +107,7 @@ func NewRootCmd() *cobra.Command { wasm.NewCmd(), // deprecated command + deprecated.NewExecCommand(), deprecated.NewCancelCmd(), deprecated.NewCreateCmd(), deprecated.NewDescribeCmd(), diff --git a/cmd/cli/serve/serve.go b/cmd/cli/serve/serve.go index ea6edf6ccd..33d679c8fd 100644 --- a/cmd/cli/serve/serve.go +++ b/cmd/cli/serve/serve.go @@ -278,12 +278,12 @@ func parseServerAPIHost(host string) (string, error) { // We should check that the value gives us an address type // we can use to get our IP address. If it doesn't, we should // panic. - atype, ok := network.AddressTypeFromString(host) + addrType, ok := network.AddressTypeFromString(host) if !ok { return "", fmt.Errorf("invalid address type in Server API Host config: %s", host) } - addr, err := network.GetNetworkAddress(atype, network.AllAddresses) + addr, err := network.GetNetworkAddress(addrType, network.AllAddresses) if err != nil { return "", fmt.Errorf("failed to get network address for Server API Host: %s: %w", host, err) } diff --git a/cmd/cli/version/version.go b/cmd/cli/version/version.go index 9e53db306e..a2ce1e1dc0 100644 --- a/cmd/cli/version/version.go +++ b/cmd/cli/version/version.go @@ -128,10 +128,10 @@ func (oV *VersionOptions) Run( } else { // NB(forrest): since `GetAllVersions` is an API call - in the event the server is un-reachable // we timeout after 3 seconds to avoid waiting on an unavailable server to return its version information. 
- vctx, cancel := context.WithTimeout(ctx, time.Second*3) + vCtx, cancel := context.WithTimeout(ctx, time.Second*3) defer cancel() var err error - versions, err = util.GetAllVersions(vctx, cfg, api, r) + versions, err = util.GetAllVersions(vCtx, cfg, api, r) if err != nil { // No error on fail of version check. Just print as much as we can. log.Ctx(ctx).Warn().Err(err).Msg("failed to get updated versions") diff --git a/cmd/cli/version/version_test.go b/cmd/cli/version/version_test.go index a3d127e593..afbb2078c7 100644 --- a/cmd/cli/version/version_test.go +++ b/cmd/cli/version/version_test.go @@ -18,11 +18,12 @@ package version_test import ( "testing" - "github.com/bacalhau-project/bacalhau/cmd/util" - "github.com/bacalhau-project/bacalhau/pkg/lib/marshaller" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "github.com/bacalhau-project/bacalhau/cmd/util" + "github.com/bacalhau-project/bacalhau/pkg/lib/marshaller" + cmdtesting "github.com/bacalhau-project/bacalhau/cmd/testing" "github.com/bacalhau-project/bacalhau/cmd/util/output" ) @@ -51,7 +52,7 @@ func (suite *VersionSuite) TestVersionJSONOutput() { jsonDoc := &util.Versions{} err = marshaller.JSONUnmarshalWithMax([]byte(out), &jsonDoc) - require.NoError(suite.T(), err, "Could not unmarshall the output into json - %+v", err) + require.NoError(suite.T(), err, "Could not unmarshal the output into json - %+v", err) require.Equal(suite.T(), jsonDoc.ClientVersion.GitCommit, jsonDoc.ServerVersion.GitCommit, "Client and Server do not match in json.") } @@ -61,7 +62,7 @@ func (suite *VersionSuite) TestVersionYAMLOutput() { yamlDoc := &util.Versions{} err = marshaller.YAMLUnmarshalWithMax([]byte(out), &yamlDoc) - require.NoError(suite.T(), err, "Could not unmarshall the output into yaml - %+v", err) + require.NoError(suite.T(), err, "Could not unmarshal the output into yaml - %+v", err) require.Equal(suite.T(), yamlDoc.ClientVersion.GitCommit, yamlDoc.ServerVersion.GitCommit, "Client and Server do not match in yaml.") } diff --git a/cmd/cli/wasm/wasm_run.go b/cmd/cli/wasm/wasm_run.go index 5f4e06e206..29e07e23af 100644 --- a/cmd/cli/wasm/wasm_run.go +++ b/cmd/cli/wasm/wasm_run.go @@ -191,7 +191,7 @@ func build(ctx context.Context, args []string, opts *WasmRunOptions) (*models.Jo if err != nil { return nil, err } - envar, err := parse.StringSliceToMap(opts.EnvironmentVariables) + envVar, err := parse.StringSliceToMap(opts.EnvironmentVariables) if err != nil { return nil, fmt.Errorf("wasm env vars invalid: %w", err) } @@ -199,7 +199,7 @@ func build(ctx context.Context, args []string, opts *WasmRunOptions) (*models.Jo WithParameters(args[1:]...). WithEntrypoint(opts.Entrypoint). WithImportModules(opts.ImportModules). - WithEnvironmentVariables(envar). + WithEnvironmentVariables(envVar). 
Build() if err != nil { return nil, err diff --git a/cmd/testing/base.go b/cmd/testing/base.go index 304e1a005d..655834547a 100644 --- a/cmd/testing/base.go +++ b/cmd/testing/base.go @@ -112,7 +112,7 @@ func (s *BaseSuite) ExecuteTestCobraCommandWithStdin(stdin io.Reader, args ...st buf := new(bytes.Buffer) root := cli.NewRootCmd() root.SetOut(buf) - // TODO(forrest): we should separate the ouputs from a command into different buffers for stderr and sdtout, otherwise + // TODO(forrest): we should separate the outputs from a command into different buffers for stderr and stdout, otherwise // log lines and other outputs (like the update checker) will be included in the returned buffer, and commands // that make assertions on the output containing specific values, or being marshaller-able to yaml will fail. root.SetErr(buf) diff --git a/cmd/util/flags/cliflags/job.go b/cmd/util/flags/cliflags/job.go index f9f4a55b23..b9e89aed8a 100644 --- a/cmd/util/flags/cliflags/job.go +++ b/cmd/util/flags/cliflags/job.go @@ -71,7 +71,7 @@ func (j *JobSettings) Constraints() ([]*models.LabelSelectorRequirement, error) } // TODO(forrest): based on a conversation with walid we should be returning an error here if at anypoint if a label -// if provided that is invalid. We cannont remove them as we did previously. +// if provided that is invalid. We cannot remove them as we did previously. func (j *JobSettings) Labels() (map[string]string, error) { parsedLabels := make(map[string]string) rawLabels := j.labels diff --git a/cmd/util/flags/configflags/job_translation.go b/cmd/util/flags/configflags/job_translation.go index 050547ce4a..1c1bc82acb 100644 --- a/cmd/util/flags/configflags/job_translation.go +++ b/cmd/util/flags/configflags/job_translation.go @@ -1,18 +1,15 @@ package configflags import ( - "github.com/bacalhau-project/bacalhau/pkg/config" - "github.com/bacalhau-project/bacalhau/pkg/config/types" + legacy_types "github.com/bacalhau-project/bacalhau/pkg/config_legacy/types" ) var JobTranslationFlags = []Definition{ { - FlagName: "requester-job-translation-enabled", - ConfigPath: types.FeatureFlagsExecTranslationKey, - DefaultValue: config.Default.FeatureFlags.ExecTranslation, - Description: `Whether jobs should be translated at the requester node or not. Default: false`, - Deprecated: true, - EnvironmentVariables: []string{"BACALHAU_NODE_REQUESTER_TRANSLATIONENABLED"}, - DeprecatedMessage: makeDeprecationMessage(types.FeatureFlagsExecTranslationKey), + FlagName: "requester-job-translation-enabled", + ConfigPath: legacy_types.NodeRequesterTranslationEnabled, + DefaultValue: false, + Deprecated: true, + DeprecatedMessage: "job translation was an experimental feature and is no longer supported", }, } diff --git a/cmd/util/flags/configflags/register.go b/cmd/util/flags/configflags/register.go index b46a8d1fb8..550b2baefa 100644 --- a/cmd/util/flags/configflags/register.go +++ b/cmd/util/flags/configflags/register.go @@ -46,7 +46,7 @@ func BindFlags(v *viper.Viper, register map[string][]Definition) error { for _, def := range defs { // sanity check to ensure we are not binding a config key on more than one flag. 
if dup, ok := seen[def.ConfigPath]; ok && !def.Deprecated { - return fmt.Errorf("DEVELOPER ERROR: duplicate regsistration of config key %s for flag %s"+ + return fmt.Errorf("DEVELOPER ERROR: duplicate registration of config key %s for flag %s"+ " previously registered on on flag %s", def.ConfigPath, def.FlagName, dup.FlagName) } if !def.Deprecated { @@ -79,43 +79,43 @@ func PreRun(v *viper.Viper, flags map[string][]Definition) func(*cobra.Command, // This method should be called before the command runs to register flags accordingly. func RegisterFlags(cmd *cobra.Command, register map[string][]Definition) error { for name, defs := range register { - fset := pflag.NewFlagSet(name, pflag.ContinueOnError) + flagSet := pflag.NewFlagSet(name, pflag.ContinueOnError) // Determine the type of the default value for _, def := range defs { switch v := def.DefaultValue.(type) { case int: - fset.Int(def.FlagName, v, def.Description) + flagSet.Int(def.FlagName, v, def.Description) case uint64: - fset.Uint64(def.FlagName, v, def.Description) + flagSet.Uint64(def.FlagName, v, def.Description) case bool: - fset.Bool(def.FlagName, v, def.Description) + flagSet.Bool(def.FlagName, v, def.Description) case string: - fset.String(def.FlagName, v, def.Description) + flagSet.String(def.FlagName, v, def.Description) case []string: - fset.StringSlice(def.FlagName, v, def.Description) + flagSet.StringSlice(def.FlagName, v, def.Description) case map[string]string: - fset.StringToString(def.FlagName, v, def.Description) + flagSet.StringToString(def.FlagName, v, def.Description) case models.JobSelectionDataLocality: - fset.Var(flags.DataLocalityFlag(&v), def.FlagName, def.Description) + flagSet.Var(flags.DataLocalityFlag(&v), def.FlagName, def.Description) case logger.LogMode: - fset.Var(flags.LoggingFlag(&v), def.FlagName, def.Description) + flagSet.Var(flags.LoggingFlag(&v), def.FlagName, def.Description) case time.Duration: - fset.DurationVar(&v, def.FlagName, v, def.Description) + flagSet.DurationVar(&v, def.FlagName, v, def.Description) case types.Duration: - fset.DurationVar((*time.Duration)(&v), def.FlagName, time.Duration(v), def.Description) + flagSet.DurationVar((*time.Duration)(&v), def.FlagName, time.Duration(v), def.Description) case types.ResourceType: - fset.String(def.FlagName, string(v), def.Description) + flagSet.String(def.FlagName, string(v), def.Description) default: return fmt.Errorf("unhandled type: %T for flag %s", v, def.FlagName) } if def.Deprecated { - flag := fset.Lookup(def.FlagName) + flag := flagSet.Lookup(def.FlagName) flag.Deprecated = def.DeprecatedMessage flag.Hidden = true } } - cmd.PersistentFlags().AddFlagSet(fset) + cmd.PersistentFlags().AddFlagSet(flagSet) } return nil } diff --git a/cmd/util/opts/storage_specconfig.go b/cmd/util/opts/storage_specconfig.go index bb6715b1f0..8c4ee7c489 100644 --- a/cmd/util/opts/storage_specconfig.go +++ b/cmd/util/opts/storage_specconfig.go @@ -73,7 +73,7 @@ func (o *StorageSpecConfigOpt) Set(value string) error { options[k] = v } default: - return fmt.Errorf("unpexted key %s in field %s", key, field) + return fmt.Errorf("unexpected key %s in field %s", key, field) } } alias := sourceURI diff --git a/cmd/util/tokens.go b/cmd/util/tokens.go index 545ed244fc..3c46c0161b 100644 --- a/cmd/util/tokens.go +++ b/cmd/util/tokens.go @@ -46,7 +46,7 @@ func writeTokens(path string, t tokens) error { return json.NewEncoder(file).Encode(t) } -// Read the authorization crdential associated with the passed API base URL. 
If +// Read the authorization credentials associated with the passed API base URL. If // there is no credential currently stored, ReadToken will return nil with no // error. func ReadToken(path string, apiURL string) (*apimodels.HTTPCredential, error) { diff --git a/cspell.json b/cspell.json deleted file mode 100644 index acd34f1354..0000000000 --- a/cspell.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "version": "0.2", - "ignorePaths": [], - "dictionaryDefinitions": [], - "dictionaries": [], - "ignoreWords": [], - "import": [".cspell-code.json"] -} diff --git a/cspell.yaml b/cspell.yaml new file mode 100644 index 0000000000..274eb310ff --- /dev/null +++ b/cspell.yaml @@ -0,0 +1,62 @@ +version: '0.2' +language: en +allowCompoundWords: true + +# Dictionary configurations +dictionaryDefinitions: + - name: custom-dictionary + path: ./.cspell/custom-dictionary.txt + addWords: true + +dictionaries: + # General dictionaries + - en + - en-gb + # Programming language-specific dictionaries + - python + - golang + - typescript + - node + - html + - css + - cpp + # Technology-specific dictionaries + - k8s + - terraform + # Custom dictionaries + - custom-words + - custom-dictionary + +# Paths to ignore +ignorePaths: + - python/mkdocs.yml + - webui/build + - webui/node_modules + - webui/lib/api/generated/** + +# Patterns to ignore +ignoreRegExpList: + # Internet and email + - Urls + - Email + # Cryptography and security + - RsaCert + - SshRsa + - SHA + # Encoding + - Base64 + - Base64MultiLine + - Base64SingleLine + - HexDigits + # Programming-related + - CommitHash + - CommitHashLink + - CStyleHexValue + - CSSHexValue + - EscapedUnicodeCharacters + - EscapeCharacters + - HashStrings + - UnicodeRef + - UUID + # Custom patterns + - /github.com.*/ diff --git a/docker/custom-job-images/Makefile b/docker/custom-job-images/Makefile deleted file mode 100644 index a65e73cd2a..0000000000 --- a/docker/custom-job-images/Makefile +++ /dev/null @@ -1,25 +0,0 @@ -VERSION ?= 0.1 - -local: python-local duckdb-local - -build: python duckdb - -python: - @$(MAKE) -C python build - -python-local: - @$(MAKE) -C python local - -duckdb: - @$(MAKE) -C duckdb build - -duckdb-local: - @$(MAKE) -C duckdb local - -.PHONY: local python duckdb - - -python-test: - docker run --rm -it -v $(shell pwd)/python/test/single-file:/code bacalhauproject/exec-python-3.11:0.5 python /build/launcher.py -- python hello.py - docker run --rm -it -v $(shell pwd)/python/test/multi-file-reqtxt:/code bacalhauproject/exec-python-3.11:0.5 python /build/launcher.py -- python main.py - docker run --rm -it -v $(shell pwd)/python/test/multi-file-poetry:/code bacalhauproject/exec-python-3.11:0.5 python /build/launcher.py -- poetry run mfp diff --git a/docker/custom-job-images/README.md b/docker/custom-job-images/README.md deleted file mode 100644 index 298a5d385b..0000000000 --- a/docker/custom-job-images/README.md +++ /dev/null @@ -1,58 +0,0 @@ -# Custom Job Images - -This directory contains docker images used by the default custom job types, duckdb and python. -These images are used in the translation layer at the orchestrator, where custom job types are -converted into jobs for one of our supported execution environments (as of 1.2 this is docker -and wasm). - -These images make up a bundle that makes up 'custom job types', alongside the translation layer (that converts a 'python' job to a 'docker' job), and the template available to the CLI. 
- -## Images - -### Python - 3.11 - -`exec-python-3.11` provides a Python image with access to Python 3.11, build-essentials, and -a default set of installed requirements. To add more default requirements, add them to [python/base_requirements.txt](python/base_requirements.txt). - -The image expects a tgz to be mounted at /code from where there build/launcher.py process will: - -* Extract it -* Determine requirements method -* Install requirements -* Execute the command provided by the user - -If an /outputs folder exists, the stdout/stderr from the requirements installation process is written to /outputs/requirements.log for debugging. - -### DuckDB - -`exec-duckdb` provides an installation of duckdb installed in the image root folder. With appropriately mounted inputs, the user is able to specify all of the required parameters for running duckdb tasks (e.g. -csv -c "query") - -## Building - -Each image has two commands, `build` and `local`. - -`local` will build the image, and install it into the local docker engine allow for it to be used on the local machine. - -`build` will build the image and push it to docker hub. - -To use these tasks from the current folder, you can use: - -```shell -make python-local -make duckdb-local - -make python-build -make duckdb-build -``` - - -## Build problems? - -The makefiles provided attempt cross platform builds so that we are able to build on arm64 machines to be executed on amd64 machines. Depending on your setup, this may occasionally show the following error. - -``` -ERROR: Multiple platforms feature is currently not supported for docker driver. -Please switch to a different driver (eg. "docker buildx create --use") -``` - -following the instructions given when you run `docker buildx create --use` should get you building again. diff --git a/docker/custom-job-images/duckdb/Dockerfile b/docker/custom-job-images/duckdb/Dockerfile deleted file mode 100644 index 9b1f1ed0a0..0000000000 --- a/docker/custom-job-images/duckdb/Dockerfile +++ /dev/null @@ -1,17 +0,0 @@ -FROM --platform=$TARGETPLATFORM ubuntu:noble-20231126.1 -ARG TARGETPLATFORM -RUN echo "I am building for $TARGETPLATFORM" > /log - -RUN apt update && apt -yq upgrade && apt -yq install wget unzip -RUN if [ $TARGETPLATFORM = 'linux/arm64' ]; then \ - wget -O /tmp/ddb.zip "https://github.com/duckdb/duckdb/releases/download/v0.9.2/duckdb_cli-linux-aarch64.zip"; \ - else \ - wget -O /tmp/ddb.zip "https://github.com/duckdb/duckdb/releases/download/v0.9.2/duckdb_cli-linux-amd64.zip"; \ - fi -RUN unzip /tmp/ddb.zip -d /usr/local/bin - -LABEL org.opencontainers.image.source https://github.com/bacalhau-project/bacalhau-images -LABEL org.opencontainers.image.title "Bacalhau custom jobtype - Duckdb" -LABEL org.opencontainers.image.description "Duckdb for the bacalhau custom job type" -LABEL org.opencontainers.image.licenses Apache-2.0 -LABEL org.opencontainers.image.url https://bacalhau.org diff --git a/docker/custom-job-images/duckdb/Makefile b/docker/custom-job-images/duckdb/Makefile deleted file mode 100644 index 031e7852a5..0000000000 --- a/docker/custom-job-images/duckdb/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -MACHINE = $(shell uname -m) -USERNAME ?= bacalhauproject -VERSION ?= 0.2 - -ifeq ($(MACHINE),x86_64) - MACHINE := amd64 -endif - -local: - @echo - Building local duckdb $(VERSION) - docker buildx build \ - --platform linux/$(MACHINE) \ - --label org.opencontainers.artifact.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - -t $(USERNAME)/exec-duckdb:$(VERSION) --load . 
- -build: - @echo - Building duckdb $(VERSION) - docker buildx build \ - --platform linux/amd64,linux/arm64 \ - --label org.opencontainers.artifact.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - -t $(USERNAME)/exec-duckdb:$(VERSION) --push . - -.PHONY: build local diff --git a/docker/custom-job-images/python/Dockerfile b/docker/custom-job-images/python/Dockerfile deleted file mode 100644 index b907ef56ed..0000000000 --- a/docker/custom-job-images/python/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -FROM --platform=$TARGETPLATFORM python:3.11.7-bullseye - -RUN mkdir /build -WORKDIR /build - -RUN apt-get -yq update -RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \ - make \ - build-essential \ - libssl-dev \ - zlib1g-dev \ - libbz2-dev \ - libreadline-dev \ - libsqlite3-dev \ - wget \ - curl \ - llvm \ - libncurses5-dev \ - libncursesw5-dev \ - xz-utils \ - tk-dev \ - libffi-dev \ - liblzma-dev \ - git - -RUN python -mpip install --upgrade pip -RUN python -mpip install poetry - -COPY base_requirements.txt /build -RUN python -mpip install -r /build/base_requirements.txt - -COPY launcher.py /build -CMD ["/build/launcher.py"] - -LABEL org.opencontainers.image.source https://github.com/bacalhau-project/bacalhau-images -LABEL org.opencontainers.image.title "Bacalhau custom jobtype - Python 3.11" -LABEL org.opencontainers.image.description "Python for the bacalhau custom job type" -LABEL org.opencontainers.image.licenses Apache-2.0 -LABEL org.opencontainers.image.url https://bacalhau.org diff --git a/docker/custom-job-images/python/Makefile b/docker/custom-job-images/python/Makefile deleted file mode 100644 index 7798374acd..0000000000 --- a/docker/custom-job-images/python/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -MACHINE = $(shell uname -m) -USERNAME ?= bacalhauproject -VERSION ?= 0.5 - -ifeq ($(MACHINE),x86_64) - MACHINE := amd64 -endif - -local: - @echo - Building local python $(VERSION) - $(MACHINE) - docker buildx build \ - --platform linux/$(MACHINE) \ - -t $(USERNAME)/exec-python-3.11:$(VERSION) \ - --label org.opencontainers.artifact.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --load . - -build: - @echo - Building python $(VERSION) - docker buildx build \ - --platform linux/amd64,linux/arm64 \ - -t $(USERNAME)/exec-python-3.11:$(VERSION) \ - --label org.opencontainers.artifact.created=$(shell date -u +"%Y-%m-%dT%H:%M:%SZ") \ - --push . - - -.PHONY: build local diff --git a/docker/custom-job-images/python/base_requirements.txt b/docker/custom-job-images/python/base_requirements.txt deleted file mode 100644 index ac261435fe..0000000000 --- a/docker/custom-job-images/python/base_requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -# To have some dependencies pre-installed (at docker build time) add them -# to this requirements file. -pandas==2.1 -polar -requests diff --git a/docker/custom-job-images/python/launcher.py b/docker/custom-job-images/python/launcher.py deleted file mode 100755 index bd9fafe623..0000000000 --- a/docker/custom-job-images/python/launcher.py +++ /dev/null @@ -1,181 +0,0 @@ -#!/usr/bin/env python3 -import ast -import os -import shutil -import subprocess -import sys -from glob import glob - -IGNORE = ( - "*.pyc", - ".DS_Store", - "__pycache__", -) - -CODE_DIR = "/code" # The mounted code folder -OUTPUT_DIR = "/outputs" # The output folder - - -def main(): - working_dir = "/app" # Created by the shutil.copytree - - # it's possible we haven't been sent any code (and we're running via -c) - # so let's support not sending code. 
-    if os.path.exists(CODE_DIR):
-        # Unpack the contents of /code to the working directory which
-        # will create that working_directory, ignoring the files that
-        # match the globs in IGNORE
-        ignore_pattern = shutil.ignore_patterns(*IGNORE)
-        shutil.copytree(CODE_DIR, working_dir, ignore=ignore_pattern)
-        os.chdir(working_dir)
-
-        # The inline attachments will have added the last part of the
-        # path when adding a directory, and so WORKING_DIR won't contain
-        # the code, it'll contain that directory. In these cases we'll
-        # just change the WORKING_DIR.
-        wd_list = os.listdir(working_dir)
-        if len(wd_list) == 1:
-            pth = os.path.join(working_dir, wd_list[0])
-            if os.path.isdir(pth):
-                working_dir = pth
-
-        # Figure out how to install requirements
-        for f in (
-            single_file,
-            pyproject,
-            requirements_txt,
-            setup_py,
-        ):
-            if f(working_dir):
-                break
-    else:
-        # We will use the current directory as the working directory as
-        # we won't have created /app with the copy
-        working_dir = os.curdir
-
-    # Run the program in that working directory
-    past = False
-    args = []
-    for a in sys.argv:
-        if past:
-            args.append(a)
-        if a == "--":
-            past = True
-
-    cmd = " ".join(args)
-    _ = subprocess.run(cmd, capture_output=False, shell=True, cwd=working_dir)
-
-
-def to_requirements_log(stdoutBytes, stderrBytes):
-    if os.path.exists(OUTPUT_DIR):
-        name = os.path.join(OUTPUT_DIR, "requirements.log")
-        with open(name, "w") as f:
-            f.write("================================== STDOUT\n")
-            f.write(stdoutBytes.decode("utf-8"))
-            f.write("\n================================== STDERR\n")
-            f.write(stderrBytes.decode("utf-8"))
-
-
-def single_file(working_dir):
-    """
-    If we only find a single file ready to be deployed, we'll read pip install instructions
-    from the module doc (if it exists).
-    """
-    installed = 0
-    doclines = []
-    files = glob("*.py", root_dir=working_dir)
-
-    if len(files) == 1:
-        with open(os.path.join(working_dir, files[0])) as f:
-            mod = ast.parse(f.read())
-            if not mod:
-                return False
-
-            doc = ast.get_docstring(mod)
-            if not doc:
-                return False
-
-            doclines = doc.split("\n")
-
-    for line in doclines:
-        line = line.strip()
-        if line.startswith("pip"):
-            proc = subprocess.run(
-                f"python -m{line}", capture_output=True, shell=True, cwd=working_dir
-            )
-            to_requirements_log(proc.stdout, proc.stderr)
-
-            installed = installed + 1
-
-    return installed > 0
-
-
-def pyproject(working_dir):
-    """
-    If there is a pyproject.toml we'll check to see if it is a poetry app, and if
-    so then we will get poetry to install dependencies. If not then we will attempt
-    to pip install them.
- """ - pth = os.path.join(working_dir, "pyproject.toml") - if not os.path.exists(pth): - return False - - is_poetry = False - - with open(pth) as f: - contents = f.read() - is_poetry = "[tool.poetry]" in contents - - cmd = "poetry install" - if not is_poetry: - cmd = f"python -mpip install {pth}" - - proc = subprocess.run(cmd, capture_output=True, shell=True, cwd=working_dir) - to_requirements_log(proc.stdout, proc.stderr) - - return True - - -def requirements_txt(working_dir): - """ - Look for a requirements file (or several) based on common names to load the - dependencies from - """ - installed = 0 - files = ("dev-requirements.txt", "requirements-dev.txt", "requirements.txt") - for f in files: - pth = os.path.join(working_dir, f) - if os.path.exists(pth): - proc = subprocess.run( - f"python -mpip install -r {f}", - capture_output=True, - shell=True, - cwd=working_dir, - ) - to_requirements_log(proc.stdout, proc.stderr) - - installed = installed + 1 - - return installed > 0 - - -def setup_py(working_dir): - """ - Look for a setup.py file as a last resort and try to install it locally - """ - pth = os.path.join(working_dir, "setup.py") - if os.path.exists(pth): - proc = subprocess.run( - "python -m pip install -e .", - capture_output=True, - shell=True, - cwd=working_dir, - ) - to_requirements_log(proc.stdout, proc.stderr) - return True - - return False - - -if __name__ == "__main__": - main() diff --git a/docker/custom-job-images/python/test/multi-file-poetry/multi_file_poetry/__init__.py b/docker/custom-job-images/python/test/multi-file-poetry/multi_file_poetry/__init__.py deleted file mode 100644 index 9858d0be1d..0000000000 --- a/docker/custom-job-images/python/test/multi-file-poetry/multi_file_poetry/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -def main(): - from colorama import Fore - - print(Fore.BLUE + "Hello World") diff --git a/docker/custom-job-images/python/test/multi-file-poetry/poetry.lock b/docker/custom-job-images/python/test/multi-file-poetry/poetry.lock deleted file mode 100644 index 39de90bfbf..0000000000 --- a/docker/custom-job-images/python/test/multi-file-poetry/poetry.lock +++ /dev/null @@ -1,17 +0,0 @@ -# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand. - -[[package]] -name = "colorama" -version = "0.4.6" -description = "Cross-platform colored terminal text." 
-optional = false -python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" -files = [ - {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, - {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, -] - -[metadata] -lock-version = "2.0" -python-versions = "^3.10" -content-hash = "29aa9de81f853ba77bf312052e460b1f92e0290eed2c8cc67ba60ddd99b4ee19" diff --git a/docker/custom-job-images/python/test/multi-file-poetry/pyproject.toml b/docker/custom-job-images/python/test/multi-file-poetry/pyproject.toml deleted file mode 100644 index e2d762a6bd..0000000000 --- a/docker/custom-job-images/python/test/multi-file-poetry/pyproject.toml +++ /dev/null @@ -1,19 +0,0 @@ -[tool.poetry] -name = "multi-file-poetry" -version = "0.1.0" -description = "" -authors = ["Bacalhau Team"] -readme = "README.md" -packages = [{ include = "multi_file_poetry" }] - -[tool.poetry.dependencies] -python = "^3.10" -colorama = "^0.4.6" - - -[build-system] -requires = ["poetry-core"] -build-backend = "poetry.core.masonry.api" - -[tool.poetry.scripts] -mfp = "multi_file_poetry:main" diff --git a/docker/custom-job-images/python/test/multi-file-poetry/tests/__init__.py b/docker/custom-job-images/python/test/multi-file-poetry/tests/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docker/custom-job-images/python/test/multi-file-reqtxt/hello.py b/docker/custom-job-images/python/test/multi-file-reqtxt/hello.py deleted file mode 100644 index 4cae309aad..0000000000 --- a/docker/custom-job-images/python/test/multi-file-reqtxt/hello.py +++ /dev/null @@ -1,5 +0,0 @@ -from colorama import Fore - - -def hello(): - print(Fore.GREEN + "Hello World!") diff --git a/docker/custom-job-images/python/test/multi-file-reqtxt/main.py b/docker/custom-job-images/python/test/multi-file-reqtxt/main.py deleted file mode 100644 index 71c4133de5..0000000000 --- a/docker/custom-job-images/python/test/multi-file-reqtxt/main.py +++ /dev/null @@ -1,4 +0,0 @@ -from hello import hello - -if __name__ == "__main__": - hello() diff --git a/docker/custom-job-images/python/test/multi-file-reqtxt/requirements.txt b/docker/custom-job-images/python/test/multi-file-reqtxt/requirements.txt deleted file mode 100644 index 3fcfb51b2a..0000000000 --- a/docker/custom-job-images/python/test/multi-file-reqtxt/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -colorama diff --git a/docker/custom-job-images/python/test/single-file/hello.py b/docker/custom-job-images/python/test/single-file/hello.py deleted file mode 100644 index aa09810df7..0000000000 --- a/docker/custom-job-images/python/test/single-file/hello.py +++ /dev/null @@ -1,6 +0,0 @@ -""" -pip install colorama -""" -from colorama import Fore - -print(Fore.RED + "Hello World!!") diff --git a/go.mod b/go.mod index 8ca5ac5afd..eae94395a9 100644 --- a/go.mod +++ b/go.mod @@ -22,8 +22,6 @@ require ( github.com/golang-jwt/jwt v3.2.2+incompatible github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.1 - github.com/hashicorp/go-hclog v1.6.3 - github.com/hashicorp/go-plugin v1.6.0 github.com/hashicorp/go-retryablehttp v0.7.7 github.com/imdario/mergo v0.3.16 github.com/ipfs/boxo v0.18.0 @@ -80,7 +78,6 @@ require ( go.uber.org/zap v1.27.0 golang.org/x/crypto v0.27.0 golang.org/x/exp v0.0.0-20240213143201-ec583247a57a - gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61 k8s.io/apimachinery v0.29.0 k8s.io/kubectl v0.29.0 
sigs.k8s.io/yaml v1.4.0 @@ -90,7 +87,6 @@ require ( github.com/KyleBanks/depth v1.2.1 // indirect github.com/OneOfOne/xxhash v1.2.8 // indirect github.com/agnivade/levenshtein v1.1.1 // indirect - github.com/alessio/shellescape v1.4.2 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 // indirect github.com/aws/aws-sdk-go-v2/credentials v1.17.3 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.1 // indirect @@ -120,7 +116,6 @@ require ( github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f // indirect github.com/gorilla/mux v1.8.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/yamux v0.1.1 // indirect github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab // indirect github.com/ipfs/go-log/v2 v2.5.1 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -133,14 +128,12 @@ require ( github.com/lestrrat-go/option v1.0.1 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/minio/highwayhash v1.0.3 // indirect - github.com/mitchellh/go-testing-interface v1.0.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect github.com/multiformats/go-multihash v0.2.3 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/nats-io/jwt/v2 v2.7.0 // indirect github.com/nats-io/nkeys v0.4.7 // indirect - github.com/oklog/run v1.0.0 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect @@ -188,7 +181,6 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/gopacket v1.1.19 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect @@ -279,7 +271,7 @@ require ( golang.org/x/time v0.6.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect - google.golang.org/grpc v1.66.1 + google.golang.org/grpc v1.66.1 // indirect google.golang.org/protobuf v1.34.2 gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 30f730af03..62f4697ba2 100644 --- a/go.sum +++ b/go.sum @@ -288,8 +288,6 @@ github.com/alecthomas/participle/v2 v2.1.0/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9l github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9 h1:ez/4by2iGztzR4L0zgAOR8lTQK9VlyBVVd7G4omaOQs= github.com/alecthomas/units v0.0.0-20231202071711-9a357b53e9c9/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0= -github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5 h1:iW0a5ljuFxkLGPNem5Ui+KBjFJzKg4Fv2fnxe4dvzpM= github.com/alexbrainman/goissue34681 v0.0.0-20191006012335-3fc7a47baff5/go.mod h1:Y2QMoi1vgtOIfc+6DhrMOGkLoGzqSV2rKp4Sm+opsyA= github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= @@ -349,8 +347,6 @@ github.com/bmatcuk/doublestar/v4 v4.6.1 
h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwN github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= -github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= -github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= github.com/c2h5oh/datasize v0.0.0-20220606134207-859f65c6625b h1:6+ZFm0flnudZzdSE0JxlhR2hKnGPcNB35BjQf4RYQDY= @@ -683,8 +679,6 @@ github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB1 github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= -github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -695,8 +689,6 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= -github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -814,8 +806,6 @@ github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0 github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jedib0t/go-pretty/v6 v6.5.3 h1:GIXn6Er/anHTkVUoufs7ptEvxdD6KIhR7Axa2wYCPF0= github.com/jedib0t/go-pretty/v6 v6.5.3/go.mod h1:5LQIxa52oJ/DlDSLv0HEkWOFMDGoWkJb9ss5KqPpJBg= -github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= -github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -926,7 +916,6 @@ github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8 github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/mattn/go-colorable v0.1.8/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= @@ -957,8 +946,6 @@ github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dz github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -1024,8 +1011,6 @@ github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDm github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= github.com/open-policy-agent/opa v0.60.0 h1:ZPoPt4yeNs5UXCpd/P/btpSyR8CR0wfhVoh9BOwgJNs= @@ -1178,7 +1163,6 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= @@ -1612,7 +1596,6 @@ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1988,8 +1971,6 @@ google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHh google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= -gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61 h1:8ajkpB4hXVftY5ko905id+dOnmorcS2CHNxxHLLDcFM= -gopkg.in/alessio/shellescape.v1 v1.0.0-20170105083845-52074bc9df61/go.mod h1:IfMagxm39Ys4ybJrDb7W3Ob8RwxftP0Yy+or/NVz1O8= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/ops/metrics/grafana/provisioning/dashboards/dashboard.json b/ops/metrics/grafana/provisioning/dashboards/dashboard.json index ecee403d32..9e05872bdf 100644 --- a/ops/metrics/grafana/provisioning/dashboards/dashboard.json +++ b/ops/metrics/grafana/provisioning/dashboards/dashboard.json @@ -182,7 +182,7 @@ "useBackend": false } ], - "title": "Jobs Receieved", + "title": "Jobs Received", "type": "stat" }, { @@ -656,7 +656,7 @@ "refId": "A" } ], - "title": "Averagef HTTP Requests Duration over 5min", + "title": "Average HTTP Requests Duration over 5min", "type": "timeseries" }, { @@ -930,7 +930,7 @@ "useBackend": false } ], - "title": "Evaluatio Broker Cancelable", + "title": "Evaluation Broker Cancelable", "type": "stat" }, { @@ -1004,7 +1004,7 @@ "useBackend": false } ], - "title": "Evaluatio Broker Inflight", + "title": "Evaluation Broker Inflight", "type": "stat" }, { @@ -1078,7 +1078,7 @@ "useBackend": false } ], - "title": "Evaluatio Broker Pending", + "title": "Evaluation Broker Pending", "type": "stat" }, { @@ -1152,7 +1152,7 @@ "useBackend": false } ], - "title": "Evaluatio Broker Waiting", + "title": "Evaluation Broker Waiting", "type": "stat" }, { diff --git a/ops/terraform/remote_files/scripts/start-bacalhau.sh b/ops/terraform/remote_files/scripts/start-bacalhau.sh index 252fbb148c..b7c30ad446 100644 --- a/ops/terraform/remote_files/scripts/start-bacalhau.sh +++ b/ops/terraform/remote_files/scripts/start-bacalhau.sh @@ -36,7 +36,6 @@ bacalhau serve \ --job-execution-timeout-bypass-client-id="${TRUSTED_CLIENT_IDS}" \ --ipfs-connect /ip4/127.0.0.1/tcp/5001 \ --api-port 1234 \ - --requester-job-translation-enabled \ --config DisableAnalytics \ --config labels="owner=bacalhau,name=node-${TERRAFORM_NODE_INDEX}"\ --config Compute.Orchestrators="${BACALHAU_ORCHESTRATORS}" \ diff --git a/pkg/authz/policies/policy_ns_anon.rego b/pkg/authz/policies/policy_ns_anon.rego index b243fd2dd9..c56ab79d13 100644 --- a/pkg/authz/policies/policy_ns_anon.rego +++ b/pkg/authz/policies/policy_ns_anon.rego @@ -30,7 +30,7 @@ allow if { namespace_readable(job_namespace_perms) } -# Allow reading all other endpoints, inclduing by users who don't have a token +# Allow reading all other endpoints, including by users who don't have a token allow if { input.http.path != job_endpoint not is_legacy_api @@ -51,7 +51,7 @@ allow if { not input.http.path[3] in ["submit", "cancel"] } -# Allow posting to 
auth endpoints, neccessary to get a token in the first place
+# Allow posting to auth endpoints, necessary to get a token in the first place
 allow if {
 	input.http.path[2] == "auth"
 }
diff --git a/pkg/compute/executor.go b/pkg/compute/executor.go
index 3908042204..f27a5fac7e 100644
--- a/pkg/compute/executor.go
+++ b/pkg/compute/executor.go
@@ -65,31 +65,31 @@ func NewBaseExecutor(params BaseExecutorParams) *BaseExecutor {
 
 func prepareInputVolumes(
 	ctx context.Context,
-	strgprovider storage.StorageProvider,
+	storageProvider storage.StorageProvider,
 	storageDirectory string,
 	inputSources ...*models.InputSource) (
 	[]storage.PreparedStorage, func(context.Context) error, error,
 ) {
-	inputVolumes, err := storage.ParallelPrepareStorage(ctx, strgprovider, storageDirectory, inputSources...)
+	inputVolumes, err := storage.ParallelPrepareStorage(ctx, storageProvider, storageDirectory, inputSources...)
 	if err != nil {
 		return nil, nil, err
 	}
 	return inputVolumes, func(ctx context.Context) error {
-		return storage.ParallelCleanStorage(ctx, strgprovider, inputVolumes)
+		return storage.ParallelCleanStorage(ctx, storageProvider, inputVolumes)
 	}, nil
 }
 
 func prepareWasmVolumes(
 	ctx context.Context,
-	strgprovider storage.StorageProvider,
+	storageProvider storage.StorageProvider,
 	storageDirectory string,
 	wasmEngine wasmmodels.EngineSpec) (
 	map[string][]storage.PreparedStorage, func(context.Context) error, error,
 ) {
-	importModuleVolumes, err := storage.ParallelPrepareStorage(ctx, strgprovider, storageDirectory, wasmEngine.ImportModules...)
+	importModuleVolumes, err := storage.ParallelPrepareStorage(ctx, storageProvider, storageDirectory, wasmEngine.ImportModules...)
 	if err != nil {
 		return nil, nil, err
 	}
-	entryModuleVolumes, err := storage.ParallelPrepareStorage(ctx, strgprovider, storageDirectory, wasmEngine.EntryModule)
+	entryModuleVolumes, err := storage.ParallelPrepareStorage(ctx, storageProvider, storageDirectory, wasmEngine.EntryModule)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -100,8 +100,8 @@ func prepareWasmVolumes(
 	}
 
 	cleanup := func(ctx context.Context) error {
-		err1 := storage.ParallelCleanStorage(ctx, strgprovider, importModuleVolumes)
-		err2 := storage.ParallelCleanStorage(ctx, strgprovider, entryModuleVolumes)
+		err1 := storage.ParallelCleanStorage(ctx, storageProvider, importModuleVolumes)
+		err2 := storage.ParallelCleanStorage(ctx, storageProvider, entryModuleVolumes)
 		if err1 != nil || err2 != nil {
 			return fmt.Errorf("Error cleaning up WASM volumes: %v, %v", err1, err2)
 		}
@@ -119,21 +119,21 @@ func prepareWasmVolumes(
 //
 // For example, an InputCleanupFn might be responsible for deallocating storage used
 // for input volumes, or deleting temporary input files that were created as part of the
-// job's execution. The nature of it operation depends on the storage provided by `strgprovider` and
+// job's execution. The nature of its operation depends on the storage provided by `storageProvider` and
 // the input sources of the job's associated tasks. For the case of a wasm job, its input and entry module storage volumes
 // should be removed via the method after the job's execution reaches a terminal state.
 type InputCleanupFn = func(context.Context) error
 
 func PrepareRunArguments(
 	ctx context.Context,
-	strgprovider storage.StorageProvider,
+	storageProvider storage.StorageProvider,
 	storageDirectory string,
 	execution *models.Execution,
 	resultsDir string,
 ) (*executor.RunCommandRequest, InputCleanupFn, error) {
 	var cleanupFuncs []func(context.Context) error
 
-	inputVolumes, inputCleanup, err := prepareInputVolumes(ctx, strgprovider, storageDirectory, execution.Job.Task().InputSources...)
+	inputVolumes, inputCleanup, err := prepareInputVolumes(ctx, storageProvider, storageDirectory, execution.Job.Task().InputSources...)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -162,7 +162,7 @@ func PrepareRunArguments(
 			return nil, nil, err
 		}
 
-		volumes, wasmCleanup, err := prepareWasmVolumes(ctx, strgprovider, storageDirectory, wasmEngine)
+		volumes, wasmCleanup, err := prepareWasmVolumes(ctx, storageProvider, storageDirectory, wasmEngine)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -259,7 +259,7 @@ func (e *BaseExecutor) Start(ctx context.Context, execution *models.Execution) *
 	log.Ctx(ctx).Debug().Msg("starting execution")
 
 	if e.failureInjection.IsBadActor {
-		result.Err = fmt.Errorf("i am a baaad node. i failed execution %s", execution.ID)
+		result.Err = fmt.Errorf("i am a bad node. i failed execution %s", execution.ID)
 		return result
 	}
 
diff --git a/pkg/compute/executor_buffer.go b/pkg/compute/executor_buffer.go
index cc3fb6e12b..764149dfe6 100644
--- a/pkg/compute/executor_buffer.go
+++ b/pkg/compute/executor_buffer.go
@@ -154,7 +154,7 @@ func (s *ExecutorBuffer) deque() {
 	// There are at most max matches, so try at most that many times
 	max := s.queuedTasks.Len()
 	for i := 0; i < max; i++ {
-		qitem := s.queuedTasks.DequeueWhere(func(task *bufferTask) bool {
+		qItem := s.queuedTasks.DequeueWhere(func(task *bufferTask) bool {
 			// If we don't have enough resources to run this task, then we will skip it
 			queuedResources := task.localExecutionState.Execution.TotalAllocatedResources()
 			allocatedResources := s.runningCapacity.AddIfHasCapacity(ctx, *queuedResources)
@@ -174,13 +174,13 @@ func (s *ExecutorBuffer) deque() {
 			return true
 		})
 
-		if qitem == nil {
+		if qItem == nil {
 			// We didn't find anything in the queue that matches our resource availability so we will
 			// break out of this loop as there is nothing else to find
 			break
 		}
 
-		task := qitem.Value
+		task := qItem.Value
 
 		// Move the execution to the running list and remove from the list of enqueued IDs
 		// before we actually run the task
diff --git a/pkg/compute/store/test/store_suite.go b/pkg/compute/store/test/store_suite.go
index 80c3147323..87808bde0d 100644
--- a/pkg/compute/store/test/store_suite.go
+++ b/pkg/compute/store/test/store_suite.go
@@ -18,7 +18,7 @@ import (
 	"github.com/bacalhau-project/bacalhau/pkg/test/mock"
 )
 
-type StoreCreator func(ctx context.Context, dbpath string) (store.ExecutionStore, error)
+type StoreCreator func(ctx context.Context, dbPath string) (store.ExecutionStore, error)
 
 type StoreSuite struct {
 	suite.Suite
diff --git a/pkg/config/migrate.go b/pkg/config/migrate.go
index f2c0dc7c2e..5a0b5d26eb 100644
--- a/pkg/config/migrate.go
+++ b/pkg/config/migrate.go
@@ -90,9 +90,7 @@ func MigrateV1(in v1types.BacalhauConfig) (types.Bacalhau, error) {
 		UpdateConfig: types.UpdateConfig{
 			Interval: types.Duration(in.Update.CheckFrequency),
 		},
-		FeatureFlags: types.FeatureFlags{
-			ExecTranslation: in.Node.Requester.TranslationEnabled,
-		},
+		FeatureFlags: types.FeatureFlags{},
 	}
 	return out, nil
 }
diff --git a/pkg/config/types/bacalhau_test.go 
b/pkg/config/types/bacalhau_test.go index f203374a60..5eebad8821 100644 --- a/pkg/config/types/bacalhau_test.go +++ b/pkg/config/types/bacalhau_test.go @@ -120,9 +120,9 @@ func TestBacalhauMergeNew(t *testing.T) { NameProvider: "test", } other := Bacalhau{} - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.Equal(t, base, MergeNewd) + assert.Equal(t, base, mergedNew) }) t.Run("MergeNew overwrites existing fields", func(t *testing.T) { @@ -139,12 +139,12 @@ func TestBacalhauMergeNew(t *testing.T) { }, StrictVersionMatch: true, } - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.Equal(t, "otherhost", MergeNewd.API.Host) - assert.Equal(t, 8080, MergeNewd.API.Port) - assert.Equal(t, "test", MergeNewd.NameProvider) - assert.True(t, MergeNewd.StrictVersionMatch) + assert.Equal(t, "otherhost", mergedNew.API.Host) + assert.Equal(t, 8080, mergedNew.API.Port) + assert.Equal(t, "test", mergedNew.NameProvider) + assert.True(t, mergedNew.StrictVersionMatch) }) t.Run("MergeNew with nested structs", func(t *testing.T) { @@ -165,11 +165,11 @@ func TestBacalhauMergeNew(t *testing.T) { }, }, } - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.True(t, MergeNewd.Orchestrator.Enabled) - assert.Equal(t, "base.local", MergeNewd.Orchestrator.Host) - assert.Equal(t, Duration(10), MergeNewd.Orchestrator.NodeManager.DisconnectTimeout) + assert.True(t, mergedNew.Orchestrator.Enabled) + assert.Equal(t, "base.local", mergedNew.Orchestrator.Host) + assert.Equal(t, Duration(10), mergedNew.Orchestrator.NodeManager.DisconnectTimeout) }) t.Run("MergeNew with slices", func(t *testing.T) { @@ -183,9 +183,9 @@ func TestBacalhauMergeNew(t *testing.T) { Orchestrators: []string{"nats://127.0.0.1:4223", "nats://127.0.0.1:4224"}, }, } - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.Equal(t, []string{"nats://127.0.0.1:4223", "nats://127.0.0.1:4224"}, MergeNewd.Compute.Orchestrators) + assert.Equal(t, []string{"nats://127.0.0.1:4223", "nats://127.0.0.1:4224"}, mergedNew.Compute.Orchestrators) }) t.Run("MergeNew doesn't affect original configs", func(t *testing.T) { @@ -200,11 +200,11 @@ func TestBacalhauMergeNew(t *testing.T) { Host: "otherhost", }, } - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.NotEqual(t, base, MergeNewd) - assert.NotEqual(t, other, MergeNewd) + assert.NotEqual(t, base, mergedNew) + assert.NotEqual(t, other, mergedNew) assert.Equal(t, "localhost", base.API.Host) assert.Equal(t, "otherhost", other.API.Host) }) @@ -235,11 +235,11 @@ func TestBacalhauMergeNew(t *testing.T) { }, }, } - MergeNewd, err := base.MergeNew(other) + mergedNew, err := base.MergeNew(other) require.NoError(t, err) - assert.Equal(t, 1, MergeNewd.JobDefaults.Batch.Priority) - assert.Equal(t, "1000m", MergeNewd.JobDefaults.Batch.Task.Resources.CPU) - assert.Equal(t, "1Gb", MergeNewd.JobDefaults.Batch.Task.Resources.Memory) + assert.Equal(t, 1, mergedNew.JobDefaults.Batch.Priority) + assert.Equal(t, "1000m", mergedNew.JobDefaults.Batch.Task.Resources.CPU) + assert.Equal(t, "1Gb", mergedNew.JobDefaults.Batch.Task.Resources.Memory) }) } diff --git a/pkg/config/types/feature_flags.go b/pkg/config/types/feature_flags.go index b4b55a6dc5..8dc5379636 100644 --- a/pkg/config/types/feature_flags.go +++ 
b/pkg/config/types/feature_flags.go @@ -1,6 +1,4 @@ package types type FeatureFlags struct { - // ExecTranslation enables the execution translation feature. - ExecTranslation bool `yaml:"ExecTranslation,omitempty" json:"ExecTranslation,omitempty"` } diff --git a/pkg/config/types/gen/generate.go b/pkg/config/types/gen/generate.go index ef00c27352..af644e9f22 100644 --- a/pkg/config/types/gen/generate.go +++ b/pkg/config/types/gen/generate.go @@ -113,8 +113,8 @@ func WriteConstants(fieldInfos map[string]FieldInfo, w io.Writer) error { func ConfigFieldMap(dir string) map[string]FieldInfo { // Parse the package directory - fset := token.NewFileSet() - pkgs, err := parser.ParseDir(fset, dir, nil, parser.ParseComments) + fileSet := token.NewFileSet() + pkgs, err := parser.ParseDir(fileSet, dir, nil, parser.ParseComments) if err != nil { log.Fatal(err) } diff --git a/pkg/config/types/generated_constants.go b/pkg/config/types/generated_constants.go index c805277289..f30724e5b5 100644 --- a/pkg/config/types/generated_constants.go +++ b/pkg/config/types/generated_constants.go @@ -31,7 +31,6 @@ const EnginesDisabledKey = "Engines.Disabled" const EnginesTypesDockerManifestCacheRefreshKey = "Engines.Types.Docker.ManifestCache.Refresh" const EnginesTypesDockerManifestCacheSizeKey = "Engines.Types.Docker.ManifestCache.Size" const EnginesTypesDockerManifestCacheTTLKey = "Engines.Types.Docker.ManifestCache.TTL" -const FeatureFlagsExecTranslationKey = "FeatureFlags.ExecTranslation" const InputSourcesDisabledKey = "InputSources.Disabled" const InputSourcesMaxRetryCountKey = "InputSources.MaxRetryCount" const InputSourcesReadTimeoutKey = "InputSources.ReadTimeout" diff --git a/pkg/config/types/generated_descriptions.go b/pkg/config/types/generated_descriptions.go index 897bb42dc2..f60305469b 100644 --- a/pkg/config/types/generated_descriptions.go +++ b/pkg/config/types/generated_descriptions.go @@ -33,7 +33,6 @@ var ConfigDescriptions = map[string]string{ EnginesTypesDockerManifestCacheRefreshKey: "Refresh specifies the refresh interval for cache entries.", EnginesTypesDockerManifestCacheSizeKey: "Size specifies the size of the Docker manifest cache.", EnginesTypesDockerManifestCacheTTLKey: "TTL specifies the time-to-live duration for cache entries.", - FeatureFlagsExecTranslationKey: "ExecTranslation enables the execution translation feature.", InputSourcesDisabledKey: "Disabled specifies a list of storages that are disabled.", InputSourcesMaxRetryCountKey: "ReadTimeout specifies the maximum number of attempts for reading from a storage.", InputSourcesReadTimeoutKey: "ReadTimeout specifies the maximum time allowed for reading from a storage.", diff --git a/pkg/config/types/paths.go b/pkg/config/types/paths.go index f74c4ccc55..c4e773edbf 100644 --- a/pkg/config/types/paths.go +++ b/pkg/config/types/paths.go @@ -110,19 +110,6 @@ func (b Bacalhau) ResultsStorageDir() (string, error) { return path, nil } -const PluginsDirName = "plugins" - -func (b Bacalhau) PluginsDir() (string, error) { - if b.DataDir == "" { - return "", fmt.Errorf("data dir not set") - } - path := filepath.Join(b.DataDir, PluginsDirName) - if err := ensureDir(path); err != nil { - return "", fmt.Errorf("getting plugins path: %w", err) - } - return path, nil -} - const ExecutionStoreFileName = "state_boltdb.db" func (b Bacalhau) ExecutionStoreFilePath() (string, error) { diff --git a/pkg/devstack/devstack.go b/pkg/devstack/devstack.go index fed9d32e7c..19bb791ff9 100644 --- a/pkg/devstack/devstack.go +++ b/pkg/devstack/devstack.go @@ -126,11 
+126,11 @@ func Setup(
 	if isComputeNode {
 		// We have multiple processes on the same machine, all wanting to listen on an HTTP port
 		// and so we will give each compute node a random open port to listen on.
-		fport, err := network.GetFreePort()
+		freePort, err := network.GetFreePort()
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to get free port for local publisher")
 		}
-		cfg.Publishers.Types.Local.Port = fport
+		cfg.Publishers.Types.Local.Port = freePort
 	}
 
 	cfg.Orchestrator.Enabled = isRequesterNode
diff --git a/pkg/eventhandler/chained_handlers.go b/pkg/eventhandler/chained_handlers.go
deleted file mode 100644
index 93cbd95273..0000000000
--- a/pkg/eventhandler/chained_handlers.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package eventhandler
-
-import (
-	"context"
-	"fmt"
-	"time"
-
-	"github.com/rs/zerolog/log"
-
-	"github.com/bacalhau-project/bacalhau/pkg/models"
-)
-
-// An event handler implementation that chains multiple event handlers, and accepts a context provider
-// to set up the context once for all handlers.
-type ChainedJobEventHandler struct {
-	eventHandlers   []JobEventHandler
-	contextProvider ContextProvider
-}
-
-func NewChainedJobEventHandler(contextProvider ContextProvider) *ChainedJobEventHandler {
-	return &ChainedJobEventHandler{contextProvider: contextProvider}
-}
-
-func (r *ChainedJobEventHandler) AddHandlers(handlers ...JobEventHandler) {
-	r.eventHandlers = append(r.eventHandlers, handlers...)
-}
-
-func (r *ChainedJobEventHandler) HandleJobEvent(ctx context.Context, event models.JobEvent) (err error) {
-	startTime := time.Now()
-	defer logEvent(ctx, event, startTime)(&err)
-
-	if r.eventHandlers == nil {
-		return fmt.Errorf("no event handlers registered")
-	}
-
-	jobCtx := r.contextProvider.GetContext(ctx, event.JobID)
-
-	// All handlers are called, unless one of them returns an error.
-	for _, handler := range r.eventHandlers {
-		if err = handler.HandleJobEvent(jobCtx, event); err != nil { //nolint:gocritic
-			return err
-		}
-	}
-	return nil
-}
-
-func logEvent(ctx context.Context, event models.JobEvent, startTime time.Time) func(*error) {
-	return func(handlerError *error) {
-		logMsg := log.Ctx(ctx).Debug().
-			Str("EventName", event.EventName.String()).
-			Str("JobID", event.JobID).
-			Str("NodeID", event.SourceNodeID).
-			Str("Status", event.Status).
- Dur("HandleDuration", time.Since(startTime)) - if *handlerError != nil { - logMsg = logMsg.AnErr("HandlerError", *handlerError) - } - - logMsg.Msg("Handled event") - } -} diff --git a/pkg/eventhandler/chained_handlers_test.go b/pkg/eventhandler/chained_handlers_test.go deleted file mode 100644 index cf92117769..0000000000 --- a/pkg/eventhandler/chained_handlers_test.go +++ /dev/null @@ -1,111 +0,0 @@ -//go:build unit || !integration - -package eventhandler - -import ( - "context" - "fmt" - "testing" - - "github.com/google/uuid" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "go.uber.org/mock/gomock" - - "github.com/bacalhau-project/bacalhau/pkg/eventhandler/mock_eventhandler" - "github.com/bacalhau-project/bacalhau/pkg/logger" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -// In order for 'go test' to run this suite, we need to create -// a normal test function and pass our suite to suite.Run -func TestChainedHandlers(t *testing.T) { - suite.Run(t, new(jobEventHandlerSuite)) -} - -type jobEventHandlerSuite struct { - suite.Suite - ctrl *gomock.Controller - chainedHandler *ChainedJobEventHandler - handler1 *mock_eventhandler.MockJobEventHandler - handler2 *mock_eventhandler.MockJobEventHandler - contextProvider *mock_eventhandler.MockContextProvider - context context.Context - event models.JobEvent -} - -// Before each test -func (suite *jobEventHandlerSuite) SetupTest() { - suite.ctrl = gomock.NewController(suite.T()) - suite.handler1 = mock_eventhandler.NewMockJobEventHandler(suite.ctrl) - suite.handler2 = mock_eventhandler.NewMockJobEventHandler(suite.ctrl) - suite.contextProvider = mock_eventhandler.NewMockContextProvider(suite.ctrl) - suite.chainedHandler = NewChainedJobEventHandler(suite.contextProvider) - suite.context = context.WithValue(context.Background(), "test", "test") - suite.event = models.JobEvent{ - EventName: models.JobEventCreated, - JobID: uuid.NewString(), - SourceNodeID: "nodeA", - Status: "this is a test event", - } - logger.ConfigureTestLogging(suite.T()) -} - -func (suite *jobEventHandlerSuite) TearDownTest() { - suite.ctrl.Finish() -} - -func (suite *jobEventHandlerSuite) TestChainedJobEventHandler_HandleJobEvent() { - suite.chainedHandler.AddHandlers(suite.handler1, suite.handler2) - ctx := context.Background() - - // assert context provider is called with the correct context and job id - suite.contextProvider.EXPECT().GetContext(ctx, suite.event.JobID).Return(suite.context) - - // assert both handlers are called with the context provider's context and event - gomock.InOrder( - suite.handler1.EXPECT().HandleJobEvent(suite.context, suite.event).Return(nil), - suite.handler2.EXPECT().HandleJobEvent(suite.context, suite.event).Return(nil), - ) - - // assert no error was returned - require.NoError(suite.T(), suite.chainedHandler.HandleJobEvent(ctx, suite.event)) -} - -func (suite *jobEventHandlerSuite) TestChainedJobEventHandler_HandleJobEventLazilyAdded() { - suite.chainedHandler.AddHandlers(suite.handler1) - suite.chainedHandler.AddHandlers(suite.handler2) - ctx := context.Background() - - // assert context provider is called with the correct context and job id - suite.contextProvider.EXPECT().GetContext(ctx, suite.event.JobID).Return(suite.context) - - // assert both handlers are called with the context provider's context and event - gomock.InOrder( - suite.handler1.EXPECT().HandleJobEvent(suite.context, suite.event).Return(nil), - suite.handler2.EXPECT().HandleJobEvent(suite.context, suite.event).Return(nil), - ) - - 
// assert no error was returned
-	require.NoError(suite.T(), suite.chainedHandler.HandleJobEvent(ctx, suite.event))
-}
-
-func (suite *jobEventHandlerSuite) TestChainedJobEventHandler_HandleJobEventError() {
-	suite.chainedHandler.AddHandlers(suite.handler1)
-	suite.chainedHandler.AddHandlers(suite.handler2)
-	ctx := context.Background()
-	mockError := fmt.Errorf("i am an error")
-
-	// assert context provider is called with the correct context and job id
-	suite.contextProvider.EXPECT().GetContext(ctx, suite.event.JobID).Return(suite.context)
-
-	// mock first handler to return an error, and don't expect the second handler to be called
-	suite.handler1.EXPECT().HandleJobEvent(suite.context, suite.event).Return(mockError)
-
-	// assert the handler's error was returned
-	require.Equal(suite.T(), mockError, suite.chainedHandler.HandleJobEvent(ctx, suite.event))
-}
-
-func (suite *jobEventHandlerSuite) TestChainedJobEventHandler_HandleJobEventEmptyHandlers() {
-	require.Error(suite.T(), suite.chainedHandler.HandleJobEvent(context.Background(), suite.event))
-}
diff --git a/pkg/eventhandler/context_provider.go b/pkg/eventhandler/context_provider.go
deleted file mode 100644
index 774878edcd..0000000000
--- a/pkg/eventhandler/context_provider.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package eventhandler
-
-import (
-	"context"
-	"sync"
-
-	"go.opentelemetry.io/otel/attribute"
-	oteltrace "go.opentelemetry.io/otel/trace"
-
-	"github.com/bacalhau-project/bacalhau/pkg/models"
-	"github.com/bacalhau-project/bacalhau/pkg/telemetry"
-)
-
-// Interface for a context provider that can be used to generate a context to be used to handle
-// job events.
-type ContextProvider interface {
-	GetContext(ctx context.Context, jobID string) context.Context
-}
-
-// TracerContextProvider is a context provider that generates a context along with tracing information.
-// It also implements JobEventHandler to end the local lifecycle context for a job when it is completed.
-type TracerContextProvider struct {
-	nodeID          string
-	jobNodeContexts map[string]context.Context // per-node job lifecycle
-	contextMutex    sync.RWMutex
-}
-
-func NewTracerContextProvider(nodeID string) *TracerContextProvider {
-	return &TracerContextProvider{
-		nodeID:          nodeID,
-		jobNodeContexts: make(map[string]context.Context),
-	}
-}
-
-func (t *TracerContextProvider) GetContext(ctx context.Context, jobID string) context.Context {
-	t.contextMutex.Lock()
-	defer t.contextMutex.Unlock()
-
-	jobCtx, _ := telemetry.Span(ctx, "pkg/eventhandler/JobEventHandler.HandleJobEvent",
-		oteltrace.WithSpanKind(oteltrace.SpanKindInternal),
-		oteltrace.WithAttributes(
-			attribute.String(telemetry.TracerAttributeNameNodeID, t.nodeID),
-			attribute.String(telemetry.TracerAttributeNameJobID, jobID),
-		),
-	)
-
-	// keep the latest context to clean it up during shutdown if necessary
-	t.jobNodeContexts[jobID] = jobCtx
-	return jobCtx
-}
-
-func (t *TracerContextProvider) HandleJobEvent(ctx context.Context, event models.JobEvent) error {
-	// If the event is known to be terminal, end the local lifecycle context:
-	if event.EventName.IsTerminal() {
-		t.endJobNodeContext(ctx, event.JobID)
-	}
-
-	return nil
-}
-
-func (t *TracerContextProvider) Shutdown() error {
-	t.contextMutex.RLock()
-	defer t.contextMutex.RUnlock()
-
-	for _, ctx := range t.jobNodeContexts {
-		oteltrace.SpanFromContext(ctx).End()
-	}
-
-	// clear the maps
-	t.jobNodeContexts = make(map[string]context.Context)
-
-	return nil
-}
-
-// endJobNodeContext ends the local lifecycle context for a job.
-func (t *TracerContextProvider) endJobNodeContext(ctx context.Context, jobID string) { - oteltrace.SpanFromContext(ctx).End() - t.contextMutex.Lock() - defer t.contextMutex.Unlock() - delete(t.jobNodeContexts, jobID) -} diff --git a/pkg/eventhandler/interfaces.go b/pkg/eventhandler/interfaces.go deleted file mode 100644 index 5c332daa28..0000000000 --- a/pkg/eventhandler/interfaces.go +++ /dev/null @@ -1,21 +0,0 @@ -package eventhandler - -//go:generate mockgen --source interfaces.go --destination mock_eventhandler/mock_handlers.go --package mock_eventhandler - -import ( - "context" - - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -// A job event handler is a component that is notified of events related to jobs. -type JobEventHandler interface { - HandleJobEvent(ctx context.Context, event models.JobEvent) error -} - -// function that implements the JobEventHandler interface -type JobEventHandlerFunc func(ctx context.Context, event models.JobEvent) error - -func (f JobEventHandlerFunc) HandleJobEvent(ctx context.Context, event models.JobEvent) error { - return f(ctx, event) -} diff --git a/pkg/eventhandler/mock_eventhandler/mock_contextprovider.go b/pkg/eventhandler/mock_eventhandler/mock_contextprovider.go deleted file mode 100644 index 4bb1fc0721..0000000000 --- a/pkg/eventhandler/mock_eventhandler/mock_contextprovider.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: system/context_provider.go - -// Package mock_system is a generated GoMock package. -package mock_eventhandler - -import ( - context "context" - reflect "reflect" - - gomock "go.uber.org/mock/gomock" -) - -// MockContextProvider is a mock of ContextProvider interface. -type MockContextProvider struct { - ctrl *gomock.Controller - recorder *MockContextProviderMockRecorder -} - -// MockContextProviderMockRecorder is the mock recorder for MockContextProvider. -type MockContextProviderMockRecorder struct { - mock *MockContextProvider -} - -// NewMockContextProvider creates a new mock instance. -func NewMockContextProvider(ctrl *gomock.Controller) *MockContextProvider { - mock := &MockContextProvider{ctrl: ctrl} - mock.recorder = &MockContextProviderMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockContextProvider) EXPECT() *MockContextProviderMockRecorder { - return m.recorder -} - -// GetContext mocks base method. -func (m *MockContextProvider) GetContext(ctx context.Context, jobID string) context.Context { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetContext", ctx, jobID) - ret0, _ := ret[0].(context.Context) - return ret0 -} - -// GetContext indicates an expected call of GetContext. -func (mr *MockContextProviderMockRecorder) GetContext(ctx, jobID interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetContext", reflect.TypeOf((*MockContextProvider)(nil).GetContext), ctx, jobID) -} diff --git a/pkg/eventhandler/mock_eventhandler/mock_handlers.go b/pkg/eventhandler/mock_eventhandler/mock_handlers.go deleted file mode 100644 index 832e5914df..0000000000 --- a/pkg/eventhandler/mock_eventhandler/mock_handlers.go +++ /dev/null @@ -1,55 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: interfaces.go -// -// Generated by this command: -// -// mockgen --source interfaces.go --destination mock_eventhandler/mock_handlers.go --package mock_eventhandler -// - -// Package mock_eventhandler is a generated GoMock package. 
-package mock_eventhandler - -import ( - context "context" - reflect "reflect" - - models "github.com/bacalhau-project/bacalhau/pkg/models" - gomock "go.uber.org/mock/gomock" -) - -// MockJobEventHandler is a mock of JobEventHandler interface. -type MockJobEventHandler struct { - ctrl *gomock.Controller - recorder *MockJobEventHandlerMockRecorder -} - -// MockJobEventHandlerMockRecorder is the mock recorder for MockJobEventHandler. -type MockJobEventHandlerMockRecorder struct { - mock *MockJobEventHandler -} - -// NewMockJobEventHandler creates a new mock instance. -func NewMockJobEventHandler(ctrl *gomock.Controller) *MockJobEventHandler { - mock := &MockJobEventHandler{ctrl: ctrl} - mock.recorder = &MockJobEventHandlerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockJobEventHandler) EXPECT() *MockJobEventHandlerMockRecorder { - return m.recorder -} - -// HandleJobEvent mocks base method. -func (m *MockJobEventHandler) HandleJobEvent(ctx context.Context, event models.JobEvent) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HandleJobEvent", ctx, event) - ret0, _ := ret[0].(error) - return ret0 -} - -// HandleJobEvent indicates an expected call of HandleJobEvent. -func (mr *MockJobEventHandlerMockRecorder) HandleJobEvent(ctx, event any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HandleJobEvent", reflect.TypeOf((*MockJobEventHandler)(nil).HandleJobEvent), ctx, event) -} diff --git a/pkg/eventhandler/tracer.go b/pkg/eventhandler/tracer.go deleted file mode 100644 index 4db8f63dcb..0000000000 --- a/pkg/eventhandler/tracer.go +++ /dev/null @@ -1,78 +0,0 @@ -package eventhandler - -import ( - "context" - "fmt" - "io/fs" - "os" - - "github.com/rs/zerolog" - - "github.com/bacalhau-project/bacalhau/pkg/lib/marshaller" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -// Tracer is a JobEventHandler that will marshal the received event to a -// file-based log. -// -// Note that we don't need any mutexes here because writing to an os.File is -// thread-safe (see https://github.com/rs/zerolog/blob/master/writer.go#L33) -type Tracer struct { - LogFile *os.File - Logger zerolog.Logger -} - -const eventTracerFilePerms fs.FileMode = 0644 - -// Returns an eventhandler.Tracer that writes to config.GetEventTracerPath(), or -// an error if the file can't be opened. -func NewTracer(path string) (*Tracer, error) { - return NewTracerToFile(path) -} - -// Returns an eventhandler.Tracer that writes to the specified filename, or an -// error if the file can't be opened. -func NewTracerToFile(filename string) (*Tracer, error) { - file, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_APPEND, eventTracerFilePerms) - if err != nil { - return nil, err - } - - return &Tracer{ - LogFile: file, - Logger: zerolog.New(file).With().Timestamp().Logger(), - }, nil -} - -// HandleJobEvent implements JobEventHandler -func (t *Tracer) HandleJobEvent(ctx context.Context, event models.JobEvent) error { - trace(t.Logger, event) - return nil -} - -func trace[Event any](log zerolog.Logger, event Event) { - log.Log(). - Str("Type", fmt.Sprintf("%T", event)). - Func(func(e *zerolog.Event) { - // TODO: #828 Potential hotspot - marshaling is expensive, and - // we do it for every event. 
-			eventJSON, err := marshaller.JSONMarshalWithMax(event)
-			if err == nil {
-				e.RawJSON("Event", eventJSON)
-			} else {
-				e.AnErr("MarshalError", err)
-			}
-		}).Send()
-}
-
-func (t *Tracer) Shutdown() error {
-	if t.LogFile != nil {
-		err := t.LogFile.Close()
-		t.LogFile = nil
-		t.Logger = zerolog.Nop()
-		return err
-	}
-	return nil
-}
-
-var _ JobEventHandler = (*Tracer)(nil)
diff --git a/pkg/executor/docker/executor.go b/pkg/executor/docker/executor.go
index 4ead9dea53..adb77ac97d 100644
--- a/pkg/executor/docker/executor.go
+++ b/pkg/executor/docker/executor.go
@@ -222,7 +222,7 @@ func (e *Executor) doWait(ctx context.Context, out chan *models.RunCommandResult
 		out <- handle.result
 	} else {
 		// NB(forrest): this shouldn't happen with the wasm and docker executors, but handling it as it
-		// represents a significant error in executor logic, which may occur in future pluggable executor impls.
+		// represents a significant error in executor logic, which may occur in future pluggable executor implementations.
 		errCh <- fmt.Errorf("execution (%s) result is nil", handle.executionID)
 	}
 }
@@ -473,8 +473,8 @@ func makeContainerMounts(
 			return nil, fmt.Errorf("output volume has no Location: %+v", output)
 		}
 
-		srcd := filepath.Join(resultsDir, output.Name)
-		if err := os.Mkdir(srcd, util.OS_ALL_R|util.OS_ALL_X|util.OS_USER_W); err != nil {
+		srcDir := filepath.Join(resultsDir, output.Name)
+		if err := os.Mkdir(srcDir, util.OS_ALL_R|util.OS_ALL_X|util.OS_USER_W); err != nil {
 			return nil, fmt.Errorf("failed to create results dir for execution: %w", err)
 		}
 
@@ -486,7 +486,7 @@ func makeContainerMounts(
 			// this is an output volume so can be written to
 			ReadOnly: false,
 			// we create a named folder in the job results folder for this output
-			Source: srcd,
+			Source: srcDir,
 			// the path of the output volume is from the perspective of inside the container
 			Target: output.Path,
 		})
diff --git a/pkg/executor/docker/executor_test.go b/pkg/executor/docker/executor_test.go
index 085ddb239a..f1c8a3812d 100644
--- a/pkg/executor/docker/executor_test.go
+++ b/pkg/executor/docker/executor_test.go
@@ -436,7 +436,7 @@ func (s *ExecutorTestSuite) TestDockerExecutionCancellation() {
 
 	// This is important to do. In our docker executor, we set active to true, before calling the docker client with ContainerStart
 	// Hence there is a bit of time before the container actually gets started. The correct way of identifying whether
-	// a contianer has started or not is via activeCh. We want to make sure that contianer is started before canceling the execution.
+	// a container has started or not is via activeCh. We want to make sure that the container is started before canceling the execution.
 	handler, _ := s.executor.handlers.Get(executionID)
 	<-handler.activeCh
 
diff --git a/pkg/executor/docker/models/types_test.go b/pkg/executor/docker/models/types_test.go
index 4c0cc0f4a1..ff179f18fa 100644
--- a/pkg/executor/docker/models/types_test.go
+++ b/pkg/executor/docker/models/types_test.go
@@ -21,14 +21,14 @@ func TestDockerEngineBuilder_RoundTrip(t *testing.T) {
 		{
 			name: "valid spec all fields",
 			builder: func() *DockerEngineBuilder {
-				return NewDockerEngineBuilder("myimage").
+				return NewDockerEngineBuilder("myImage").
 					WithEntrypoint("bash", "-c").
 					WithEnvironmentVariables("KEY1=VALUE1", "KEY2=VALUE2").
 					WithWorkingDirectory("/app").
WithParameters("arg1", "arg2") }, expectedSpec: EngineSpec{ - Image: "myimage", + Image: "myImage", Entrypoint: []string{"bash", "-c"}, EnvironmentVariables: []string{"KEY1=VALUE1", "KEY2=VALUE2"}, WorkingDirectory: "/app", @@ -38,13 +38,13 @@ func TestDockerEngineBuilder_RoundTrip(t *testing.T) { { name: "valid spec no entry point", builder: func() *DockerEngineBuilder { - return NewDockerEngineBuilder("myimage"). + return NewDockerEngineBuilder("myImage"). WithEnvironmentVariables("KEY1=VALUE1", "KEY2=VALUE2"). WithWorkingDirectory("/app"). WithParameters("arg1", "arg2") }, expectedSpec: EngineSpec{ - Image: "myimage", + Image: "myImage", EnvironmentVariables: []string{"KEY1=VALUE1", "KEY2=VALUE2"}, WorkingDirectory: "/app", Parameters: []string{"arg1", "arg2"}, @@ -53,13 +53,13 @@ func TestDockerEngineBuilder_RoundTrip(t *testing.T) { { name: "valid spec no env var", builder: func() *DockerEngineBuilder { - return NewDockerEngineBuilder("myimage"). + return NewDockerEngineBuilder("myImage"). WithEntrypoint("bash", "-c"). WithWorkingDirectory("/app"). WithParameters("arg1", "arg2") }, expectedSpec: EngineSpec{ - Image: "myimage", + Image: "myImage", Entrypoint: []string{"bash", "-c"}, WorkingDirectory: "/app", Parameters: []string{"arg1", "arg2"}, @@ -68,13 +68,13 @@ func TestDockerEngineBuilder_RoundTrip(t *testing.T) { { name: "valid spec no params", builder: func() *DockerEngineBuilder { - return NewDockerEngineBuilder("myimage"). + return NewDockerEngineBuilder("myImage"). WithEntrypoint("bash", "-c"). WithEnvironmentVariables("KEY1=VALUE1", "KEY2=VALUE2"). WithWorkingDirectory("/app") }, expectedSpec: EngineSpec{ - Image: "myimage", + Image: "myImage", Entrypoint: []string{"bash", "-c"}, EnvironmentVariables: []string{"KEY1=VALUE1", "KEY2=VALUE2"}, WorkingDirectory: "/app", @@ -83,13 +83,13 @@ func TestDockerEngineBuilder_RoundTrip(t *testing.T) { { name: "valid spec no working dir", builder: func() *DockerEngineBuilder { - return NewDockerEngineBuilder("myimage"). + return NewDockerEngineBuilder("myImage"). WithEntrypoint("bash", "-c"). WithEnvironmentVariables("KEY1=VALUE1", "KEY2=VALUE2"). 
WithParameters("arg1", "arg2") }, expectedSpec: EngineSpec{ - Image: "myimage", + Image: "myImage", Entrypoint: []string{"bash", "-c"}, EnvironmentVariables: []string{"KEY1=VALUE1", "KEY2=VALUE2"}, Parameters: []string{"arg1", "arg2"}, diff --git a/pkg/executor/docker/network.go b/pkg/executor/docker/network.go index 73c22df42a..21495c7c3b 100644 --- a/pkg/executor/docker/network.go +++ b/pkg/executor/docker/network.go @@ -135,10 +135,10 @@ func (e *Executor) createHTTPGateway( } // Create the gateway container initially attached to the *host* network - domainList, derr := json.Marshal(networkConfig.DomainSet()) - clientList, cerr := json.Marshal([]string{subnet}) - if derr != nil || cerr != nil { - return nil, nil, pkgerrors.Wrap(errors.Join(derr, cerr), "error preparing gateway config") + domainList, dErr := json.Marshal(networkConfig.DomainSet()) + clientList, cErr := json.Marshal([]string{subnet}) + if dErr != nil || cErr != nil { + return nil, nil, pkgerrors.Wrap(errors.Join(dErr, cErr), "error preparing gateway config") } gatewayContainer, err := e.client.ContainerCreate(ctx, &container.Config{ diff --git a/pkg/executor/plugins/executors/docker/Makefile b/pkg/executor/plugins/executors/docker/Makefile deleted file mode 100644 index 84bcf56356..0000000000 --- a/pkg/executor/plugins/executors/docker/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -docker: main.go - go build -o ./bin/bacalhau-docker-executor main.go - -clean: - rm -f ./bin/bacalhau-docker-executor diff --git a/pkg/executor/plugins/executors/docker/bin/.gitignore b/pkg/executor/plugins/executors/docker/bin/.gitignore deleted file mode 100644 index c538edc3e7..0000000000 --- a/pkg/executor/plugins/executors/docker/bin/.gitignore +++ /dev/null @@ -1 +0,0 @@ -bacalhau-docker-executor diff --git a/pkg/executor/plugins/executors/docker/bin/.keep b/pkg/executor/plugins/executors/docker/bin/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pkg/executor/plugins/executors/docker/main.go b/pkg/executor/plugins/executors/docker/main.go deleted file mode 100644 index b560e1b5f7..0000000000 --- a/pkg/executor/plugins/executors/docker/main.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "os" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - "github.com/bacalhau-project/bacalhau/pkg/config/types" - "github.com/bacalhau-project/bacalhau/pkg/executor/docker" - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc" -) - -const PluggableExecutorPluginName = "PLUGGABLE_EXECUTOR" - -// HandshakeConfig is used to just do a basic handshake between -// a plugin and host. If the handshake fails, a user friendly error is shown. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. 
-var HandshakeConfig = plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "EXECUTOR_PLUGIN", - MagicCookieValue: "bacalhau_executor", -} - -func main() { // Create an hclog.Logger - logger := hclog.New(&hclog.LoggerOptions{ - Name: "docker-plugin", - Output: os.Stderr, - Level: hclog.Trace, - }) - - cfg := types.DockerManifestCache{ - Size: 1000, - TTL: types.Duration(1 * time.Hour), - Refresh: types.Duration(1 * time.Hour), - } - dockerExecutor, err := docker.NewExecutor( - "bacalhau-pluggable-executor-docker", - types.Docker{ManifestCache: cfg}, - ) - if err != nil { - logger.Error(err.Error()) - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: HandshakeConfig, - Plugins: map[string]plugin.Plugin{ - PluggableExecutorPluginName: &grpc.ExecutorGRPCPlugin{ - Impl: dockerExecutor, - }, - }, - Logger: logger, - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/pkg/executor/plugins/executors/wasm/Makefile b/pkg/executor/plugins/executors/wasm/Makefile deleted file mode 100644 index f62172bbc6..0000000000 --- a/pkg/executor/plugins/executors/wasm/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -wasm: main.go - go build -o ./bin/bacalhau-wasm-executor main.go - -clean: - rm -f ./bin/bacalhau-wasm-executor diff --git a/pkg/executor/plugins/executors/wasm/bin/.gitignore b/pkg/executor/plugins/executors/wasm/bin/.gitignore deleted file mode 100644 index 7f34dc1a2c..0000000000 --- a/pkg/executor/plugins/executors/wasm/bin/.gitignore +++ /dev/null @@ -1 +0,0 @@ -bacalhau-wasm-executor diff --git a/pkg/executor/plugins/executors/wasm/bin/.keep b/pkg/executor/plugins/executors/wasm/bin/.keep deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pkg/executor/plugins/executors/wasm/main.go b/pkg/executor/plugins/executors/wasm/main.go deleted file mode 100644 index 2712113123..0000000000 --- a/pkg/executor/plugins/executors/wasm/main.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "log" - "os" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin" - - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc" - "github.com/bacalhau-project/bacalhau/pkg/executor/wasm" -) - -const PluggableExecutorPluginName = "PLUGGABLE_EXECUTOR" - -// HandshakeConfig is used to just do a basic handshake between -// a plugin and host. If the handshake fails, a user friendly error is shown. -// This prevents users from executing bad plugins or executing a plugin -// directory. It is a UX feature, not a security feature. 
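For context on how these deleted plugin binaries were consumed: the host process must present a `HandshakeConfig` identical to the one the plugin serves (below), or `go-plugin` refuses to attach. The following is a minimal sketch of the standard `hashicorp/go-plugin` host-side pattern, not the repo's actual plugin loader; the binary path and the `"PLUGGABLE_EXECUTOR"` name are taken from the deleted files above, everything else is assumed for illustration.

```go
package main

import (
	"log"
	"os/exec"

	"github.com/hashicorp/go-plugin"

	"github.com/bacalhau-project/bacalhau/pkg/executor"
	"github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc"
)

func main() {
	// Must match the plugin's HandshakeConfig exactly, or go-plugin aborts
	// the launch with a user-friendly error (UX check, not security).
	handshake := plugin.HandshakeConfig{
		ProtocolVersion:  1,
		MagicCookieKey:   "EXECUTOR_PLUGIN",
		MagicCookieValue: "bacalhau_executor",
	}

	client := plugin.NewClient(&plugin.ClientConfig{
		HandshakeConfig: handshake,
		Plugins: map[string]plugin.Plugin{
			// Same name the plugin registers under (PluggableExecutorPluginName).
			"PLUGGABLE_EXECUTOR": &grpc.ExecutorGRPCPlugin{},
		},
		Cmd:              exec.Command("./bin/bacalhau-wasm-executor"),
		AllowedProtocols: []plugin.Protocol{plugin.ProtocolGRPC},
	})
	defer client.Kill()

	// Dispense hands back the GRPCClient defined in client.go below, which
	// satisfies the executor.Executor interface.
	protocol, err := client.Client()
	if err != nil {
		log.Fatal(err)
	}
	raw, err := protocol.Dispense("PLUGGABLE_EXECUTOR")
	if err != nil {
		log.Fatal(err)
	}
	e := raw.(executor.Executor)
	_ = e // ready for Run / Start / Wait calls
}
```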
-var HandshakeConfig = plugin.HandshakeConfig{ - ProtocolVersion: 1, - MagicCookieKey: "EXECUTOR_PLUGIN", - MagicCookieValue: "bacalhau_executor", -} - -func main() { - logger := hclog.New(&hclog.LoggerOptions{ - Name: "wasm-plugin", - Output: os.Stderr, - Level: hclog.Trace, - }) - wasmExecutor, err := wasm.NewExecutor() - if err != nil { - log.Fatal(err) - } - - plugin.Serve(&plugin.ServeConfig{ - HandshakeConfig: HandshakeConfig, - Plugins: map[string]plugin.Plugin{ - PluggableExecutorPluginName: &grpc.ExecutorGRPCPlugin{ - Impl: wasmExecutor, - }, - }, - Logger: logger, - GRPCServer: plugin.DefaultGRPCServer, - }) -} diff --git a/pkg/executor/plugins/grpc/client.go b/pkg/executor/plugins/grpc/client.go deleted file mode 100644 index 1e843ac62d..0000000000 --- a/pkg/executor/plugins/grpc/client.go +++ /dev/null @@ -1,192 +0,0 @@ -package grpc - -import ( - "context" - "encoding/json" - "io" - - "github.com/bacalhau-project/bacalhau/pkg/bidstrategy" - "github.com/bacalhau-project/bacalhau/pkg/executor" - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc/proto" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -// TODO: Complete protobuf structure, rather than merely wrapping serialized JSON bytes in protobuf containers. -// Details in: https://github.com/bacalhau-project/bacalhau/issues/2700 - -var _ (executor.Executor) = (*GRPCClient)(nil) - -type GRPCClient struct { - client proto.ExecutorClient -} - -func (c *GRPCClient) IsInstalled(ctx context.Context) (bool, error) { - resp, err := c.client.IsInstalled(ctx, &proto.IsInstalledRequest{}) - if err != nil { - return false, err - } - return resp.Installed, nil -} - -func (c *GRPCClient) ShouldBid(ctx context.Context, request bidstrategy.BidStrategyRequest) (bidstrategy.BidStrategyResponse, error) { - b, err := json.Marshal(request) - if err != nil { - return bidstrategy.BidStrategyResponse{}, err - } - resp, err := c.client.ShouldBid(ctx, &proto.ShouldBidRequest{ - BidRequest: b, - }) - if err != nil { - return bidstrategy.BidStrategyResponse{}, err - } - var out bidstrategy.BidStrategyResponse - if err := json.Unmarshal(resp.BidResponse, &out); err != nil { - return bidstrategy.BidStrategyResponse{}, nil - } - return out, nil -} - -func (c *GRPCClient) ShouldBidBasedOnUsage( - ctx context.Context, - request bidstrategy.BidStrategyRequest, - usage models.Resources) (bidstrategy.BidStrategyResponse, error) { - reqBytes, err := json.Marshal(request) - if err != nil { - return bidstrategy.BidStrategyResponse{}, err - } - usageBytes, err := json.Marshal(usage) - if err != nil { - return bidstrategy.BidStrategyResponse{}, err - } - resp, err := c.client.ShouldBidBasedOnUsage(ctx, &proto.ShouldBidBasedOnUsageRequest{ - BidRequest: reqBytes, - Usage: usageBytes, - }) - if err != nil { - return bidstrategy.BidStrategyResponse{}, err - } - var out bidstrategy.BidStrategyResponse - if err := json.Unmarshal(resp.BidResponse, &out); err != nil { - return bidstrategy.BidStrategyResponse{}, nil - } - return out, nil -} - -func (c *GRPCClient) Run(ctx context.Context, args *executor.RunCommandRequest) (*models.RunCommandResult, error) { - b, err := json.Marshal(args) - if err != nil { - return nil, err - } - resp, err := c.client.Run(ctx, &proto.RunCommandRequest{Params: b}) - if err != nil { - return nil, err - } - out := new(models.RunCommandResult) - if err := json.Unmarshal(resp.Params, out); err != nil { - return nil, err - } - return out, nil -} - -func (c *GRPCClient) Start(ctx context.Context, request 
*executor.RunCommandRequest) error { - b, err := json.Marshal(request) - if err != nil { - return err - } - _, err = c.client.Start(ctx, &proto.RunCommandRequest{Params: b}) - if err != nil { - return err - } - - return nil -} - -func (c *GRPCClient) Wait(ctx context.Context, executionID string) (<-chan *models.RunCommandResult, <-chan error) { - // Create output and error channels - resultC := make(chan *models.RunCommandResult, 1) - errC := make(chan error, 1) - - // Initialize the WaitRequest - waitReq := &proto.WaitRequest{ - ExecutionID: executionID, - } - - // Make a server-streaming RPC call - stream, err := c.client.Wait(ctx, waitReq) - if err != nil { - errC <- err - return resultC, errC - } - - go func() { - defer close(resultC) - defer close(errC) - - // block until we receive a message from the stream or an error. - resp, err := stream.Recv() - if err != nil { - errC <- err - return - } - - // Convert proto.WaitResponse to models.RunCommandResult - out := new(models.RunCommandResult) - if err := json.Unmarshal(resp.Params, out); err != nil { - errC <- err - return - } - - // Send the result to the channel - resultC <- out - }() - - return resultC, errC -} - -func (c *GRPCClient) Cancel(ctx context.Context, id string) error { - _, err := c.client.Cancel(ctx, &proto.CancelCommandRequest{ExecutionID: id}) - if err != nil { - return err - } - return nil -} - -func (c *GRPCClient) GetLogStream(ctx context.Context, request executor.LogStreamRequest) (io.ReadCloser, error) { - respStream, err := c.client.GetOutputStream(ctx, &proto.OutputStreamRequest{ - ExecutionID: request.ExecutionID, - History: request.Tail, - Follow: request.Follow, - }) - if err != nil { - return nil, err - } - - return &StreamReader{stream: respStream}, nil -} - -type StreamReader struct { - stream proto.Executor_GetOutputStreamClient - buffer []byte -} - -func (sr *StreamReader) Read(p []byte) (n int, err error) { - if len(sr.buffer) == 0 { // if buffer is empty, fill it by reading from the stream - response, err := sr.stream.Recv() - if err != nil { - if err == io.EOF { - return 0, nil - } - return 0, err - } - sr.buffer = response.Data - } - - n = copy(p, sr.buffer) // copy from buffer to p - sr.buffer = sr.buffer[n:] // update buffer - - return n, nil -} - -func (sr *StreamReader) Close() error { - return sr.stream.CloseSend() -} diff --git a/pkg/executor/plugins/grpc/interface.go b/pkg/executor/plugins/grpc/interface.go deleted file mode 100644 index d735597feb..0000000000 --- a/pkg/executor/plugins/grpc/interface.go +++ /dev/null @@ -1,25 +0,0 @@ -package grpc - -import ( - "context" - - "github.com/hashicorp/go-plugin" - "google.golang.org/grpc" - - "github.com/bacalhau-project/bacalhau/pkg/executor" - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc/proto" -) - -type ExecutorGRPCPlugin struct { - plugin.Plugin - Impl executor.Executor -} - -func (p *ExecutorGRPCPlugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { - proto.RegisterExecutorServer(s, &GRPCServer{Impl: p.Impl}) - return nil -} - -func (p *ExecutorGRPCPlugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { - return &GRPCClient{client: proto.NewExecutorClient(c)}, nil -} diff --git a/pkg/executor/plugins/grpc/proto/Makefile b/pkg/executor/plugins/grpc/proto/Makefile deleted file mode 100644 index 94852376ca..0000000000 --- a/pkg/executor/plugins/grpc/proto/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -.PHONY: all -all: executor.proto - @echo "Done elsewhere" - # 
protoc --go_out=plugins=grpc:. executor.proto diff --git a/pkg/executor/plugins/grpc/proto/executor.pb.go b/pkg/executor/plugins/grpc/proto/executor.pb.go deleted file mode 100644 index c28e0497c7..0000000000 --- a/pkg/executor/plugins/grpc/proto/executor.pb.go +++ /dev/null @@ -1,951 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.28.1 -// protoc v4.24.3 -// source: executor.proto - -package proto - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type StartResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *StartResponse) Reset() { - *x = StartResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StartResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StartResponse) ProtoMessage() {} - -func (x *StartResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StartResponse.ProtoReflect.Descriptor instead. -func (*StartResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{0} -} - -type RunCommandRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Params []byte `protobuf:"bytes,1,opt,name=Params,proto3" json:"Params,omitempty"` -} - -func (x *RunCommandRequest) Reset() { - *x = RunCommandRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RunCommandRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RunCommandRequest) ProtoMessage() {} - -func (x *RunCommandRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RunCommandRequest.ProtoReflect.Descriptor instead. 
-func (*RunCommandRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{1} -} - -func (x *RunCommandRequest) GetParams() []byte { - if x != nil { - return x.Params - } - return nil -} - -type RunCommandResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Params []byte `protobuf:"bytes,1,opt,name=Params,proto3" json:"Params,omitempty"` -} - -func (x *RunCommandResponse) Reset() { - *x = RunCommandResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RunCommandResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RunCommandResponse) ProtoMessage() {} - -func (x *RunCommandResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RunCommandResponse.ProtoReflect.Descriptor instead. -func (*RunCommandResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{2} -} - -func (x *RunCommandResponse) GetParams() []byte { - if x != nil { - return x.Params - } - return nil -} - -type CancelCommandRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExecutionID string `protobuf:"bytes,1,opt,name=ExecutionID,proto3" json:"ExecutionID,omitempty"` -} - -func (x *CancelCommandRequest) Reset() { - *x = CancelCommandRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelCommandRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelCommandRequest) ProtoMessage() {} - -func (x *CancelCommandRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelCommandRequest.ProtoReflect.Descriptor instead. 
-func (*CancelCommandRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{3} -} - -func (x *CancelCommandRequest) GetExecutionID() string { - if x != nil { - return x.ExecutionID - } - return "" -} - -type CancelCommandResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *CancelCommandResponse) Reset() { - *x = CancelCommandResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *CancelCommandResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CancelCommandResponse) ProtoMessage() {} - -func (x *CancelCommandResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CancelCommandResponse.ProtoReflect.Descriptor instead. -func (*CancelCommandResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{4} -} - -type IsInstalledRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *IsInstalledRequest) Reset() { - *x = IsInstalledRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IsInstalledRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IsInstalledRequest) ProtoMessage() {} - -func (x *IsInstalledRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IsInstalledRequest.ProtoReflect.Descriptor instead. -func (*IsInstalledRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{5} -} - -type IsInstalledResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Installed bool `protobuf:"varint,1,opt,name=Installed,proto3" json:"Installed,omitempty"` -} - -func (x *IsInstalledResponse) Reset() { - *x = IsInstalledResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *IsInstalledResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*IsInstalledResponse) ProtoMessage() {} - -func (x *IsInstalledResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use IsInstalledResponse.ProtoReflect.Descriptor instead. 
-func (*IsInstalledResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{6} -} - -func (x *IsInstalledResponse) GetInstalled() bool { - if x != nil { - return x.Installed - } - return false -} - -type ShouldBidRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BidRequest []byte `protobuf:"bytes,1,opt,name=BidRequest,proto3" json:"BidRequest,omitempty"` -} - -func (x *ShouldBidRequest) Reset() { - *x = ShouldBidRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShouldBidRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShouldBidRequest) ProtoMessage() {} - -func (x *ShouldBidRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShouldBidRequest.ProtoReflect.Descriptor instead. -func (*ShouldBidRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{7} -} - -func (x *ShouldBidRequest) GetBidRequest() []byte { - if x != nil { - return x.BidRequest - } - return nil -} - -type ShouldBidBasedOnUsageRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BidRequest []byte `protobuf:"bytes,1,opt,name=BidRequest,proto3" json:"BidRequest,omitempty"` - Usage []byte `protobuf:"bytes,2,opt,name=Usage,proto3" json:"Usage,omitempty"` -} - -func (x *ShouldBidBasedOnUsageRequest) Reset() { - *x = ShouldBidBasedOnUsageRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShouldBidBasedOnUsageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShouldBidBasedOnUsageRequest) ProtoMessage() {} - -func (x *ShouldBidBasedOnUsageRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShouldBidBasedOnUsageRequest.ProtoReflect.Descriptor instead. 
-func (*ShouldBidBasedOnUsageRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{8} -} - -func (x *ShouldBidBasedOnUsageRequest) GetBidRequest() []byte { - if x != nil { - return x.BidRequest - } - return nil -} - -func (x *ShouldBidBasedOnUsageRequest) GetUsage() []byte { - if x != nil { - return x.Usage - } - return nil -} - -// shared by both semantic and resource bid -type ShouldBidResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - BidResponse []byte `protobuf:"bytes,1,opt,name=BidResponse,proto3" json:"BidResponse,omitempty"` -} - -func (x *ShouldBidResponse) Reset() { - *x = ShouldBidResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ShouldBidResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ShouldBidResponse) ProtoMessage() {} - -func (x *ShouldBidResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ShouldBidResponse.ProtoReflect.Descriptor instead. -func (*ShouldBidResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{9} -} - -func (x *ShouldBidResponse) GetBidResponse() []byte { - if x != nil { - return x.BidResponse - } - return nil -} - -type OutputStreamRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExecutionID string `protobuf:"bytes,1,opt,name=ExecutionID,proto3" json:"ExecutionID,omitempty"` - History bool `protobuf:"varint,2,opt,name=History,proto3" json:"History,omitempty"` - Follow bool `protobuf:"varint,3,opt,name=Follow,proto3" json:"Follow,omitempty"` -} - -func (x *OutputStreamRequest) Reset() { - *x = OutputStreamRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OutputStreamRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OutputStreamRequest) ProtoMessage() {} - -func (x *OutputStreamRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OutputStreamRequest.ProtoReflect.Descriptor instead. 
-func (*OutputStreamRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{10} -} - -func (x *OutputStreamRequest) GetExecutionID() string { - if x != nil { - return x.ExecutionID - } - return "" -} - -func (x *OutputStreamRequest) GetHistory() bool { - if x != nil { - return x.History - } - return false -} - -func (x *OutputStreamRequest) GetFollow() bool { - if x != nil { - return x.Follow - } - return false -} - -type OutputStreamResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` -} - -func (x *OutputStreamResponse) Reset() { - *x = OutputStreamResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *OutputStreamResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*OutputStreamResponse) ProtoMessage() {} - -func (x *OutputStreamResponse) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use OutputStreamResponse.ProtoReflect.Descriptor instead. -func (*OutputStreamResponse) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{11} -} - -func (x *OutputStreamResponse) GetData() []byte { - if x != nil { - return x.Data - } - return nil -} - -type WaitRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExecutionID string `protobuf:"bytes,1,opt,name=ExecutionID,proto3" json:"ExecutionID,omitempty"` -} - -func (x *WaitRequest) Reset() { - *x = WaitRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_executor_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *WaitRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*WaitRequest) ProtoMessage() {} - -func (x *WaitRequest) ProtoReflect() protoreflect.Message { - mi := &file_executor_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use WaitRequest.ProtoReflect.Descriptor instead. 
-func (*WaitRequest) Descriptor() ([]byte, []int) { - return file_executor_proto_rawDescGZIP(), []int{12} -} - -func (x *WaitRequest) GetExecutionID() string { - if x != nil { - return x.ExecutionID - } - return "" -} - -var File_executor_proto protoreflect.FileDescriptor - -var file_executor_proto_rawDesc = []byte{ - 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0f, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x72, 0x74, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x0a, 0x11, 0x52, 0x75, 0x6e, 0x43, - 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x06, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x22, 0x2c, 0x0a, 0x12, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, - 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x50, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x22, 0x38, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x43, 0x6f, 0x6d, - 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x22, 0x17, 0x0a, - 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x14, 0x0a, 0x12, 0x49, 0x73, 0x49, 0x6e, 0x73, 0x74, - 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x33, 0x0a, 0x13, - 0x49, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, - 0x64, 0x22, 0x32, 0x0a, 0x10, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x42, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x42, 0x69, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x54, 0x0a, 0x1c, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, - 0x69, 0x64, 0x42, 0x61, 0x73, 0x65, 0x64, 0x4f, 0x6e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x42, 0x69, 0x64, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x42, 0x69, 0x64, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x55, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x55, 0x73, 0x61, 0x67, 0x65, 0x22, 0x35, 0x0a, 0x11, 0x53, - 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x20, 0x0a, 0x0b, 0x42, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x42, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x22, 0x69, 0x0a, 0x13, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 
0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x46, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x22, 0x2a, 0x0a, - 0x14, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x2f, 0x0a, 0x0b, 0x57, 0x61, 0x69, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x32, 0xa9, 0x04, 0x0a, 0x08, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x12, 0x3a, 0x0a, 0x03, 0x52, 0x75, 0x6e, 0x12, 0x18, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, - 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x04, - 0x57, 0x61, 0x69, 0x74, 0x12, 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x57, 0x61, 0x69, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x75, 0x6e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x43, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, - 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x43, 0x6f, - 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x43, 0x6f, 0x6d, 0x6d, 0x61, - 0x6e, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x49, 0x73, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x12, 0x19, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x49, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x49, 0x73, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3e, 0x0a, 0x09, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x12, 0x17, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, - 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x56, 0x0a, 0x15, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x42, 0x61, 0x73, - 0x65, 0x64, 0x4f, 0x6e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x53, 0x68, 
0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, 0x42, 0x61, 0x73, 0x65, 0x64, - 0x4f, 0x6e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x42, 0x69, 0x64, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x4f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1a, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x23, 0x5a, 0x21, 0x2e, 0x2f, 0x70, 0x6b, 0x67, 0x2f, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_executor_proto_rawDescOnce sync.Once - file_executor_proto_rawDescData = file_executor_proto_rawDesc -) - -func file_executor_proto_rawDescGZIP() []byte { - file_executor_proto_rawDescOnce.Do(func() { - file_executor_proto_rawDescData = protoimpl.X.CompressGZIP(file_executor_proto_rawDescData) - }) - return file_executor_proto_rawDescData -} - -var file_executor_proto_msgTypes = make([]protoimpl.MessageInfo, 13) -var file_executor_proto_goTypes = []interface{}{ - (*StartResponse)(nil), // 0: proto.StartResponse - (*RunCommandRequest)(nil), // 1: proto.RunCommandRequest - (*RunCommandResponse)(nil), // 2: proto.RunCommandResponse - (*CancelCommandRequest)(nil), // 3: proto.CancelCommandRequest - (*CancelCommandResponse)(nil), // 4: proto.CancelCommandResponse - (*IsInstalledRequest)(nil), // 5: proto.IsInstalledRequest - (*IsInstalledResponse)(nil), // 6: proto.IsInstalledResponse - (*ShouldBidRequest)(nil), // 7: proto.ShouldBidRequest - (*ShouldBidBasedOnUsageRequest)(nil), // 8: proto.ShouldBidBasedOnUsageRequest - (*ShouldBidResponse)(nil), // 9: proto.ShouldBidResponse - (*OutputStreamRequest)(nil), // 10: proto.OutputStreamRequest - (*OutputStreamResponse)(nil), // 11: proto.OutputStreamResponse - (*WaitRequest)(nil), // 12: proto.WaitRequest -} -var file_executor_proto_depIdxs = []int32{ - 1, // 0: proto.Executor.Run:input_type -> proto.RunCommandRequest - 1, // 1: proto.Executor.Start:input_type -> proto.RunCommandRequest - 12, // 2: proto.Executor.Wait:input_type -> proto.WaitRequest - 3, // 3: proto.Executor.Cancel:input_type -> proto.CancelCommandRequest - 5, // 4: proto.Executor.IsInstalled:input_type -> proto.IsInstalledRequest - 7, // 5: proto.Executor.ShouldBid:input_type -> proto.ShouldBidRequest - 8, // 6: proto.Executor.ShouldBidBasedOnUsage:input_type -> proto.ShouldBidBasedOnUsageRequest - 10, // 7: proto.Executor.GetOutputStream:input_type -> proto.OutputStreamRequest - 2, // 8: proto.Executor.Run:output_type -> proto.RunCommandResponse - 0, // 9: proto.Executor.Start:output_type -> proto.StartResponse - 2, // 10: proto.Executor.Wait:output_type -> proto.RunCommandResponse - 4, // 11: proto.Executor.Cancel:output_type -> proto.CancelCommandResponse - 6, // 12: proto.Executor.IsInstalled:output_type -> proto.IsInstalledResponse - 9, // 13: proto.Executor.ShouldBid:output_type -> proto.ShouldBidResponse - 9, // 14: proto.Executor.ShouldBidBasedOnUsage:output_type -> proto.ShouldBidResponse - 11, // 15: 
proto.Executor.GetOutputStream:output_type -> proto.OutputStreamResponse - 8, // [8:16] is the sub-list for method output_type - 0, // [0:8] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_executor_proto_init() } -func file_executor_proto_init() { - if File_executor_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_executor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StartResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunCommandRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RunCommandResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelCommandRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CancelCommandResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsInstalledRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*IsInstalledResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShouldBidRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShouldBidBasedOnUsageRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ShouldBidResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutputStreamRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*OutputStreamResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_executor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*WaitRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_executor_proto_rawDesc, - NumEnums: 0, - NumMessages: 13, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_executor_proto_goTypes, - DependencyIndexes: file_executor_proto_depIdxs, - MessageInfos: file_executor_proto_msgTypes, - }.Build() - File_executor_proto = out.File - file_executor_proto_rawDesc = nil - file_executor_proto_goTypes = nil - file_executor_proto_depIdxs = nil -} diff --git a/pkg/executor/plugins/grpc/proto/executor.proto b/pkg/executor/plugins/grpc/proto/executor.proto deleted file mode 100644 index ef64b00406..0000000000 --- a/pkg/executor/plugins/grpc/proto/executor.proto +++ /dev/null @@ -1,73 +0,0 @@ - -syntax = "proto3"; -package proto; - -// TODO: Complete these structure, rather than merely wrapping serialized JSON bytes in protobuf containers. -// Details in: https://github.com/bacalhau-project/bacalhau/issues/2700 - -message StartResponse { - -} - -message RunCommandRequest { - bytes Params = 1; -} - -message RunCommandResponse { - bytes Params = 1; -} - -message CancelCommandRequest { - string ExecutionID =1; -} - -message CancelCommandResponse { - -} - -message IsInstalledRequest { - -} - -message IsInstalledResponse { - bool Installed = 1; -} - -message ShouldBidRequest { - bytes BidRequest = 1; -} - -message ShouldBidBasedOnUsageRequest { - bytes BidRequest = 1; - bytes Usage = 2; -} - -// shared by both semantic and resource bid -message ShouldBidResponse { - bytes BidResponse = 1; -} - -message OutputStreamRequest { - string ExecutionID = 1; - bool History = 2; - bool Follow = 3; -} - -message OutputStreamResponse { - bytes Data =1; -} - -message WaitRequest { - string ExecutionID = 1; -} - -service Executor { - rpc Run(RunCommandRequest) returns (RunCommandResponse); - rpc Start(RunCommandRequest) returns (StartResponse); - rpc Wait(WaitRequest) returns (stream RunCommandResponse); - rpc Cancel(CancelCommandRequest) returns (CancelCommandResponse); - rpc IsInstalled(IsInstalledRequest) returns (IsInstalledResponse); - rpc ShouldBid(ShouldBidRequest) returns (ShouldBidResponse); - rpc ShouldBidBasedOnUsage(ShouldBidBasedOnUsageRequest) returns (ShouldBidResponse); - rpc GetOutputStream(OutputStreamRequest) returns (stream OutputStreamResponse); -} diff --git a/pkg/executor/plugins/grpc/proto/executor_grpc.pb.go b/pkg/executor/plugins/grpc/proto/executor_grpc.pb.go deleted file mode 100644 index 29132d3715..0000000000 --- a/pkg/executor/plugins/grpc/proto/executor_grpc.pb.go +++ /dev/null @@ -1,412 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v4.24.3 -// source: executor.proto - -package proto - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// ExecutorClient is the client API for Executor service. 
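In normal operation this generated client is obtained through go-plugin, which launches the plugin binary and owns the connection; but the service can also be exercised with plain gRPC, which makes the request/response shapes easier to see. A minimal sketch, assuming a known listener address (the `127.0.0.1:7000` address and `"example-id"` execution ID are placeholders):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc/proto"
)

func main() {
	// Hypothetical address; go-plugin normally negotiates this socket itself.
	conn, err := grpc.Dial("127.0.0.1:7000",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := proto.NewExecutorClient(conn)

	// Unary RPC: a plain request/response round trip.
	resp, err := client.IsInstalled(context.Background(), &proto.IsInstalledRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("installed: %v", resp.Installed)

	// Wait is server-streaming: the server pushes a RunCommandResponse when
	// the execution completes, so the client reads from the stream.
	stream, err := client.Wait(context.Background(),
		&proto.WaitRequest{ExecutionID: "example-id"})
	if err != nil {
		log.Fatal(err)
	}
	if msg, err := stream.Recv(); err == nil {
		// Params carries a JSON-serialized models.RunCommandResult.
		log.Printf("result payload: %d bytes", len(msg.Params))
	}
}
```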
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type ExecutorClient interface { - Run(ctx context.Context, in *RunCommandRequest, opts ...grpc.CallOption) (*RunCommandResponse, error) - Start(ctx context.Context, in *RunCommandRequest, opts ...grpc.CallOption) (*StartResponse, error) - Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (Executor_WaitClient, error) - Cancel(ctx context.Context, in *CancelCommandRequest, opts ...grpc.CallOption) (*CancelCommandResponse, error) - IsInstalled(ctx context.Context, in *IsInstalledRequest, opts ...grpc.CallOption) (*IsInstalledResponse, error) - ShouldBid(ctx context.Context, in *ShouldBidRequest, opts ...grpc.CallOption) (*ShouldBidResponse, error) - ShouldBidBasedOnUsage(ctx context.Context, in *ShouldBidBasedOnUsageRequest, opts ...grpc.CallOption) (*ShouldBidResponse, error) - GetOutputStream(ctx context.Context, in *OutputStreamRequest, opts ...grpc.CallOption) (Executor_GetOutputStreamClient, error) -} - -type executorClient struct { - cc grpc.ClientConnInterface -} - -func NewExecutorClient(cc grpc.ClientConnInterface) ExecutorClient { - return &executorClient{cc} -} - -func (c *executorClient) Run(ctx context.Context, in *RunCommandRequest, opts ...grpc.CallOption) (*RunCommandResponse, error) { - out := new(RunCommandResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/Run", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) Start(ctx context.Context, in *RunCommandRequest, opts ...grpc.CallOption) (*StartResponse, error) { - out := new(StartResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/Start", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (Executor_WaitClient, error) { - stream, err := c.cc.NewStream(ctx, &Executor_ServiceDesc.Streams[0], "/proto.Executor/Wait", opts...) - if err != nil { - return nil, err - } - x := &executorWaitClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Executor_WaitClient interface { - Recv() (*RunCommandResponse, error) - grpc.ClientStream -} - -type executorWaitClient struct { - grpc.ClientStream -} - -func (x *executorWaitClient) Recv() (*RunCommandResponse, error) { - m := new(RunCommandResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *executorClient) Cancel(ctx context.Context, in *CancelCommandRequest, opts ...grpc.CallOption) (*CancelCommandResponse, error) { - out := new(CancelCommandResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/Cancel", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) IsInstalled(ctx context.Context, in *IsInstalledRequest, opts ...grpc.CallOption) (*IsInstalledResponse, error) { - out := new(IsInstalledResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/IsInstalled", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) ShouldBid(ctx context.Context, in *ShouldBidRequest, opts ...grpc.CallOption) (*ShouldBidResponse, error) { - out := new(ShouldBidResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/ShouldBid", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) ShouldBidBasedOnUsage(ctx context.Context, in *ShouldBidBasedOnUsageRequest, opts ...grpc.CallOption) (*ShouldBidResponse, error) { - out := new(ShouldBidResponse) - err := c.cc.Invoke(ctx, "/proto.Executor/ShouldBidBasedOnUsage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *executorClient) GetOutputStream(ctx context.Context, in *OutputStreamRequest, opts ...grpc.CallOption) (Executor_GetOutputStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &Executor_ServiceDesc.Streams[1], "/proto.Executor/GetOutputStream", opts...) - if err != nil { - return nil, err - } - x := &executorGetOutputStreamClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Executor_GetOutputStreamClient interface { - Recv() (*OutputStreamResponse, error) - grpc.ClientStream -} - -type executorGetOutputStreamClient struct { - grpc.ClientStream -} - -func (x *executorGetOutputStreamClient) Recv() (*OutputStreamResponse, error) { - m := new(OutputStreamResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// ExecutorServer is the server API for Executor service. -// All implementations must embed UnimplementedExecutorServer -// for forward compatibility -type ExecutorServer interface { - Run(context.Context, *RunCommandRequest) (*RunCommandResponse, error) - Start(context.Context, *RunCommandRequest) (*StartResponse, error) - Wait(*WaitRequest, Executor_WaitServer) error - Cancel(context.Context, *CancelCommandRequest) (*CancelCommandResponse, error) - IsInstalled(context.Context, *IsInstalledRequest) (*IsInstalledResponse, error) - ShouldBid(context.Context, *ShouldBidRequest) (*ShouldBidResponse, error) - ShouldBidBasedOnUsage(context.Context, *ShouldBidBasedOnUsageRequest) (*ShouldBidResponse, error) - GetOutputStream(*OutputStreamRequest, Executor_GetOutputStreamServer) error - mustEmbedUnimplementedExecutorServer() -} - -// UnimplementedExecutorServer must be embedded to have forward compatible implementations. 
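The forward-compatibility contract described in the comment above works through embedding: a concrete server embeds `UnimplementedExecutorServer`, which both satisfies the unexported `mustEmbedUnimplementedExecutorServer()` method and supplies `codes.Unimplemented` defaults for any RPC the server does not override. A sketch with a hypothetical stub (the `stubExecutorServer` type is illustrative, not part of the repo; the deleted `GRPCServer` in server.go below embeds the same type):

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc/proto"
)

// stubExecutorServer embeds UnimplementedExecutorServer, so it keeps
// compiling even if the Executor service later grows new RPCs.
type stubExecutorServer struct {
	proto.UnimplementedExecutorServer
}

// Only IsInstalled is overridden; every other method falls through to the
// embedded defaults and returns codes.Unimplemented to the caller.
func (s *stubExecutorServer) IsInstalled(
	context.Context, *proto.IsInstalledRequest,
) (*proto.IsInstalledResponse, error) {
	return &proto.IsInstalledResponse{Installed: true}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	proto.RegisterExecutorServer(s, &stubExecutorServer{})
	log.Fatal(s.Serve(lis))
}
```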
-type UnimplementedExecutorServer struct { -} - -func (UnimplementedExecutorServer) Run(context.Context, *RunCommandRequest) (*RunCommandResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Run not implemented") -} -func (UnimplementedExecutorServer) Start(context.Context, *RunCommandRequest) (*StartResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Start not implemented") -} -func (UnimplementedExecutorServer) Wait(*WaitRequest, Executor_WaitServer) error { - return status.Errorf(codes.Unimplemented, "method Wait not implemented") -} -func (UnimplementedExecutorServer) Cancel(context.Context, *CancelCommandRequest) (*CancelCommandResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Cancel not implemented") -} -func (UnimplementedExecutorServer) IsInstalled(context.Context, *IsInstalledRequest) (*IsInstalledResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method IsInstalled not implemented") -} -func (UnimplementedExecutorServer) ShouldBid(context.Context, *ShouldBidRequest) (*ShouldBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ShouldBid not implemented") -} -func (UnimplementedExecutorServer) ShouldBidBasedOnUsage(context.Context, *ShouldBidBasedOnUsageRequest) (*ShouldBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ShouldBidBasedOnUsage not implemented") -} -func (UnimplementedExecutorServer) GetOutputStream(*OutputStreamRequest, Executor_GetOutputStreamServer) error { - return status.Errorf(codes.Unimplemented, "method GetOutputStream not implemented") -} -func (UnimplementedExecutorServer) mustEmbedUnimplementedExecutorServer() {} - -// UnsafeExecutorServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to ExecutorServer will -// result in compilation errors. 
-type UnsafeExecutorServer interface { - mustEmbedUnimplementedExecutorServer() -} - -func RegisterExecutorServer(s grpc.ServiceRegistrar, srv ExecutorServer) { - s.RegisterService(&Executor_ServiceDesc, srv) -} - -func _Executor_Run_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RunCommandRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).Run(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/Run", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).Run(ctx, req.(*RunCommandRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RunCommandRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).Start(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/Start", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).Start(ctx, req.(*RunCommandRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_Wait_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(WaitRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ExecutorServer).Wait(m, &executorWaitServer{stream}) -} - -type Executor_WaitServer interface { - Send(*RunCommandResponse) error - grpc.ServerStream -} - -type executorWaitServer struct { - grpc.ServerStream -} - -func (x *executorWaitServer) Send(m *RunCommandResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Executor_Cancel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CancelCommandRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).Cancel(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/Cancel", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).Cancel(ctx, req.(*CancelCommandRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_IsInstalled_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(IsInstalledRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).IsInstalled(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/IsInstalled", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).IsInstalled(ctx, req.(*IsInstalledRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_ShouldBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ShouldBidRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).ShouldBid(ctx, in) - } - info := 
&grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/ShouldBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).ShouldBid(ctx, req.(*ShouldBidRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_ShouldBidBasedOnUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ShouldBidBasedOnUsageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ExecutorServer).ShouldBidBasedOnUsage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.Executor/ShouldBidBasedOnUsage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ExecutorServer).ShouldBidBasedOnUsage(ctx, req.(*ShouldBidBasedOnUsageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Executor_GetOutputStream_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(OutputStreamRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ExecutorServer).GetOutputStream(m, &executorGetOutputStreamServer{stream}) -} - -type Executor_GetOutputStreamServer interface { - Send(*OutputStreamResponse) error - grpc.ServerStream -} - -type executorGetOutputStreamServer struct { - grpc.ServerStream -} - -func (x *executorGetOutputStreamServer) Send(m *OutputStreamResponse) error { - return x.ServerStream.SendMsg(m) -} - -// Executor_ServiceDesc is the grpc.ServiceDesc for Executor service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Executor_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "proto.Executor", - HandlerType: (*ExecutorServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Run", - Handler: _Executor_Run_Handler, - }, - { - MethodName: "Start", - Handler: _Executor_Start_Handler, - }, - { - MethodName: "Cancel", - Handler: _Executor_Cancel_Handler, - }, - { - MethodName: "IsInstalled", - Handler: _Executor_IsInstalled_Handler, - }, - { - MethodName: "ShouldBid", - Handler: _Executor_ShouldBid_Handler, - }, - { - MethodName: "ShouldBidBasedOnUsage", - Handler: _Executor_ShouldBidBasedOnUsage_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Wait", - Handler: _Executor_Wait_Handler, - ServerStreams: true, - }, - { - StreamName: "GetOutputStream", - Handler: _Executor_GetOutputStream_Handler, - ServerStreams: true, - }, - }, - Metadata: "executor.proto", -} diff --git a/pkg/executor/plugins/grpc/server.go b/pkg/executor/plugins/grpc/server.go deleted file mode 100644 index d7b87798f6..0000000000 --- a/pkg/executor/plugins/grpc/server.go +++ /dev/null @@ -1,166 +0,0 @@ -package grpc - -import ( - "context" - "encoding/json" - "fmt" - "io" - - "github.com/bacalhau-project/bacalhau/pkg/bidstrategy" - "github.com/bacalhau-project/bacalhau/pkg/executor" - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc/proto" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -const ( - DefaultStreamBufferSize = 1024 -) - -// TODO: Complete protobuf structure, rather than merely wrapping serialized JSON bytes in protobuf containers. 
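The TODO above refers to the pattern used throughout this file: typed Go requests are serialized to JSON and carried opaquely in a protobuf `Params` field. A minimal sketch of the client-side half of that round trip, assuming the generated `ExecutorClient` that accompanied these stubs (request fields elided):

```go
// run shows the JSON-in-protobuf wrapping the TODO criticizes: marshal
// the typed request to JSON, wrap it in the protobuf envelope, and hand
// back the still-serialized JSON result.
func run(ctx context.Context, client proto.ExecutorClient) ([]byte, error) {
	args := new(executor.RunCommandRequest) // fields elided; see pkg/executor
	payload, err := json.Marshal(args)
	if err != nil {
		return nil, err
	}
	resp, err := client.Run(ctx, &proto.RunCommandRequest{Params: payload})
	if err != nil {
		return nil, err
	}
	return resp.Params, nil
}
```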
-// Details in: https://github.com/bacalhau-project/bacalhau/issues/2700 - -type GRPCServer struct { - Impl executor.Executor - - proto.UnimplementedExecutorServer -} - -func (s *GRPCServer) Start(_ context.Context, request *proto.RunCommandRequest) (*proto.StartResponse, error) { - // NB(forrest): A new context is created for the `Start` operation because `Start` initiates a - // long-running operation. The context passed as an argument to this method is tied to the gRPC request and is - // canceled when this method returns. By creating a separate context, we ensure that `Start` has a lifecycle - // independent of the gRPC request. - ctx := context.Background() - args := new(executor.RunCommandRequest) - if err := json.Unmarshal(request.Params, args); err != nil { - return nil, err - } - if err := s.Impl.Start(ctx, args); err != nil { - return nil, err - } - return &proto.StartResponse{}, nil -} - -func (s *GRPCServer) Wait(request *proto.WaitRequest, server proto.Executor_WaitServer) error { - // NB(forrest): The context obtained from `server.Context()` is appropriate to use here because `Wait` - // is a streaming RPC. The context remains active for the entire lifetime of the stream and is - // only canceled when the client or server closes the stream. This behavior is in contrast to - // unary RPCs (like `Start` and `Run`), where the context is tied to the individual request. - ctx := server.Context() - waitC, errC := s.Impl.Wait(ctx, request.GetExecutionID()) - select { - case <-ctx.Done(): - return ctx.Err() - case err := <-errC: - return err - case res := <-waitC: - resp, err := json.Marshal(res) - if err != nil { - return err - } - if err := server.Send(&proto.RunCommandResponse{Params: resp}); err != nil { - return err - } - } - return nil -} - -func (s *GRPCServer) Run(ctx context.Context, request *proto.RunCommandRequest) (*proto.RunCommandResponse, error) { - args := new(executor.RunCommandRequest) - if err := json.Unmarshal(request.Params, args); err != nil { - return nil, err - } - result, err := s.Impl.Run(ctx, args) - if err != nil { - return nil, err - } - b, err := json.Marshal(result) - if err != nil { - return nil, err - } - return &proto.RunCommandResponse{Params: b}, nil -} - -func (s *GRPCServer) Cancel(ctx context.Context, request *proto.CancelCommandRequest) (*proto.CancelCommandResponse, error) { - err := s.Impl.Cancel(ctx, request.ExecutionID) - if err != nil { - return nil, err - } - return &proto.CancelCommandResponse{}, nil -} - -func (s *GRPCServer) IsInstalled(ctx context.Context, _ *proto.IsInstalledRequest) (*proto.IsInstalledResponse, error) { - installed, err := s.Impl.IsInstalled(ctx) - if err != nil { - return nil, err - } - return &proto.IsInstalledResponse{Installed: installed}, nil -} - -func (s *GRPCServer) ShouldBid(ctx context.Context, request *proto.ShouldBidRequest) (*proto.ShouldBidResponse, error) { - var args bidstrategy.BidStrategyRequest - if err := json.Unmarshal(request.BidRequest, &args); err != nil { - return nil, err - } - result, err := s.Impl.ShouldBid(ctx, args) - if err != nil { - return nil, err - } - b, err := json.Marshal(result) - if err != nil { - return nil, err - } - return &proto.ShouldBidResponse{BidResponse: b}, nil -} - -func (s *GRPCServer) ShouldBidBasedOnUsage( - ctx context.Context, - request *proto.ShouldBidBasedOnUsageRequest) (*proto.ShouldBidResponse, error) { - var bidReq bidstrategy.BidStrategyRequest - if err := json.Unmarshal(request.BidRequest, &bidReq); err != nil { - return nil, err - } - var usage 
models.Resources - if err := json.Unmarshal(request.Usage, &usage); err != nil { - return nil, err - } - result, err := s.Impl.ShouldBidBasedOnUsage(ctx, bidReq, usage) - if err != nil { - return nil, err - } - b, err := json.Marshal(result) - if err != nil { - return nil, err - } - return &proto.ShouldBidResponse{BidResponse: b}, nil -} - -func (s *GRPCServer) GetLogStream(request *proto.OutputStreamRequest, server proto.Executor_GetOutputStreamServer) error { - ctx := server.Context() - result, err := s.Impl.GetLogStream(ctx, executor.LogStreamRequest{ - ExecutionID: request.ExecutionID, - Tail: request.History, - Follow: request.Follow, - }) - if err != nil { - return err - } - defer result.Close() - - buffer := make([]byte, DefaultStreamBufferSize) - for { - n, err := result.Read(buffer) - if err != nil { - if err == io.EOF { - break - } - return fmt.Errorf("failed to read data: %w", err) - } - - res := &proto.OutputStreamResponse{Data: buffer[:n]} - if err := server.Send(res); err != nil { - return fmt.Errorf("failed to send data: %w", err) - } - } - return nil -} diff --git a/pkg/executor/util/pluggable_executor.go b/pkg/executor/util/pluggable_executor.go deleted file mode 100644 index 243302cde6..0000000000 --- a/pkg/executor/util/pluggable_executor.go +++ /dev/null @@ -1,141 +0,0 @@ -package util - -import ( - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - - "github.com/bacalhau-project/bacalhau/pkg/executor" - "github.com/bacalhau-project/bacalhau/pkg/executor/plugins/grpc" - "github.com/hashicorp/go-plugin" -) - -func NewPluginExecutorManager() *PluginExecutorManager { - return &PluginExecutorManager{ - registered: make(map[string]PluginExecutorManagerConfig), - active: make(map[string]*activeExecutor), - } -} - -type PluginExecutorManager struct { - registered map[string]PluginExecutorManagerConfig - active map[string]*activeExecutor -} - -func (e *PluginExecutorManager) Get(ctx context.Context, key string) (executor.Executor, error) { - engine, ok := e.active[key] - if !ok { - return nil, fmt.Errorf("plugin %s not found", key) - } - return engine.Impl, nil -} - -func (e *PluginExecutorManager) Has(ctx context.Context, key string) bool { - _, ok := e.active[key] - return ok -} - -// Keys returns the keys of the registered executors -func (e *PluginExecutorManager) Keys(ctx context.Context) []string { - keys := make([]string, 0, len(e.active)) - for k := range e.active { - keys = append(keys, k) - } - return keys -} - -// compile-time check that PluginExecutorManager implements ExecutorProvider -var _ executor.ExecutorProvider = (*PluginExecutorManager)(nil) - -type activeExecutor struct { - Impl executor.Executor - Closer func() -} - -type PluginExecutorManagerConfig struct { - Name string - Path string - Command string - ProtocolVersion uint - MagicCookieKey string - MagicCookieValue string -} - -func (e *PluginExecutorManager) RegisterPlugin(config PluginExecutorManagerConfig) error { - _, ok := e.registered[config.Name] - if ok { - return fmt.Errorf("duplicate registration of executor %s", config.Name) - } - - if pluginBin, err := os.Stat(filepath.Join(config.Path, config.Command)); err != nil { - return err - } else if pluginBin.IsDir() { - return fmt.Errorf("plugin location is directory, expected binary") - } - // TODO check if binary is executable - - e.registered[config.Name] = config - return nil -} - -func (e *PluginExecutorManager) Start(ctx context.Context) error { - for name, config := range e.registered { - pluginExecutor, closer, err := e.dispense(name, 
config) - if err != nil { - return err - } - e.active[name] = &activeExecutor{ - Impl: pluginExecutor, - Closer: closer, - } - } - return nil -} - -func (e *PluginExecutorManager) Stop(ctx context.Context) error { - for _, active := range e.active { - active.Closer() - } - return nil -} - -const PluggableExecutorPluginName = "PLUGGABLE_EXECUTOR" - -func (e *PluginExecutorManager) dispense(name string, config PluginExecutorManagerConfig) (executor.Executor, func(), error) { - client := plugin.NewClient(&plugin.ClientConfig{ - Plugins: map[string]plugin.Plugin{ - PluggableExecutorPluginName: &grpc.ExecutorGRPCPlugin{}, - }, - AllowedProtocols: []plugin.Protocol{ - plugin.ProtocolNetRPC, plugin.ProtocolGRPC}, - HandshakeConfig: plugin.HandshakeConfig{ - ProtocolVersion: config.ProtocolVersion, - MagicCookieKey: config.MagicCookieKey, - MagicCookieValue: config.MagicCookieValue, - }, - //nolint:gosec - Cmd: exec.Command(filepath.Join(config.Path, config.Command)), - }) - - rpcClient, err := client.Client() - if err != nil { - client.Kill() - return nil, nil, err - } - - raw, err := rpcClient.Dispense(PluggableExecutorPluginName) - if err != nil { - client.Kill() - return nil, nil, err - } - - pluginExecutor, ok := raw.(executor.Executor) - if !ok { - client.Kill() - return nil, nil, fmt.Errorf("plugin is not of type: PluggableExecutor") - } - - return pluginExecutor, func() { client.Kill() }, nil -} diff --git a/pkg/executor/util/utils.go b/pkg/executor/util/utils.go index ba2f7ef71e..b40b02b5e2 100644 --- a/pkg/executor/util/utils.go +++ b/pkg/executor/util/utils.go @@ -130,27 +130,3 @@ func NewNoopExecutors(config noop_executor.ExecutorConfig) executor.ExecutorProv noopExecutor := noop_executor.NewNoopExecutorWithConfig(config) return provider.NewNoopProvider[executor.Executor](noopExecutor) } - -type PluginExecutorOptions struct { - Plugins []PluginExecutorManagerConfig -} - -func NewPluginExecutorProvider( - ctx context.Context, - cm *system.CleanupManager, - pluginOptions PluginExecutorOptions, -) (executor.ExecutorProvider, error) { - pe := NewPluginExecutorManager() - for _, cfg := range pluginOptions.Plugins { - if err := pe.RegisterPlugin(cfg); err != nil { - return nil, err - } - } - if err := pe.Start(ctx); err != nil { - return nil, err - } - - cm.RegisterCallbackWithContext(pe.Stop) - - return pe, nil -} diff --git a/pkg/executor/wasm/executor.go b/pkg/executor/wasm/executor.go index b2d04bdb18..2795a8cf39 100644 --- a/pkg/executor/wasm/executor.go +++ b/pkg/executor/wasm/executor.go @@ -273,18 +273,18 @@ func (e *Executor) makeFsFromStorage( return nil, fmt.Errorf("output volume has no path: %+v", output) } - srcd := filepath.Join(jobResultsDir, output.Name) + srcDir := filepath.Join(jobResultsDir, output.Name) log.Ctx(ctx).Debug(). Str("output", output.Name). - Str("dir", srcd). + Str("dir", srcDir). Msg("Collecting output") - err = os.Mkdir(srcd, util.OS_ALL_R|util.OS_ALL_X|util.OS_USER_W) + err = os.Mkdir(srcDir, util.OS_ALL_R|util.OS_ALL_X|util.OS_USER_W) if err != nil { return nil, err } - err = rootFs.Mount(output.Name, touchfs.New(srcd)) + err = rootFs.Mount(output.Name, touchfs.New(srcDir)) if err != nil { return nil, err } diff --git a/pkg/executor/wasm/loader.go b/pkg/executor/wasm/loader.go index 9f9e205855..e8f7e3dbfc 100644 --- a/pkg/executor/wasm/loader.go +++ b/pkg/executor/wasm/loader.go @@ -25,7 +25,7 @@ import ( // the WebAssembly program, allowing the user to deploy self-contained // WebAssembly blobs. See the introductory talk at https://youtu.be/6zJkMLzXbQc. 
// -// This works by using the "module name" field of a WebAssmelby import header, +// This works by using the "module name" field of a WebAssembly import header, // (which for user-supplied modules is arbitrary) as a hint to the loader as to // where the dependency lives and how to retrieve it. The module still needs to // be specified as input data for the job (a previous implementation of the @@ -102,7 +102,7 @@ func (loader *ModuleLoader) loadModule(ctx context.Context, m storage.PreparedSt // InstantiateRemoteModule loads and instantiates the remote module and all of // its dependencies. It only looks in the job's input storage specs for modules. // -// This function calls itself reucrsively for any discovered dependencies on the +// This function calls itself recursively for any discovered dependencies on the // loaded modules, so that the returned module has all of its dependencies fully // instantiated and is ready to use. func (loader *ModuleLoader) InstantiateRemoteModule(ctx context.Context, m storage.PreparedStorage) (api.Module, error) { diff --git a/pkg/executor/wasm/util/generic/broadcaster_test.go b/pkg/executor/wasm/util/generic/broadcaster_test.go index a26cbd84ff..42be342077 100644 --- a/pkg/executor/wasm/util/generic/broadcaster_test.go +++ b/pkg/executor/wasm/util/generic/broadcaster_test.go @@ -51,7 +51,7 @@ func (s *BroadcasterTestSuite) TestBroadcasterAutoclose() { require.Error(s.T(), err) } -func (s *BroadcasterTestSuite) TestBroadcasterSubUnsub() { +func (s *BroadcasterTestSuite) TestBroadcasterSubUnsubscribe() { ch1, err1 := s.broadcaster.Subscribe() ch2, err2 := s.broadcaster.Subscribe() require.NoError(s.T(), err1) diff --git a/pkg/executor/wasm/util/logger/logmanager.go b/pkg/executor/wasm/util/logger/logmanager.go index 35e207310b..23a0929a9b 100644 --- a/pkg/executor/wasm/util/logger/logmanager.go +++ b/pkg/executor/wasm/util/logger/logmanager.go @@ -162,11 +162,11 @@ func (lm *LogManager) Drain() { } func (lm *LogManager) GetWriters() (io.WriteCloser, io.WriteCloser) { - writerFunc := func(strm LogStreamType) func([]byte) *LogMessage { + writerFunc := func(stream LogStreamType) func([]byte) *LogMessage { return func(b []byte) *LogMessage { m := LogMessage{ Timestamp: time.Now().Unix(), - Stream: strm, + Stream: stream, } m.Data = append([]byte(nil), b...) 
return &m diff --git a/pkg/jobstore/boltdb/store_test.go b/pkg/jobstore/boltdb/store_test.go index b5e8f7cc11..671c6a1678 100644 --- a/pkg/jobstore/boltdb/store_test.go +++ b/pkg/jobstore/boltdb/store_test.go @@ -851,7 +851,7 @@ func (s *BoltJobstoreTestSuite) TestGetExecutions() { s.Equal(2, len(state)) s.Equal(state[0].GetModifyTime().Before(state[1].GetModifyTime()), true) - // When OrderBy is set to Modified At With Reverese + // When OrderBy is set to Modified At With Reverse state, err = s.store.GetExecutions(s.ctx, jobstore.GetExecutionsOptions{ JobID: "160", OrderBy: "modified_at", diff --git a/pkg/lib/collections/hashed_priority_queue_test.go b/pkg/lib/collections/hashed_priority_queue_test.go index f5838fbe99..967301fc6b 100644 --- a/pkg/lib/collections/hashed_priority_queue_test.go +++ b/pkg/lib/collections/hashed_priority_queue_test.go @@ -136,10 +136,10 @@ func (s *HashedPriorityQueueSuite) TestDuplicateKeys() { } for _, exp := range expected { - qitem := pq.Dequeue() - s.Require().NotNil(qitem) - s.Require().Equal(exp.v, qitem.Value) - s.Require().Equal(exp.p, qitem.Priority) + qItem := pq.Dequeue() + s.Require().NotNil(qItem) + s.Require().Equal(exp.v, qItem.Value) + s.Require().Equal(exp.p, qItem.Priority) } s.Require().True(pq.IsEmpty()) diff --git a/pkg/lib/collections/priority_queue.go b/pkg/lib/collections/priority_queue.go index 8448111d3e..fda6953f23 100644 --- a/pkg/lib/collections/priority_queue.go +++ b/pkg/lib/collections/priority_queue.go @@ -158,19 +158,19 @@ func (pq *PriorityQueue[T]) DequeueWhere(matcher MatchingFunction[T]) *QueueItem // If any iteration does not generate a match, the item is requeued in a temporary // queue reading for requeueing on this queue later on. for pq.internalQueue.Len() > 0 { - qitem := pq.dequeue() + qItem := pq.dequeue() - if qitem == nil { + if qItem == nil { return nil } - if matcher(qitem.Value) { - result = qitem + if matcher(qItem.Value) { + result = qItem break } // Add to the queue - unmatched = append(unmatched, qitem) + unmatched = append(unmatched, qItem) } // Re-add the items that were not matched back onto the Q diff --git a/pkg/lib/collections/priority_queue_base_test.go b/pkg/lib/collections/priority_queue_base_test.go index 0a5db6f5cd..e8de45f323 100644 --- a/pkg/lib/collections/priority_queue_base_test.go +++ b/pkg/lib/collections/priority_queue_base_test.go @@ -36,10 +36,10 @@ func (s *PriorityQueueTestSuite) TestSimple() { } for _, tc := range expected { - qitem := pq.Dequeue() - s.Require().NotNil(qitem) - s.Require().Equal(tc.v, qitem.Value) - s.Require().Equal(tc.p, qitem.Priority) + qItem := pq.Dequeue() + s.Require().NotNil(qItem) + s.Require().Equal(tc.v, qItem.Value) + s.Require().Equal(tc.p, qItem.Priority) } s.Require().True(pq.IsEmpty()) @@ -63,10 +63,10 @@ func (s *PriorityQueueTestSuite) TestSimpleMin() { } for _, tc := range expected { - qitem := pq.Dequeue() - s.Require().NotNil(qitem) - s.Require().Equal(tc.v, qitem.Value) - s.Require().Equal(tc.p, qitem.Priority) + qItem := pq.Dequeue() + s.Require().NotNil(qItem) + s.Require().Equal(tc.v, qItem.Value) + s.Require().Equal(tc.p, qItem.Priority) } s.Require().True(pq.IsEmpty()) @@ -74,8 +74,8 @@ func (s *PriorityQueueTestSuite) TestSimpleMin() { func (s *PriorityQueueTestSuite) TestEmpty() { pq := s.NewQueue() - qitem := pq.Dequeue() - s.Require().Nil(qitem) + qItem := pq.Dequeue() + s.Require().Nil(qItem) s.Require().True(pq.IsEmpty()) } @@ -91,13 +91,13 @@ func (s *PriorityQueueTestSuite) TestDequeueWhere() { count := pq.Len() - qitem := 
pq.DequeueWhere(func(possibleMatch TestData) bool { + qItem := pq.DequeueWhere(func(possibleMatch TestData) bool { return possibleMatch.id == "B" }) - s.Require().NotNil(qitem) - s.Require().Equal(TestData{"B", 2}, qitem.Value) - s.Require().Equal(int64(3), qitem.Priority) + s.Require().NotNil(qItem) + s.Require().Equal(TestData{"B", 2}, qItem.Value) + s.Require().Equal(int64(3), qItem.Priority) s.Require().Equal(count-1, pq.Len()) } @@ -105,11 +105,11 @@ func (s *PriorityQueueTestSuite) TestDequeueWhereFail() { pq := s.NewQueue() pq.Enqueue(TestData{"A", 1}, 4) - qitem := pq.DequeueWhere(func(possibleMatch TestData) bool { + qItem := pq.DequeueWhere(func(possibleMatch TestData) bool { return possibleMatch.id == "Z" }) - s.Require().Nil(qitem) + s.Require().Nil(qItem) } func (s *PriorityQueueTestSuite) TestPeek() { diff --git a/pkg/lib/collections/priority_queue_test.go b/pkg/lib/collections/priority_queue_test.go index ec3adeb23d..0c7bacc917 100644 --- a/pkg/lib/collections/priority_queue_test.go +++ b/pkg/lib/collections/priority_queue_test.go @@ -53,10 +53,10 @@ func (s *PriorityQueueSuite) TestDuplicateKeys() { } for _, exp := range expected { - qitem := pq.Dequeue() - s.Require().NotNil(qitem) - s.Require().Equal(exp.v, qitem.Value) - s.Require().Equal(exp.p, qitem.Priority) + qItem := pq.Dequeue() + s.Require().NotNil(qItem) + s.Require().Equal(exp.v, qItem.Value) + s.Require().Equal(exp.p, qItem.Priority) } s.Require().True(pq.IsEmpty()) diff --git a/pkg/lib/crypto/certificate.go b/pkg/lib/crypto/certificate.go index eaedc61769..b326ae9735 100644 --- a/pkg/lib/crypto/certificate.go +++ b/pkg/lib/crypto/certificate.go @@ -75,7 +75,7 @@ func NewSignedCertificate(parent Certificate, ipAddress []net.IP) (Certificate, return Certificate{cert: cert, parent: &parent, key: certPrivKey}, nil } -func (cert *Certificate) MarshalCertficate(out io.Writer) error { +func (cert *Certificate) MarshalCertificate(out io.Writer) error { var parent *x509.Certificate var signingKey *rsa.PrivateKey diff --git a/pkg/lib/crypto/certificate_test.go b/pkg/lib/crypto/certificate_test.go index 18de762b50..22a2590c30 100644 --- a/pkg/lib/crypto/certificate_test.go +++ b/pkg/lib/crypto/certificate_test.go @@ -28,7 +28,7 @@ func TestProducesValidCertificate(t *testing.T) { cert := getTestSelfSignedCert(t) var buf bytes.Buffer - err := cert.MarshalCertficate(&buf) + err := cert.MarshalCertificate(&buf) require.NoError(t, err) block, rest := pem.Decode(buf.Bytes()) @@ -49,7 +49,7 @@ func TestProducesSignedCertificate(t *testing.T) { require.NotNil(t, cert) var buf bytes.Buffer - err = cert.MarshalCertficate(&buf) + err = cert.MarshalCertificate(&buf) require.NoError(t, err) block, rest := pem.Decode(buf.Bytes()) @@ -61,7 +61,7 @@ func TestProducesSignedCertificate(t *testing.T) { require.NotNil(t, parsed) buf.Reset() - err = parent.MarshalCertficate(&buf) + err = parent.MarshalCertificate(&buf) require.NoError(t, err) pool := x509.NewCertPool() diff --git a/pkg/lib/policy/scrypt.go b/pkg/lib/policy/scrypt.go index 15e7e41da7..f69f5ca52f 100644 --- a/pkg/lib/policy/scrypt.go +++ b/pkg/lib/policy/scrypt.go @@ -30,7 +30,7 @@ var scryptFn = rego.Function2( Memoize: true, Nondeterministic: false, }, - func(bctx rego.BuiltinContext, passwordTerm, saltTerm *ast.Term) (*ast.Term, error) { + func(bCtx rego.BuiltinContext, passwordTerm, saltTerm *ast.Term) (*ast.Term, error) { var password, salt string if err := ast.As(passwordTerm.Value, &password); err != nil { return nil, err diff --git a/pkg/models/constants.go 
b/pkg/models/constants.go index 1e3d0631b0..05c0a8c950 100644 --- a/pkg/models/constants.go +++ b/pkg/models/constants.go @@ -88,11 +88,6 @@ const ( MetaRequesterID = "bacalhau.org/requester.id" MetaClientID = "bacalhau.org/client.id" - // Job provenance metadata used to track the origin of a job where - // it may have been translated from another job. - MetaDerivedFrom = "bacalhau.org/derivedFrom" - MetaTranslatedBy = "bacalhau.org/translatedBy" - MetaServerInstallationID = "bacalhau.org/server.installation.id" MetaServerInstanceID = "bacalhau.org/server.instance.id" MetaClientInstallationID = "bacalhau.org/client.installation.id" diff --git a/pkg/models/event_test.go b/pkg/models/event_test.go index 7d739f2de7..0056836d22 100644 --- a/pkg/models/event_test.go +++ b/pkg/models/event_test.go @@ -159,7 +159,7 @@ func (suite *EventTestSuite) TestGetJobStateIfPresent() { invalidState := "InvalidState" eventWithInvalidState := models.NewEvent(suite.topic).WithDetail(models.DetailsKeyNewState, invalidState) state, err = eventWithInvalidState.GetJobStateIfPresent() - suite.NoError(err) // models.JobStateType.UnmarshallText() does not return an error for invalid states + suite.NoError(err) // models.JobStateType.UnmarshalText() does not return an error for invalid states suite.Equal(models.JobStateTypeUndefined, state) } diff --git a/pkg/models/execution.go b/pkg/models/execution.go index ab54ebfee5..844fa26119 100644 --- a/pkg/models/execution.go +++ b/pkg/models/execution.go @@ -42,7 +42,7 @@ func (s ExecutionStateType) IsUndefined() bool { return s == ExecutionStateUndefined } -func (s ExecutionStateType) IsTermainl() bool { +func (s ExecutionStateType) IsTerminal() bool { return s == ExecutionStateBidRejected || s == ExecutionStateCompleted || s == ExecutionStateFailed || diff --git a/pkg/models/job_event_string.go b/pkg/models/job_event_string.go deleted file mode 100644 index 1d345bf277..0000000000 --- a/pkg/models/job_event_string.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by "stringer -type=JobEventType --trimprefix=JobEvent --output job_event_string.go"; DO NOT EDIT. - -package models - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[jobEventUndefined-0] - _ = x[JobEventCreated-1] - _ = x[JobEventBid-2] - _ = x[JobEventBidAccepted-3] - _ = x[JobEventBidRejected-4] - _ = x[JobEventComputeError-5] - _ = x[JobEventResultsProposed-6] - _ = x[JobEventResultsAccepted-7] - _ = x[JobEventResultsRejected-8] - _ = x[JobEventResultsPublished-9] - _ = x[JobEventError-10] - _ = x[JobEventCanceled-11] - _ = x[JobEventCompleted-12] - _ = x[jobEventDone-13] -} - -const _JobEventType_name = "jobEventUndefinedCreatedBidBidAcceptedBidRejectedComputeErrorResultsProposedResultsAcceptedResultsRejectedResultsPublishedErrorCanceledCompletedjobEventDone" - -var _JobEventType_index = [...]uint8{0, 17, 24, 27, 38, 49, 61, 76, 91, 106, 122, 127, 135, 144, 156} - -func (i JobEventType) String() string { - if i < 0 || i >= JobEventType(len(_JobEventType_index)-1) { - return "JobEventType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _JobEventType_name[_JobEventType_index[i]:_JobEventType_index[i+1]] -} diff --git a/pkg/models/jobevent.go b/pkg/models/jobevent.go deleted file mode 100644 index 54de944a22..0000000000 --- a/pkg/models/jobevent.go +++ /dev/null @@ -1,107 +0,0 @@ -package models - -import ( - "fmt" - "time" -) - -//go:generate stringer -type=JobEventType --trimprefix=JobEvent --output job_event_string.go -type JobEventType int - -const ( - jobEventUndefined JobEventType = iota // must be first - - // Job has been created on the requester node - JobEventCreated - - // a compute node bid on a job - JobEventBid - - // a requester node accepted for rejected a job bid - JobEventBidAccepted - JobEventBidRejected - - // a compute node had an error running a job - JobEventComputeError - - // a compute node completed running a job - JobEventResultsProposed - - // a Requester node accepted the results from a node for a job - JobEventResultsAccepted - - // a Requester node rejected the results from a node for a job - JobEventResultsRejected - - // once the results have been accepted or rejected - // the compute node will publish them and issue this event - JobEventResultsPublished - - // a requester node declared an error running a job - JobEventError - - // a user canceled a job - JobEventCanceled - - // a job has been completed - JobEventCompleted - - jobEventDone // must be last -) - -func (je JobEventType) IsUndefined() bool { - return je == jobEventUndefined -} - -// IsTerminal returns true if the given event type signals the end of the -// lifecycle of a job. After this, all nodes can safely ignore the job. 
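Before its removal, `JobEventType` round-tripped cleanly through text via the stringer output above. A minimal sketch against the `models` package as it existed before this change:

```go
package main

import (
	"fmt"

	"github.com/bacalhau-project/bacalhau/pkg/models" // pre-change API
)

func main() {
	// String() trims the "JobEvent" prefix, so this prints "Completed".
	b, _ := models.JobEventCompleted.MarshalText()
	fmt.Println(string(b))

	// UnmarshalText recovers the value by scanning the enum with
	// ParseJobEventType.
	var ev models.JobEventType
	_ = ev.UnmarshalText(b)
	fmt.Println(ev == models.JobEventCompleted) // true

	// IsTerminal gates on the three end-of-lifecycle events
	// (Error, Completed, Canceled).
	fmt.Println(ev.IsTerminal()) // true
}
```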
-func (je JobEventType) IsTerminal() bool { - return je == JobEventError || je == JobEventCompleted || je == JobEventCanceled -} - -func ParseJobEventType(str string) (JobEventType, error) { - for typ := jobEventUndefined + 1; typ < jobEventDone; typ++ { - if equal(typ.String(), str) { - return typ, nil - } - } - - return jobEventUndefined, fmt.Errorf( - "executor: unknown job event type '%s'", str) -} - -func JobEventTypes() []JobEventType { - var res []JobEventType - for typ := jobEventUndefined + 1; typ < jobEventDone; typ++ { - res = append(res, typ) - } - - return res -} - -func (je JobEventType) MarshalText() ([]byte, error) { - return []byte(je.String()), nil -} - -func (je *JobEventType) UnmarshalText(text []byte) (err error) { - name := string(text) - *je, err = ParseJobEventType(name) - return -} - -// TODO remove this https://github.com/bacalhau-project/bacalhau/issues/4185 -type JobEvent struct { - JobID string `json:"JobID,omitempty" example:"9304c616-291f-41ad-b862-54e133c0149e"` - // compute execution identifier - ExecutionID string `json:"ExecutionID,omitempty" example:"9304c616-291f-41ad-b862-54e133c0149e"` - // the node that emitted this event - SourceNodeID string `json:"SourceNodeID,omitempty" example:"QmXaXu9N5GNetatsvwnTfQqNtSeKAD6uCmarbh3LMRYAcF"` - // the node that this event is for - // e.g. "AcceptJobBid" was emitted by Requester but it targeting compute node - TargetNodeID string `json:"TargetNodeID,omitempty" example:"QmdZQ7ZbhnvWY1J12XYKGHApJ6aufKyLNSvf8jZBrBaAVL"` - - EventName JobEventType `json:"EventName,omitempty"` - Status string `json:"Status,omitempty" example:"Got results proposal of length: 0"` - - EventTime time.Time `json:"EventTime,omitempty" example:"2022-11-17T13:32:55.756658941Z"` -} diff --git a/pkg/models/network.go b/pkg/models/network.go index ed2d063ad2..080b0c28a3 100644 --- a/pkg/models/network.go +++ b/pkg/models/network.go @@ -118,7 +118,7 @@ func (n *NetworkConfig) Validate() (err error) { err = errors.Join(err, fmt.Errorf("invalid networking type %q", n.Type)) } - // TODO(forrest): should return an error if the network type is not HTTP and domanins are set. + // TODO(forrest): should return an error if the network type is not HTTP and domains are set. for _, domain := range n.Domains { if domainRegex.MatchString(domain) { continue @@ -210,28 +210,28 @@ func matchDomain(left, right string) (diff int) { return diff } - lcur, rcur := len(lefts)-1, len(rights)-1 - for lcur >= 0 && rcur >= 0 { + lCur, rCur := len(lefts)-1, len(rights)-1 + for lCur >= 0 && rCur >= 0 { // If neither is a blank, these components need to match. - if lefts[lcur] != wildcard && rights[rcur] != wildcard { - if diff = strings.Compare(lefts[lcur], rights[rcur]); diff != 0 { + if lefts[lCur] != wildcard && rights[rCur] != wildcard { + if diff = strings.Compare(lefts[lCur], rights[rCur]); diff != 0 { return diff } } // If both are blanks, they match. - if lefts[lcur] == wildcard || rights[rcur] == wildcard { + if lefts[lCur] == wildcard || rights[rCur] == wildcard { break } // Blank means we are matching any subdomains, so only the rest of // the domain needs to match for this to work. 
- if lefts[lcur] != wildcard { - lcur -= 1 + if lefts[lCur] != wildcard { + lCur -= 1 } - if rights[rcur] != wildcard { - rcur -= 1 + if rights[rCur] != wildcard { + rCur -= 1 } } diff --git a/pkg/models/network_test.go b/pkg/models/network_test.go index 7c3c5265b4..e64c927613 100644 --- a/pkg/models/network_test.go +++ b/pkg/models/network_test.go @@ -108,8 +108,8 @@ func TestDomainMatching(t *testing.T) { {require.Less, "zzz.com", "foo.com"}, {require.Greater, "aaa.com", "foo.com"}, {require.Equal, "FOO.com", "foo.COM"}, - {require.Less, "bfoo.com", "afoo.com"}, - {require.Greater, "afoo.com", "bfoo.com"}, + {require.Less, "bFoo.com", "aFoo.com"}, + {require.Greater, "aFoo.com", "bFoo.com"}, {require.Less, "x-foo.com", ".foo.com"}, } diff --git a/pkg/nats/proxy/compute_proxy.go b/pkg/nats/proxy/compute_proxy.go index 498eb94b13..97d2bebb32 100644 --- a/pkg/nats/proxy/compute_proxy.go +++ b/pkg/nats/proxy/compute_proxy.go @@ -6,12 +6,13 @@ import ( "fmt" "time" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog/log" + "github.com/bacalhau-project/bacalhau/pkg/compute" "github.com/bacalhau-project/bacalhau/pkg/lib/concurrency" "github.com/bacalhau-project/bacalhau/pkg/models" "github.com/bacalhau-project/bacalhau/pkg/nats/stream" - "github.com/nats-io/nats.go" - "github.com/rs/zerolog/log" ) const ( @@ -35,7 +36,7 @@ func NewComputeProxy(params ComputeProxyParams) (*ComputeProxy, error) { sc, err := stream.NewConsumerClient(stream.ConsumerClientParams{ Conn: params.Conn, Config: stream.StreamConsumerClientConfig{ - StreamCancellationBufferDuration: 5 * time.Second, //nolinter:gomnd + StreamCancellationBufferDuration: 5 * time.Second, //nolint:gomnd }, }) if err != nil { diff --git a/pkg/nats/stream/consumer_client.go b/pkg/nats/stream/consumer_client.go index f996763482..f4dea71f1e 100644 --- a/pkg/nats/stream/consumer_client.go +++ b/pkg/nats/stream/consumer_client.go @@ -9,11 +9,12 @@ import ( "sync" "time" - "github.com/bacalhau-project/bacalhau/pkg/lib/concurrency" "github.com/nats-io/nats.go" "github.com/nats-io/nuid" "github.com/rs/zerolog/log" "github.com/samber/lo" + + "github.com/bacalhau-project/bacalhau/pkg/lib/concurrency" ) // RequestChanLen Default request channel length for buffering asynchronous results. @@ -26,7 +27,7 @@ const ( heartBeatPrefix = "_HEARTBEAT" inboxPrefixLen = len(inboxPrefix) replySuffixLen = 8 // Gives us 62^8 - rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + rDigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" base = 62 nuidSize = 22 ) @@ -219,7 +220,7 @@ func (nc *ConsumerClient) newRespInbox() string { rn := nc.respRand.Int63() for i := 0; i < replySuffixLen; i++ { - sb.WriteByte(rdigits[rn%base]) + sb.WriteByte(rDigits[rn%base]) rn /= base } diff --git a/pkg/nats/stream/types.go b/pkg/nats/stream/types.go index d3cc84ca3b..f6bf8be717 100644 --- a/pkg/nats/stream/types.go +++ b/pkg/nats/stream/types.go @@ -49,7 +49,7 @@ type StreamInfo struct { // CreatedAt represents the time the stream was created. CreatedAt time.Time // Function to cancel the stream. This is useful in the event the consumer client - // is no longer interested in the stream. The cancel function is inovked informing the + // is no longer interested in the stream. The cancel function is invoked informing the // producer to no longer serve the stream. 
Cancel context.CancelFunc } diff --git a/pkg/nats/transport/nats.go b/pkg/nats/transport/nats.go index 5c1bff4c8f..e96c5bac86 100644 --- a/pkg/nats/transport/nats.go +++ b/pkg/nats/transport/nats.go @@ -264,7 +264,7 @@ func (t *NATSTransport) CallbackProxy() compute.Callback { return t.callbackProxy } -// RegistrationProxy returns the previoously created registration proxy. +// ManagementProxy returns the previously created registration proxy. func (t *NATSTransport) ManagementProxy() compute.ManagementEndpoint { return t.managementProxy } diff --git a/pkg/node/factories.go b/pkg/node/factories.go index 325f25225f..d10fa144b1 100644 --- a/pkg/node/factories.go +++ b/pkg/node/factories.go @@ -14,7 +14,6 @@ import ( baccrypto "github.com/bacalhau-project/bacalhau/pkg/lib/crypto" "github.com/bacalhau-project/bacalhau/pkg/lib/policy" "github.com/bacalhau-project/bacalhau/pkg/lib/provider" - "github.com/bacalhau-project/bacalhau/pkg/models" "github.com/bacalhau-project/bacalhau/pkg/publisher" publisher_util "github.com/bacalhau-project/bacalhau/pkg/publisher/util" "github.com/bacalhau-project/bacalhau/pkg/storage" @@ -76,39 +75,6 @@ func NewStandardExecutorsFactory(cfg types.EngineConfig) ExecutorsFactory { }) } -func NewPluginExecutorFactory(pluginPath string) ExecutorsFactory { - return ExecutorsFactoryFunc( - func(ctx context.Context, nodeConfig NodeConfig) (executor.ExecutorProvider, error) { - pr, err := executor_util.NewPluginExecutorProvider( - ctx, - nodeConfig.CleanupManager, - executor_util.PluginExecutorOptions{ - Plugins: []executor_util.PluginExecutorManagerConfig{ - { - Name: models.EngineDocker, - Path: pluginPath, - Command: "bacalhau-docker-executor", - ProtocolVersion: 1, - MagicCookieKey: "EXECUTOR_PLUGIN", - MagicCookieValue: "bacalhau_executor", - }, - { - Name: models.EngineWasm, - Path: pluginPath, - Command: "bacalhau-wasm-executor", - ProtocolVersion: 1, - MagicCookieKey: "EXECUTOR_PLUGIN", - MagicCookieValue: "bacalhau_executor", - }, - }, - }) - if err != nil { - return nil, err - } - return provider.NewConfiguredProvider(pr, nodeConfig.BacalhauConfig.Engines.Disabled), err - }) -} - func NewStandardPublishersFactory(cfg types.Bacalhau) PublishersFactory { return PublishersFactoryFunc( func( @@ -130,7 +96,7 @@ func NewStandardAuthenticatorsFactory(userKey *baccrypto.UserKey) Authenticators func(ctx context.Context, nodeConfig NodeConfig) (authn.Provider, error) { var allErr error - authns := make(map[string]authn.Authenticator, len(nodeConfig.BacalhauConfig.API.Auth.Methods)) + auths := make(map[string]authn.Authenticator, len(nodeConfig.BacalhauConfig.API.Auth.Methods)) for name, authnConfig := range nodeConfig.BacalhauConfig.API.Auth.Methods { switch authnConfig.Type { case string(authn.MethodTypeChallenge): @@ -140,7 +106,7 @@ func NewStandardAuthenticatorsFactory(userKey *baccrypto.UserKey) Authenticators continue } - authns[name] = challenge.NewAuthenticator( + auths[name] = challenge.NewAuthenticator( methodPolicy, challenge.NewStringMarshaller(nodeConfig.NodeID), userKey.PrivateKey(), @@ -153,7 +119,7 @@ func NewStandardAuthenticatorsFactory(userKey *baccrypto.UserKey) Authenticators continue } - authns[name] = ask.NewAuthenticator( + auths[name] = ask.NewAuthenticator( methodPolicy, userKey.PrivateKey(), nodeConfig.NodeID, @@ -163,7 +129,7 @@ func NewStandardAuthenticatorsFactory(userKey *baccrypto.UserKey) Authenticators } } - return provider.NewMappedProvider(authns), allErr + return provider.NewMappedProvider(auths), allErr }, ) } diff --git 
a/pkg/node/node.go b/pkg/node/node.go index 0c6e8f370b..663c09e350 100644 --- a/pkg/node/node.go +++ b/pkg/node/node.go @@ -65,19 +65,6 @@ type NodeDependencyInjector struct { AuthenticatorsFactory AuthenticatorsFactory } -func NewExecutorPluginNodeDependencyInjector( - cfg types.Bacalhau, - userKey *baccrypto.UserKey, - pluginPath string, -) NodeDependencyInjector { - return NodeDependencyInjector{ - StorageProvidersFactory: NewStandardStorageProvidersFactory(cfg), - ExecutorsFactory: NewPluginExecutorFactory(pluginPath), - PublishersFactory: NewStandardPublishersFactory(cfg), - AuthenticatorsFactory: NewStandardAuthenticatorsFactory(userKey), - } -} - func NewStandardNodeDependencyInjector(cfg types.Bacalhau, userKey *baccrypto.UserKey) NodeDependencyInjector { return NodeDependencyInjector{ StorageProvidersFactory: NewStandardStorageProvidersFactory(cfg), diff --git a/pkg/node/requester.go b/pkg/node/requester.go index 026f58a756..376f9b0760 100644 --- a/pkg/node/requester.go +++ b/pkg/node/requester.go @@ -10,7 +10,6 @@ import ( "github.com/bacalhau-project/bacalhau/pkg/bacerrors" "github.com/bacalhau-project/bacalhau/pkg/compute" - "github.com/bacalhau-project/bacalhau/pkg/eventhandler" "github.com/bacalhau-project/bacalhau/pkg/jobstore" boltjobstore "github.com/bacalhau-project/bacalhau/pkg/jobstore/boltdb" "github.com/bacalhau-project/bacalhau/pkg/lib/ncl" @@ -37,7 +36,6 @@ import ( "github.com/bacalhau-project/bacalhau/pkg/routing/tracing" s3helper "github.com/bacalhau-project/bacalhau/pkg/s3" "github.com/bacalhau-project/bacalhau/pkg/system" - "github.com/bacalhau-project/bacalhau/pkg/translation" ) var ( @@ -74,14 +72,6 @@ func NewRequesterNode( return nil, err } - // prepare event handlers - tracerContextProvider := eventhandler.NewTracerContextProvider(nodeID) - localJobEventConsumer := eventhandler.NewChainedJobEventHandler(tracerContextProvider) - - eventEmitter := orchestrator.NewEventEmitter(orchestrator.EventEmitterParams{ - EventConsumer: localJobEventConsumer, - }) - jobStore, err := createJobStore(ctx, cfg) if err != nil { return nil, err @@ -118,12 +108,6 @@ func NewRequesterNode( JobStore: jobStore, }), - // planner that publishes events on job completion or failure - planner.NewEventEmitter(planner.EventEmitterParams{ - ID: nodeID, - EventEmitter: eventEmitter, - }), - // logs job completion or failure planner.NewLoggingPlanner(), ) @@ -208,11 +192,6 @@ func NewRequesterNode( resultTransformers = append(resultTransformers, resultSigner) } - var translationProvider translation.TranslatorProvider - if cfg.BacalhauConfig.FeatureFlags.ExecTranslation { - translationProvider = translation.NewStandardTranslatorsProvider() - } - jobTransformers := transformer.ChainedTransformer[*models.Job]{ transformer.JobFn(transformer.IDGenerator), transformer.NameOptional(), @@ -225,10 +204,8 @@ func NewRequesterNode( endpointV2 := orchestrator.NewBaseEndpoint(&orchestrator.BaseEndpointParams{ ID: nodeID, Store: jobStore, - EventEmitter: eventEmitter, ComputeProxy: computeProxy, JobTransformer: jobTransformers, - TaskTranslator: translationProvider, ResultTransformer: resultTransformers, }) @@ -266,12 +243,6 @@ func NewRequesterNode( ) auth_endpoint.BindEndpoint(ctx, apiServer.Router, authenticators) - // order of event handlers is important as triggering some handlers might depend on the state of others. 
- localJobEventConsumer.AddHandlers( - // ends the span for the job if received a terminal event - tracerContextProvider, - ) - // ncl subscriber, err := ncl.NewSubscriber(transportLayer.Client(), ncl.WithSubscriberMessageSerDeRegistry(messageSerDeRegistry), @@ -300,10 +271,6 @@ func NewRequesterNode( } evalBroker.SetEnabled(false) - cleanupErr = tracerContextProvider.Shutdown() - if cleanupErr != nil { - logDebugIfContextCancelled(ctx, cleanupErr, "failed to shutdown tracer context provider") - } // Close the jobstore after the evaluation broker is disabled cleanupErr = jobStore.Close(ctx) if cleanupErr != nil { @@ -315,9 +282,8 @@ func NewRequesterNode( // It provides the compute call back endpoints for interacting with compute nodes. // e.g. bidding, job completions, cancellations, and failures callback := orchestrator.NewCallback(&orchestrator.CallbackParams{ - ID: nodeID, - EventEmitter: eventEmitter, - Store: jobStore, + ID: nodeID, + Store: jobStore, }) if err = transportLayer.RegisterComputeCallback(callback); err != nil { return nil, err diff --git a/pkg/node/utils.go b/pkg/node/utils.go index 7da646fc0e..05e72aca00 100644 --- a/pkg/node/utils.go +++ b/pkg/node/utils.go @@ -56,7 +56,7 @@ func getTLSCertificate(cfg types.Bacalhau) (string, string, error) { return "", "", err } else if caCert, err := crypto.NewSelfSignedCertificate(privKey, false, ips); err != nil { return "", "", errors.Wrap(err, "failed to generate server certificate") - } else if err = caCert.MarshalCertficate(certFile); err != nil { + } else if err = caCert.MarshalCertificate(certFile); err != nil { return "", "", errors.Wrap(err, "failed to write server certificate") } cert = certFile.Name() diff --git a/pkg/orchestrator/callback.go b/pkg/orchestrator/callback.go index 4550bf8369..6be49b86b4 100644 --- a/pkg/orchestrator/callback.go +++ b/pkg/orchestrator/callback.go @@ -13,23 +13,20 @@ import ( ) type CallbackParams struct { - ID string - Store jobstore.Store - EventEmitter EventEmitter + ID string + Store jobstore.Store } // Callback base implementation of requester Endpoint type Callback struct { - id string - store jobstore.Store - eventEmitter EventEmitter + id string + store jobstore.Store } func NewCallback(params *CallbackParams) *Callback { return &Callback{ - id: params.ID, - store: params.Store, - eventEmitter: params.EventEmitter, + id: params.ID, + store: params.Store, } } @@ -96,16 +93,11 @@ func (e *Callback) OnBidComplete(ctx context.Context, response compute.BidResult log.Ctx(ctx).Error().Err(err).Msgf("[OnBidComplete] failed to commit transaction") return } - - if response.Accepted { - e.eventEmitter.EmitBidReceived(ctx, response) - } } func (e *Callback) OnRunComplete(ctx context.Context, result compute.RunResult) { log.Ctx(ctx).Debug().Msgf("Requester node %s received RunComplete for execution: %s from %s", e.id, result.ExecutionID, result.SourcePeerID) - e.eventEmitter.EmitRunComplete(ctx, result) txContext, err := e.store.BeginTx(ctx) if err != nil { @@ -223,8 +215,6 @@ func (e *Callback) OnComputeFailure(ctx context.Context, result compute.ComputeE log.Ctx(ctx).Error().Err(err).Msgf("[OnComputeFailure] failed to commit transaction") return } - - e.eventEmitter.EmitComputeFailure(ctx, result.ExecutionID, result) } // enqueueEvaluation enqueues an evaluation to allow the scheduler to either accept the bid, or find a new node diff --git a/pkg/orchestrator/endpoint.go b/pkg/orchestrator/endpoint.go index dfe1a5da90..fd5a02fbf4 100644 --- a/pkg/orchestrator/endpoint.go +++ 
b/pkg/orchestrator/endpoint.go @@ -2,13 +2,10 @@ package orchestrator import ( "context" - "encoding/base64" "fmt" "time" "github.com/google/uuid" - "github.com/pkg/errors" - "sigs.k8s.io/yaml" "github.com/bacalhau-project/bacalhau/pkg/analytics" "github.com/bacalhau-project/bacalhau/pkg/bacerrors" @@ -18,26 +15,21 @@ import ( "github.com/bacalhau-project/bacalhau/pkg/lib/concurrency" "github.com/bacalhau-project/bacalhau/pkg/models" "github.com/bacalhau-project/bacalhau/pkg/orchestrator/transformer" - "github.com/bacalhau-project/bacalhau/pkg/translation" ) type BaseEndpointParams struct { ID string Store jobstore.Store - EventEmitter EventEmitter ComputeProxy compute.Endpoint JobTransformer transformer.JobTransformer - TaskTranslator translation.TranslatorProvider ResultTransformer transformer.ResultTransformer } type BaseEndpoint struct { id string store jobstore.Store - eventEmitter EventEmitter computeProxy compute.Endpoint jobTransformer transformer.JobTransformer - taskTranslator translation.TranslatorProvider resultTransformer transformer.ResultTransformer } @@ -45,10 +37,8 @@ func NewBaseEndpoint(params *BaseEndpointParams) *BaseEndpoint { return &BaseEndpoint{ id: params.ID, store: params.Store, - eventEmitter: params.EventEmitter, computeProxy: params.ComputeProxy, jobTransformer: params.JobTransformer, - taskTranslator: params.TaskTranslator, resultTransformer: params.ResultTransformer, } } @@ -76,34 +66,6 @@ func (e *BaseEndpoint) SubmitJob(ctx context.Context, request *SubmitJobRequest) } submitEvent.JobID = job.ID - var translationEvent models.Event - - // We will only perform task translation in the orchestrator if we were provided with a provider - // that can give translators to perform the translation. - if e.taskTranslator != nil { - // Before we create an evaluation for the job, we want to check that none of the job's tasks - // need translating from a custom job type to a known job type (docker, wasm). If they do, - // then we will perform the translation and create the evaluation for the new job instead. - translatedJob, err := translation.Translate(ctx, e.taskTranslator, job) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to translate job type: %s", job.Task().Engine.Type)) - } - - // If we have translated the job (i.e. at least one task was translated) then we will record the original - // job that was used to create the translated job. This will allow us to track the provenance of the job - // when using `describe` and will ensure only the original job is returned when using `list`. 
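For reviewers tracing the removal: the provenance record was a base64-encoded YAML copy of the job document, stored under `MetaDerivedFrom` (the constant deleted from `constants.go` above). A sketch of how such a record could be unpacked; the helper name is hypothetical:

```go
// decodeDerivedFrom reverses the recording step shown above:
// base64 -> YAML -> models.Job. Returns nil if the job carries no
// provenance metadata.
func decodeDerivedFrom(job *models.Job) (*models.Job, error) {
	encoded, ok := job.Meta[models.MetaDerivedFrom]
	if !ok {
		return nil, nil // job was not translated
	}
	raw, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}
	var recorded models.Job
	if err := yaml.Unmarshal(raw, &recorded); err != nil {
		return nil, err
	}
	return &recorded, nil
}
```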
- if translatedJob != nil { - if b, err := yaml.Marshal(translatedJob); err != nil { - return nil, errors.Wrap(err, "failure converting job to JSON") - } else { - translatedJob.Meta[models.MetaDerivedFrom] = base64.StdEncoding.EncodeToString(b) - translationEvent = JobTranslatedEvent(job, translatedJob) - } - - job = translatedJob - } - } - txContext, err := e.store.BeginTx(ctx) if err != nil { return nil, fmt.Errorf("failed to begin transaction: %w", err) @@ -121,11 +83,6 @@ func (e *BaseEndpoint) SubmitJob(ctx context.Context, request *SubmitJobRequest) if err = e.store.AddJobHistory(txContext, job.ID, JobSubmittedEvent()); err != nil { return nil, err } - if translationEvent.Message != "" { - if err = e.store.AddJobHistory(txContext, job.ID, translationEvent); err != nil { - return nil, err - } - } eval := &models.Evaluation{ ID: uuid.NewString(), @@ -145,7 +102,6 @@ func (e *BaseEndpoint) SubmitJob(ctx context.Context, request *SubmitJobRequest) return nil, err } - e.eventEmitter.EmitJobCreated(ctx, *job) return &SubmitJobResponse{ JobID: job.ID, EvaluationID: eval.ID, @@ -223,12 +179,6 @@ func (e *BaseEndpoint) StopJob(ctx context.Context, request *StopJobRequest) (St return StopJobResponse{}, err } - e.eventEmitter.EmitEventSilently(ctx, models.JobEvent{ - JobID: request.JobID, - EventName: models.JobEventCanceled, - Status: request.Reason, - EventTime: time.Now(), - }) return StopJobResponse{ EvaluationID: evalID, }, nil diff --git a/pkg/orchestrator/event_emitter.go b/pkg/orchestrator/event_emitter.go deleted file mode 100644 index 513caa25f5..0000000000 --- a/pkg/orchestrator/event_emitter.go +++ /dev/null @@ -1,96 +0,0 @@ -package orchestrator - -import ( - "context" - "time" - - "github.com/rs/zerolog/log" - - "github.com/bacalhau-project/bacalhau/pkg/compute" - "github.com/bacalhau-project/bacalhau/pkg/eventhandler" - "github.com/bacalhau-project/bacalhau/pkg/models" -) - -// A quick workaround to publish job events locally as we still have some types that rely -// on job events to update their states (e.g. localdb) and to take actions (e.g. websockets and logging) -// TODO: create a strongly typed local event emitter, and update localdb directly from -// -// requester instead of consuming events. 
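Context for this deletion: the emitter wrapped a single `eventhandler.JobEventHandler` and used fire-and-forget emission, logging rather than returning handler errors. A minimal usage sketch against the removed API (the `logHandler` type is hypothetical):

```go
// logHandler is a hypothetical eventhandler.JobEventHandler that
// records each event instead of updating state.
type logHandler struct{}

func (logHandler) HandleJobEvent(ctx context.Context, event models.JobEvent) error {
	log.Ctx(ctx).Info().Str("event", event.EventName.String()).Msg("job event")
	return nil
}

func emitExample(ctx context.Context, job models.Job) {
	emitter := orchestrator.NewEventEmitter(orchestrator.EventEmitterParams{
		EventConsumer: logHandler{},
	})
	// EmitJobCreated builds the JobEvent and delegates to
	// EmitEventSilently, which swallows (but logs) handler errors.
	emitter.EmitJobCreated(ctx, job)
}
```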
-type EventEmitterParams struct { - EventConsumer eventhandler.JobEventHandler -} - -type EventEmitter struct { - eventConsumer eventhandler.JobEventHandler -} - -func NewEventEmitter(params EventEmitterParams) EventEmitter { - return EventEmitter{ - eventConsumer: params.EventConsumer, - } -} - -func (e EventEmitter) EmitJobCreated( - ctx context.Context, job models.Job) { - event := models.JobEvent{ - JobID: job.ID, - SourceNodeID: job.Meta[models.MetaRequesterID], - EventName: models.JobEventCreated, - EventTime: time.Now(), - } - e.EmitEventSilently(ctx, event) -} - -func (e EventEmitter) EmitBidReceived( - ctx context.Context, result compute.BidResult) { - e.EmitEventSilently(ctx, e.constructEvent(result.RoutingMetadata, result.ExecutionMetadata, models.JobEventBid)) -} - -func (e EventEmitter) EmitBidAccepted( - ctx context.Context, request compute.BidAcceptedRequest, response compute.BidAcceptedResponse) { - e.EmitEventSilently(ctx, e.constructEvent(request.RoutingMetadata, response.ExecutionMetadata, models.JobEventBidAccepted)) -} - -func (e EventEmitter) EmitBidRejected( - ctx context.Context, request compute.BidRejectedRequest, response compute.BidRejectedResponse) { - e.EmitEventSilently(ctx, e.constructEvent(request.RoutingMetadata, response.ExecutionMetadata, models.JobEventBidRejected)) -} - -func (e EventEmitter) EmitRunComplete(ctx context.Context, response compute.RunResult) { - e.EmitEventSilently(ctx, e.constructEvent(response.RoutingMetadata, response.ExecutionMetadata, models.JobEventResultsProposed)) -} - -func (e EventEmitter) EmitComputeFailure(ctx context.Context, executionID string, err error) { - event := models.JobEvent{ - ExecutionID: executionID, - EventName: models.JobEventComputeError, - Status: err.Error(), - EventTime: time.Now(), - } - e.EmitEventSilently(ctx, event) -} - -func (e EventEmitter) constructEvent( - routingMetadata compute.RoutingMetadata, - executionMetadata compute.ExecutionMetadata, - eventName models.JobEventType) models.JobEvent { - return models.JobEvent{ - TargetNodeID: routingMetadata.TargetPeerID, - SourceNodeID: routingMetadata.SourcePeerID, - JobID: executionMetadata.JobID, - ExecutionID: executionMetadata.ExecutionID, - EventName: eventName, - EventTime: time.Now(), - } -} - -func (e EventEmitter) EmitEvent(ctx context.Context, event models.JobEvent) error { - return e.eventConsumer.HandleJobEvent(ctx, event) -} - -func (e EventEmitter) EmitEventSilently(ctx context.Context, event models.JobEvent) { - err := e.EmitEvent(ctx, event) - if err != nil { - log.Ctx(ctx).Error().Err(err).Msgf("failed to emit event %+v", event) - } -} diff --git a/pkg/orchestrator/planner/event_emitter.go b/pkg/orchestrator/planner/event_emitter.go deleted file mode 100644 index e7ea6bea28..0000000000 --- a/pkg/orchestrator/planner/event_emitter.go +++ /dev/null @@ -1,54 +0,0 @@ -package planner - -import ( - "context" - "time" - - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/orchestrator" -) - -// EventEmitter is a planner implementation that emits events based on the job state. -type EventEmitter struct { - id string - eventEmitter orchestrator.EventEmitter -} - -// EventEmitterParams holds the parameters for creating a new EventEmitter. -type EventEmitterParams struct { - ID string - EventEmitter orchestrator.EventEmitter -} - -// NewEventEmitter creates a new instance of EventEmitter. 
-func NewEventEmitter(params EventEmitterParams) *EventEmitter { - return &EventEmitter{ - id: params.ID, - eventEmitter: params.EventEmitter, - } -} - -// Process updates the state of the executions in the plan according to the scheduler's desired state. -func (s *EventEmitter) Process(ctx context.Context, plan *models.Plan) error { - var eventName models.JobEventType - switch plan.DesiredJobState { - case models.JobStateTypeCompleted: - eventName = models.JobEventCompleted - case models.JobStateTypeFailed: - eventName = models.JobEventError - default: - } - if !eventName.IsUndefined() { - s.eventEmitter.EmitEventSilently(ctx, models.JobEvent{ - SourceNodeID: s.id, - JobID: plan.Job.ID, - Status: plan.UpdateMessage, - EventName: eventName, - EventTime: time.Now(), - }) - } - return nil -} - -// compile-time check whether the EventEmitter implements the Planner interface. -var _ orchestrator.Planner = (*EventEmitter)(nil) diff --git a/pkg/publicapi/apimodels/error.go b/pkg/publicapi/apimodels/error.go index 6d2bc98bfd..81845fdad9 100644 --- a/pkg/publicapi/apimodels/error.go +++ b/pkg/publicapi/apimodels/error.go @@ -2,7 +2,6 @@ package apimodels import ( "encoding/json" - "errors" "fmt" "io" "net/http" @@ -66,24 +65,30 @@ func (e *APIError) Error() string { return e.Message } -// Parse HTTP Resposne to APIError -func FromHttpResponse(resp *http.Response) (*APIError, error) { - +// Parse HTTP Response to APIError +func GenerateAPIErrorFromHTTPResponse(resp *http.Response) *APIError { if resp == nil { - return nil, errors.New("response is nil, cannot be unmarsheld to APIError") + return NewAPIError(0, "API call error, invalid response") } defer resp.Body.Close() body, err := io.ReadAll(resp.Body) if err != nil { - return nil, fmt.Errorf("error reading response body: %w", err) + return NewAPIError( + resp.StatusCode, + fmt.Sprintf("Unable to read API call response body. Error: %q", err.Error())) } var apiErr APIError err = json.Unmarshal(body, &apiErr) if err != nil { - return nil, fmt.Errorf("error parsing response body: %w", err) + return NewAPIError( + resp.StatusCode, + fmt.Sprintf("Unable to parse API call response body. Error: %q. 
Body received: %q", + err.Error(), + string(body), + )) } // If the JSON didn't include a status code, use the HTTP Status @@ -91,7 +96,7 @@ func FromHttpResponse(resp *http.Response) (*APIError, error) { apiErr.HTTPStatusCode = resp.StatusCode } - return &apiErr, nil + return &apiErr } // FromBacError converts a bacerror.Error to an APIError diff --git a/pkg/publicapi/client/v2/client.go b/pkg/publicapi/client/v2/client.go index 18f687ed34..95979028be 100644 --- a/pkg/publicapi/client/v2/client.go +++ b/pkg/publicapi/client/v2/client.go @@ -74,18 +74,12 @@ func (c *httpClient) Get(ctx context.Context, endpoint string, in apimodels.GetR return apimodels.NewUnauthorizedError("invalid token") } - var apiError *apimodels.APIError if resp.StatusCode != http.StatusOK { - apiError, err = apimodels.FromHttpResponse(resp) - if err != nil { - return err + if apiError := apimodels.GenerateAPIErrorFromHTTPResponse(resp); apiError != nil { + return apiError } } - if apiError != nil { - return apiError - } - defer resp.Body.Close() if out != nil { @@ -116,18 +110,12 @@ func (c *httpClient) write(ctx context.Context, verb, endpoint string, in apimod return apimodels.ErrInvalidToken } - var apiError *apimodels.APIError if resp.StatusCode != http.StatusOK { - apiError, err = apimodels.FromHttpResponse(resp) - if err != nil { - return err + if apiError := apimodels.GenerateAPIErrorFromHTTPResponse(resp); apiError != nil { + return apiError } } - if apiError != nil { - return apiError - } - if out != nil { if err := decodeBody(resp, &out); err != nil { return err @@ -362,12 +350,13 @@ func (c *httpClient) interceptError(ctx context.Context, err error, resp *http.R WithCode(bacerrors.UnauthorizedError) } - apiError, apiErr := apimodels.FromHttpResponse(resp) - if apiErr == nil { + apiError := apimodels.GenerateAPIErrorFromHTTPResponse(resp) + if apiError != nil { return apiError.ToBacError() } - return bacerrors.Wrap(apiErr, "server error"). + return bacerrors.New("server error"). + WithHTTPStatusCode(http.StatusInternalServerError). WithCode(bacerrors.InternalError) } diff --git a/pkg/publicapi/endpoint/orchestrator/node.go b/pkg/publicapi/endpoint/orchestrator/node.go index 18a60baa6b..890b4aa135 100644 --- a/pkg/publicapi/endpoint/orchestrator/node.go +++ b/pkg/publicapi/endpoint/orchestrator/node.go @@ -14,7 +14,7 @@ import ( "github.com/bacalhau-project/bacalhau/pkg/util" ) -// godoc for Orchstrator GetNode +// godoc for Orchestrator GetNode // // @ID orchestrator/getNode // @Summary Get an orchestrator node diff --git a/pkg/publicapi/middleware/version.go b/pkg/publicapi/middleware/version.go index bda88e0ad2..292e98c6d2 100644 --- a/pkg/publicapi/middleware/version.go +++ b/pkg/publicapi/middleware/version.go @@ -34,32 +34,32 @@ func VersionNotifyLogger(logger *zerolog.Logger, serverVersion semver.Version) e // instructs logger to extract given list of headers from request. LogHeaders: []string{apimodels.HTTPHeaderBacalhauGitVersion}, LogValuesFunc: func(c echo.Context, v echomiddelware.RequestLoggerValues) error { - notif := Notification{ + notification := Notification{ RequestID: v.RequestID, ClientID: c.Response().Header().Get(apimodels.HTTPHeaderClientID), ServerVersion: serverVersion.String(), } defer func() { - if notif.Message != "" { + if notification.Message != "" { logger.WithLevel(zerolog.DebugLevel). - Str("ClientID", notif.ClientID). - Str("RequestID", notif.RequestID). - Str("ClientVersion", notif.ClientVersion). - Str("ServerVersion", notif.ServerVersion). 
- Msg(notif.Message) + Str("ClientID", notification.ClientID). + Str("RequestID", notification.RequestID). + Str("ClientVersion", notification.ClientVersion). + Str("ServerVersion", notification.ServerVersion). + Msg(notification.Message) } }() cVersion := v.Headers[apimodels.HTTPHeaderBacalhauGitVersion] if len(cVersion) == 0 { // version header is empty, cannot parse it - notif.Message = "received request from client without version" + notification.Message = "received request from client without version" return nil } if len(cVersion) > 1 { // version header contained multiple fields - notif.Message = fmt.Sprintf("received request from client with multiple versions: %s", cVersion) + notification.Message = fmt.Sprintf("received request from client with multiple versions: %s", cVersion) return nil } @@ -67,20 +67,20 @@ func VersionNotifyLogger(logger *zerolog.Logger, serverVersion semver.Version) e clientVersion, err := semver.NewVersion(cVersion[0]) if err != nil { // cannot parse client version, should notify - notif.Message = fmt.Sprintf("received request with invalid client version: %s", cVersion[0]) + notification.Message = fmt.Sprintf("received request with invalid client version: %s", cVersion[0]) return nil } // extract parsed client version for comparison - notif.ClientVersion = clientVersion.String() + notification.ClientVersion = clientVersion.String() diff := serverVersion.Compare(clientVersion) switch diff { case 1: // client version is less than server version - notif.Message = "received request from outdated client" + notification.Message = "received request from outdated client" case -1: // server version is less than client version - notif.Message = "received request from newer client" + notification.Message = "received request from newer client" case 0: // versions are the same, don't notify } diff --git a/pkg/publicapi/middleware/version_test.go b/pkg/publicapi/middleware/version_test.go index f49b1a2d9c..1887b3600a 100644 --- a/pkg/publicapi/middleware/version_test.go +++ b/pkg/publicapi/middleware/version_test.go @@ -103,9 +103,9 @@ func (suite *VersionNotifyTestSuite) TestLogVersionNotify() { if suite.buf.Len() == 0 { suite.Equalf("", tc.expectedMessage, "unexpected notification") } else { - notif := suite.parseMessage(suite.buf.String()) - suite.Contains(notif.Message, tc.expectedMessage) - suite.Equal(tc.expectedClientVersion, notif.ClientVersion) + notification := suite.parseMessage(suite.buf.String()) + suite.Contains(notification.Message, tc.expectedMessage) + suite.Equal(tc.expectedClientVersion, notification.ClientVersion) } }) } diff --git a/pkg/repo/fs.go b/pkg/repo/fs.go index 4e7724bcbe..eff5adcdee 100644 --- a/pkg/repo/fs.go +++ b/pkg/repo/fs.go @@ -208,7 +208,6 @@ func (fsr *FsRepo) WriteRunInfo(ctx context.Context, summaryShellVariablesString func (fsr *FsRepo) EnsureRepoPathsConfigured(c config_legacy.ReadWriter) { c.SetIfAbsent(legacy_types.AuthTokensPath, fsr.join(config_legacy.TokensPath)) c.SetIfAbsent(legacy_types.UserKeyPath, fsr.join(config_legacy.UserPrivateKeyFileName)) - c.SetIfAbsent(legacy_types.NodeExecutorPluginPath, fsr.join(config_legacy.PluginsPath)) // NB(forrest): pay attention to the subtle name difference here c.SetIfAbsent(legacy_types.NodeComputeStoragePath, fsr.join(config_legacy.ComputeStoragesPath)) diff --git a/pkg/repo/migrations/v3_4.go b/pkg/repo/migrations/v3_4.go index 939499a914..23bd35065b 100644 --- a/pkg/repo/migrations/v3_4.go +++ b/pkg/repo/migrations/v3_4.go @@ -57,7 +57,7 @@ func V3MigrationWithConfig(globalCfg 
system.GlobalConfig) repo.Migration { } // update the legacy version file so older versions fail gracefully. if err := r.WriteLegacyVersion(repo.Version4); err != nil { - return fmt.Errorf("updating repo.verion: %w", err) + return fmt.Errorf("updating repo.version: %w", err) } if err := r.WriteLastUpdateCheck(time.UnixMilli(0)); err != nil { return err diff --git a/pkg/repo/migrations/v3_4_test.go b/pkg/repo/migrations/v3_4_test.go index 803226fef7..8cbd9fc39b 100644 --- a/pkg/repo/migrations/v3_4_test.go +++ b/pkg/repo/migrations/v3_4_test.go @@ -75,7 +75,6 @@ func (suite *V3MigrationsTestSuite) TestV3MigrationWithFullRepo() { suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store")) suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store", "nats-store")) suite.FileExists(filepath.Join(suite.TempDir, "orchestrator_store", "jobs.db")) - suite.DirExists(filepath.Join(suite.TempDir, "plugins")) suite.FileExists(filepath.Join(suite.TempDir, "repo.version")) suite.FileExists(filepath.Join(suite.TempDir, "update.json")) suite.FileExists(filepath.Join(suite.TempDir, "user_id.pem")) @@ -186,7 +185,6 @@ func (suite *V3MigrationsTestSuite) TestV3MigrationWithMinimalRepo() { suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store")) suite.NoDirExists(filepath.Join(suite.TempDir, "orchestrator_store", "nats-store")) suite.NoFileExists(filepath.Join(suite.TempDir, "orchestrator_store", "jobs.db")) - suite.DirExists(filepath.Join(suite.TempDir, "plugins")) suite.FileExists(filepath.Join(suite.TempDir, "repo.version")) suite.FileExists(filepath.Join(suite.TempDir, "update.json")) suite.FileExists(filepath.Join(suite.TempDir, "user_id.pem")) @@ -249,7 +247,6 @@ func (suite *V3MigrationsTestSuite) TestV3MigrationWithOrchestratorRepo() { suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store")) suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store", "nats-store")) suite.FileExists(filepath.Join(suite.TempDir, "orchestrator_store", "jobs.db")) - suite.DirExists(filepath.Join(suite.TempDir, "plugins")) suite.FileExists(filepath.Join(suite.TempDir, "repo.version")) suite.FileExists(filepath.Join(suite.TempDir, "update.json")) suite.FileExists(filepath.Join(suite.TempDir, "user_id.pem")) @@ -362,7 +359,6 @@ func (suite *V3MigrationsTestSuite) TestV3MigrationWithComputeRepo() { suite.DirExists(filepath.Join(suite.TempDir, "orchestrator_store")) suite.NoDirExists(filepath.Join(suite.TempDir, "orchestrator_store", "nats-store")) suite.NoFileExists(filepath.Join(suite.TempDir, "orchestrator_store", "jobs.db")) - suite.DirExists(filepath.Join(suite.TempDir, "plugins")) suite.FileExists(filepath.Join(suite.TempDir, "repo.version")) suite.FileExists(filepath.Join(suite.TempDir, "update.json")) suite.FileExists(filepath.Join(suite.TempDir, "user_id.pem")) diff --git a/pkg/routing/tracing/tracing.go b/pkg/routing/tracing/tracing.go index 1efadffe62..ed936e15c0 100644 --- a/pkg/routing/tracing/tracing.go +++ b/pkg/routing/tracing/tracing.go @@ -62,7 +62,7 @@ func (r *NodeStore) GetByPrefix(ctx context.Context, prefix string) (models.Node log.Ctx(ctx).Trace(). Dur("duration", dur). Str("prefix", prefix). 
- Msg("node retrieved by previus") + Msg("node retrieved by previous") }() return r.delegate.GetByPrefix(ctx, prefix) diff --git a/pkg/s3/errors_test.go b/pkg/s3/errors_test.go index 3e72863698..096da13e93 100644 --- a/pkg/s3/errors_test.go +++ b/pkg/s3/errors_test.go @@ -1,5 +1,7 @@ //go:build unit || !integration +/* spell-checker: disable */ + package s3 import ( diff --git a/pkg/storage/inline/storage.go b/pkg/storage/inline/storage.go index 927110ae63..75ca770ff5 100644 --- a/pkg/storage/inline/storage.go +++ b/pkg/storage/inline/storage.go @@ -33,11 +33,12 @@ import ( "os" "path/filepath" + "github.com/c2h5oh/datasize" + "github.com/vincent-petithory/dataurl" + "github.com/bacalhau-project/bacalhau/pkg/models" "github.com/bacalhau-project/bacalhau/pkg/storage" "github.com/bacalhau-project/bacalhau/pkg/util/targzip" - "github.com/c2h5oh/datasize" - "github.com/vincent-petithory/dataurl" ) // The maximum size that will be stored inline without gzip compression. @@ -74,8 +75,8 @@ func (i *InlineStorage) GetVolumeSize(_ context.Context, spec models.InputSource } if data.ContentType() == gzipMimeType { - size, derr := targzip.UncompressedSize(bytes.NewReader(data.Data)) - return size.Bytes(), derr + size, dErr := targzip.UncompressedSize(bytes.NewReader(data.Data)) + return size.Bytes(), dErr } else { return uint64(len(data.Data)), nil } @@ -128,13 +129,13 @@ func (i *InlineStorage) PrepareStorage(_ context.Context, storageDirectory strin return storage.StorageVolume{}, err } - _, werr := tempfile.Write(data.Data) - cerr := tempfile.Close() + _, wErr := tempfile.Write(data.Data) + cErr := tempfile.Close() return storage.StorageVolume{ Type: storage.StorageVolumeConnectorBind, Source: tempfile.Name(), Target: spec.Target, - }, errors.Join(werr, cerr) + }, errors.Join(wErr, cErr) } } diff --git a/pkg/storage/s3/storage_test.go b/pkg/storage/s3/storage_test.go index 5016a01ae9..2aa3519edc 100644 --- a/pkg/storage/s3/storage_test.go +++ b/pkg/storage/s3/storage_test.go @@ -1,5 +1,7 @@ //go:build integration || !unit +/* spell-checker: disable */ + package s3_test import ( diff --git a/pkg/test/compute/resourcelimits_test.go b/pkg/test/compute/resourcelimits_test.go index 43b510904c..f4635b271d 100644 --- a/pkg/test/compute/resourcelimits_test.go +++ b/pkg/test/compute/resourcelimits_test.go @@ -162,7 +162,7 @@ func (suite *ComputeNodeResourceLimitsSuite) TestTotalResourceLimits() { }) require.NoError(suite.T(), err) - // sleep a bit here to simulate jobs being sumbmitted over time + // sleep a bit here to simulate jobs being submitted over time time.Sleep((10 + time.Duration(rand.Intn(10))) * time.Millisecond) } @@ -363,7 +363,7 @@ func (suite *ComputeNodeResourceLimitsSuite) TestParallelGPU() { require.NoError(suite.T(), err) jobIds = append(jobIds, submittedJob.JobID) - // sleep a bit here to simulate jobs being sumbmitted over time + // sleep a bit here to simulate jobs being submitted over time // and to give time for compute nodes to accept and run the jobs // this needs to be less than the time the job lasts // so we are running jobs in parallel diff --git a/pkg/test/executor/test_runner.go b/pkg/test/executor/test_runner.go index a9a24b7dca..acd6bb37ee 100644 --- a/pkg/test/executor/test_runner.go +++ b/pkg/test/executor/test_runner.go @@ -87,9 +87,9 @@ func RunTestCase( execution.AllocateResources(job.Task().Name, models.Resources{}) resultsDirectory := t.TempDir() - strgProvider := stack.Nodes[0].ComputeNode.Storages + storageProvider := stack.Nodes[0].ComputeNode.Storages - 
runCommandArguments, cleanup, err := compute.PrepareRunArguments(ctx, strgProvider, t.TempDir(), execution, resultsDirectory) + runCommandArguments, cleanup, err := compute.PrepareRunArguments(ctx, storageProvider, t.TempDir(), execution, resultsDirectory) require.NoError(t, err) t.Cleanup(func() { if err := cleanup(ctx); err != nil { diff --git a/pkg/test/scenario/resolver.go b/pkg/test/scenario/resolver.go index 9fa1dad27d..0ca4dc75d4 100644 --- a/pkg/test/scenario/resolver.go +++ b/pkg/test/scenario/resolver.go @@ -183,7 +183,7 @@ func GetFilteredExecutionStates(jobState *JobState, filterState models.Execution func WaitForTerminalStates() StateChecks { return func(state *JobState) (bool, error) { for _, executionState := range state.Executions { - if !executionState.ComputeState.StateType.IsTermainl() { + if !executionState.ComputeState.StateType.IsTerminal() { return false, nil } } diff --git a/pkg/test/scenario/responses.go b/pkg/test/scenario/responses.go index b78d2f8eb8..217cb11d12 100644 --- a/pkg/test/scenario/responses.go +++ b/pkg/test/scenario/responses.go @@ -21,7 +21,7 @@ func SubmitJobSuccess() CheckSubmitResponse { return fmt.Errorf("expected job response, got nil") } if len(response.Warnings) > 0 { - return fmt.Errorf("unexpted warnings returned when submitting job: %v", response.Warnings) + return fmt.Errorf("unexpected warnings returned when submitting job: %v", response.Warnings) } return nil } diff --git a/pkg/test/scenario/results.go b/pkg/test/scenario/results.go index 7d2825e8b2..e62ee66428 100644 --- a/pkg/test/scenario/results.go +++ b/pkg/test/scenario/results.go @@ -64,7 +64,7 @@ func FileEquals( } } -// ManyCheckes returns a CheckResults that runs the passed checkers and returns +// ManyChecks returns a CheckResults that runs the passed checkers and returns // an error if any of them fail. func ManyChecks(checks ...CheckResults) CheckResults { return func(resultsDir string) error { diff --git a/pkg/translation/translation.go b/pkg/translation/translation.go deleted file mode 100644 index 56f8bc2c2d..0000000000 --- a/pkg/translation/translation.go +++ /dev/null @@ -1,118 +0,0 @@ -// Package translation provides interfaces for translating from a Job to a -// different Job. This allow us to accept more job types than we have -// executors as we translate from the abstract type to the concrete executor. -// -// When presented with a Job, this package iterates through the tasks -// belonging to the job to determine whether any of the tasks have an -// Engine type that is not one of the core executors (docker or wasm). -// If it does not, then it returns immediately. -// -// For the discovered tasks, the TranslatorProvider is asked to provide an -// implementation of the Translator interface based on the task's engine type. -// The newly obtained Translator processes the task and returns a new task -// with a known engine type (docker or wasm). Depending on where the -// translation occurs, extra work might result in the generation of a derived -// job. - -package translation - -import ( - "context" - "errors" - "fmt" - - "github.com/bacalhau-project/bacalhau/pkg/lib/provider" - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/translation/translators" - "github.com/bacalhau-project/bacalhau/pkg/util/idgen" -) - -// Translator defines what functions are required for a component that -// is able to translate from one job to another. 
It is important that -// implementers ensure that their implementation is reentrant - which -// means it should not use any mutable state after initialization. -type Translator interface { - provider.Providable - - Translate(*models.Task) (*models.Task, error) -} - -// TranslatorProvider is an alias for `provider.Provider[Translator]` -type TranslatorProvider interface { - provider.Provider[Translator] -} - -// NewStandardTranslatorsProvider returns a TranslatorProvider which maps names -// to implementations of the Translator interface -func NewStandardTranslatorsProvider() TranslatorProvider { - return provider.NewMappedProvider(map[string]Translator{ - "python": &translators.PythonTranslator{}, - "duckdb": &translators.DuckDBTranslator{}, - }) -} - -// Translate attempts to translate from one job to another, based on the engine type -// of the tasks in the job. After ensuring that each of the tasks is either a default -// (docker, wasm) or available via the provider, then a new Job is cloned from the -// original and the individual tasks updated. -func Translate(ctx context.Context, provider TranslatorProvider, original *models.Job) (*models.Job, error) { - if shouldTr, err := ShouldTranslate(ctx, provider, original.Tasks); err != nil { - return nil, err - } else { - // Nothing for us to do so we should return immediately - if !shouldTr { - return nil, nil - } - } - - newJob := original.Copy() - newJob.ID = idgen.NewJobID() - - var errs error - - for i := range newJob.Tasks { - task := newJob.Tasks[i] - kind := task.Engine.Type - - if models.IsDefaultEngineType(kind) { - continue // and leave this task in place - } - - if translator, err := provider.Get(ctx, kind); err != nil { - errs = errors.Join(errs, err) - } else { - t, err := translator.Translate(task) - if err != nil { - errs = errors.Join(errs, err) - continue - } - - // Copy the newly translated task over the top of the task - // that was copied from the original job - newJob.Tasks[i] = t - } - } - - return newJob, errs -} - -// ShouldTranslate works out whether we need to carry on with translation, that is -// are there any engine types specified that are not a default engine and we know -// how to translate. If not, then we can exit early. 
-func ShouldTranslate(ctx context.Context, provider TranslatorProvider, tasks []*models.Task) (bool, error) { - var errs error - needTranslationCount := 0 - - for i := range tasks { - kind := tasks[i].Engine.Type - if provider.Has(ctx, kind) { - needTranslationCount += 1 - } else if kind == models.EngineDocker || kind == models.EngineWasm || kind == models.EngineNoop { - continue - } else { - errs = errors.Join(errs, fmt.Errorf("unknown task type identified in translation: '%s'", kind)) - } - } - - return needTranslationCount > 0, errs -} diff --git a/pkg/translation/translation_test.go b/pkg/translation/translation_test.go deleted file mode 100644 index d71a694efb..0000000000 --- a/pkg/translation/translation_test.go +++ /dev/null @@ -1,200 +0,0 @@ -//go:build unit || !integration - -package translation_test - -import ( - "context" - "testing" - - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/translation" - "github.com/stretchr/testify/suite" -) - -type TranslationTestSuite struct { - suite.Suite - ctx context.Context - provider translation.TranslatorProvider -} - -func TestTranslationTestSuite(t *testing.T) { - suite.Run(t, new(TranslationTestSuite)) -} - -func (s *TranslationTestSuite) SetupSuite() { - s.ctx = context.Background() - s.provider = translation.NewStandardTranslatorsProvider() -} - -var testcases = []struct { - name string - spec *models.SpecConfig - expected *models.SpecConfig -}{ - { - name: "python", - spec: &models.SpecConfig{ - Type: "python", - Params: map[string]interface{}{ - "Command": "python", - "Arguments": []interface{}{"-c", "print('Hello, world!')"}, - }, - }, - expected: &models.SpecConfig{ - Type: "docker", - Params: map[string]interface{}{ - "Image": "bacalhauproject/exec-python-3.11:0.5", - "Entrypoint": []string{}, - "Parameters": []string{ - "/build/launcher.py", "--", "python", "-c", "print('Hello, world!')", - }, - "EnvironmentVariables": []string{}, - "WorkingDirectory": "", - }, - }, - }, - { - name: "python with spaces", - spec: &models.SpecConfig{ - Type: "python", - Params: map[string]interface{}{ - "Command": "python", - "Arguments": []interface{}{"-c", `"import this"`}, - }, - }, - expected: &models.SpecConfig{ - Type: "docker", - Params: map[string]interface{}{ - "Image": "bacalhauproject/exec-python-3.11:0.5", - "Entrypoint": []string{}, - "Parameters": []string{ - "/build/launcher.py", "--", "python", "-c", `"import this"`, - }, - "EnvironmentVariables": []string{}, - "WorkingDirectory": "", - }, - }, - }, -} - -func (s *TranslationTestSuite) TestTranslate() { - for _, tc := range testcases { - s.Run(tc.name, func() { - job := &models.Job{ - ID: tc.name, - Tasks: []*models.Task{ - { - Name: "task1", - Engine: tc.spec, - }, - }, - } - - translated, err := translation.Translate(s.ctx, s.provider, job) - s.Require().NoError(err) - - s.Require().Equal(tc.expected, translated.Task().Engine) - }) - } -} - -func (s *TranslationTestSuite) TestTranslateWithInvalidEngine() { - job := &models.Job{ - ID: "invalid_engine", - Tasks: []*models.Task{ - { - Name: "task1", - Engine: &models.SpecConfig{ - Type: "invalid", - }, - }, - }, - } - - _, err := translation.Translate(s.ctx, s.provider, job) - s.Require().Error(err) -} - -func (s *TranslationTestSuite) TestTranslateWithDefaultEngine() { - job := &models.Job{ - ID: "invalid_engine", - Tasks: []*models.Task{ - { - Name: "task1", - Engine: &models.SpecConfig{ - Type: "docker", - }, - }, - }, - } - - translated, err := translation.Translate(s.ctx, s.provider, 
job) - s.Require().NoError(err) - s.Require().Nil(translated) -} - -func (s *TranslationTestSuite) TestTranslateWithMixedEngines() { - job := &models.Job{ - ID: "invalid_engine", - Tasks: []*models.Task{ - { - Name: "task1", - Engine: &models.SpecConfig{ - Type: "docker", - }, - }, - { - Name: "task2", - Engine: &models.SpecConfig{ - Type: "duckdb", - Params: map[string]interface{}{ - "Command": "duckdb", - "Arguments": []interface{}{"-csv", "-c", "select * from table;"}, - }, - }, - }, - }, - } - - translated, err := translation.Translate(s.ctx, s.provider, job) - s.Require().NoError(err) - s.Require().NotNil(translated) - - // Before - s.Require().Equal("docker", job.Tasks[0].Engine.Type) - s.Require().Equal("duckdb", job.Tasks[1].Engine.Type) - - // After - s.Require().Equal("docker", translated.Tasks[0].Engine.Type) - s.Require().Equal("docker", translated.Tasks[1].Engine.Type) -} - -func (s *TranslationTestSuite) TestShouldTranslateWithDefaultEngine() { - tasks := []*models.Task{ - { - Name: "task1", - Engine: &models.SpecConfig{ - Type: "docker", - }, - }, - } - - should, err := translation.ShouldTranslate(s.ctx, s.provider, tasks) - s.Require().NoError(err) - s.Require().False(should) -} - -func (s *TranslationTestSuite) TestShouldTranslateWithNonDefaultEngine() { - tasks := []*models.Task{ - { - Name: "task1", - Engine: &models.SpecConfig{ - Type: "python", - }, - }, - } - - should, err := translation.ShouldTranslate(s.ctx, s.provider, tasks) - s.Require().NoError(err) - s.Require().True(should) -} diff --git a/pkg/translation/translators/duckdb.go b/pkg/translation/translators/duckdb.go deleted file mode 100644 index fb98cf0ebc..0000000000 --- a/pkg/translation/translators/duckdb.go +++ /dev/null @@ -1,54 +0,0 @@ -package translators - -import ( - "context" - - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/util" -) - -const DuckDBImage = "bacalhauproject/exec-duckdb:0.2" - -type DuckDBTranslator struct{} - -func (d *DuckDBTranslator) IsInstalled(context.Context) (bool, error) { - return true, nil -} - -func (d *DuckDBTranslator) Translate(original *models.Task) (*models.Task, error) { - dkrSpec, err := d.dockerEngine(original.Engine) - if err != nil { - return nil, err - } - - builder := original. - ToBuilder(). - Meta(models.MetaTranslatedBy, "translators/duckdb"). - Engine(dkrSpec) - - return builder.BuildOrDie(), nil -} - -func (d *DuckDBTranslator) dockerEngine(origin *models.SpecConfig) (*models.SpecConfig, error) { - // It'd be nice to use pkg/executor/docker/types/EngineSpec here, but it - // would mean adding a dependency on yet another package. - cmd := origin.Params["Command"].(string) - args, err := util.InterfaceToStringArray(origin.Params["Arguments"]) - if err != nil { - return nil, err - } - - params := []string{} - - params = append(params, cmd) - params = append(params, args...) 
- - spec := models.NewSpecConfig(models.EngineDocker) - spec.Params["Image"] = DuckDBImage - spec.Params["Entrypoint"] = []string{} - spec.Params["Parameters"] = params - spec.Params["EnvironmentVariables"] = []string{} - spec.Params["WorkingDirectory"] = "" - - return spec, nil -} diff --git a/pkg/translation/translators/errors.go b/pkg/translation/translators/errors.go deleted file mode 100644 index fa50464ca7..0000000000 --- a/pkg/translation/translators/errors.go +++ /dev/null @@ -1,7 +0,0 @@ -package translators - -import "fmt" - -func ErrMissingParameters(trs string) error { - return fmt.Errorf("missing parameters in task for '%s' translator", trs) -} diff --git a/pkg/translation/translators/python.go b/pkg/translation/translators/python.go deleted file mode 100644 index 6ad4d27a52..0000000000 --- a/pkg/translation/translators/python.go +++ /dev/null @@ -1,103 +0,0 @@ -package translators - -import ( - "context" - "fmt" - - "github.com/bacalhau-project/bacalhau/pkg/models" - "github.com/bacalhau-project/bacalhau/pkg/util" - "golang.org/x/exp/maps" -) - -// PythonPackageDomains lists all of the domains that might be needed to install -// dependencies at runtime. -var PythonPackageDomains = []string{ - "pypi.python.org", - "pypi.org", - "pythonhosted.org", - "files.pythonhosted.org", - "repo.anaconda.com", - "repo.continuum.io", - "conda.anaconda.org", -} - -// SupportedPythonVersions maps the python version to the docker image that -// provides support for that version. -var SupportedPythonVersions = map[string]string{ - "3.11": "bacalhauproject/exec-python-3.11:0.5", -} - -type PythonTranslator struct{} - -func (p *PythonTranslator) IsInstalled(context.Context) (bool, error) { - return true, nil -} - -func (p *PythonTranslator) Translate(original *models.Task) (*models.Task, error) { - dkrSpec, err := p.dockerEngine(original.Engine) - if err != nil { - return nil, err - } - - builder := original. - ToBuilder(). - Meta(models.MetaTranslatedBy, "translators/python"). - Engine(dkrSpec) - - original.Network = &models.NetworkConfig{ - Type: models.NetworkHTTP, - Domains: PythonPackageDomains, - } - - return builder.BuildOrDie(), nil -} - -func (p *PythonTranslator) dockerEngine(origin *models.SpecConfig) (*models.SpecConfig, error) { - // It'd be nice to use pkg/executor/docker/types/EngineSpec here, but it - // would mean adding a dependency on yet another package. - cmd := origin.Params["Command"].(string) - args, err := util.InterfaceToStringArray(origin.Params["Arguments"]) - if err != nil { - return nil, err - } - - versionString := "3.11" // Default version - version := origin.Params["Version"] - if version != nil { - versionString = version.(string) - } - - image, err := getImageName(versionString) - if err != nil { - return nil, err - } - - params := []string{ - "/build/launcher.py", "--", - } - - params = append(params, cmd) - params = append(params, args...) 
- - spec := models.NewSpecConfig(models.EngineDocker) - spec.Params["Image"] = image - spec.Params["Entrypoint"] = []string{} - spec.Params["Parameters"] = params - spec.Params["EnvironmentVariables"] = []string{} - spec.Params["WorkingDirectory"] = "" - - return spec, nil -} - -func getImageName(version string) (string, error) { - image, found := SupportedPythonVersions[version] - if !found { - supported := "" - versions := maps.Keys(SupportedPythonVersions) - for i := range versions { - supported += fmt.Sprintf(" * %s\n", versions[i]) - } - return "", fmt.Errorf("unsupported python version: %s\nsupported versions are:\n%s", version, supported) - } - return image, nil -} diff --git a/pkg/util/conversion.go b/pkg/util/conversion.go deleted file mode 100644 index 216f30e440..0000000000 --- a/pkg/util/conversion.go +++ /dev/null @@ -1,30 +0,0 @@ -package util - -import "fmt" - -// InterfaceToStringArray converts an interface{} that we know is a []string -// to that []string via []interface{}. This is useful when we have a map[string]interface{} -// and we want to get the []string{} out of it. -func InterfaceToStringArray(source interface{}) ([]string, error) { - if source == nil { - return nil, nil - } - - // // If it is already a string array, then return it. - // strArray, ok := source.([]string) - // if ok { - // return strArray, nil - // } - - sourceArray, ok := source.([]interface{}) - if !ok { - return nil, fmt.Errorf("expected []interface{} but got %T", source) - } - - result := make([]string, len(sourceArray)) - for i, v := range sourceArray { - result[i] = fmt.Sprint(v) - } - - return result, nil -} diff --git a/pkg/util/conversion_test.go b/pkg/util/conversion_test.go deleted file mode 100644 index 1a3ec4f735..0000000000 --- a/pkg/util/conversion_test.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build unit || !integration - -package util_test - -import ( - "testing" - - "github.com/bacalhau-project/bacalhau/pkg/util" - "github.com/stretchr/testify/require" -) - -func TestInterfaceToStringArray(t *testing.T) { - testcases := []struct { - name string - source interface{} - expected []string - shouldError bool - }{ - { - name: "nil", - source: nil, - expected: nil, - shouldError: false, - }, - { - name: "empty", - source: []interface{}{}, - expected: []string{}, - shouldError: false, - }, - { - name: "string", - source: []interface{}{"foo"}, - expected: []string{"foo"}, - shouldError: false, - }, - { - name: "int", - source: []interface{}{1}, - expected: []string{"1"}, - shouldError: false, - }, - { - name: "float", - source: []interface{}{1.1}, - expected: []string{"1.1"}, - shouldError: false, - }, - { - name: "bool", - source: []interface{}{true}, - expected: []string{"true"}, - shouldError: false, - }, - { - name: "mixed", - source: []interface{}{"foo", 1, 1.1, true}, - expected: []string{"foo", "1", "1.1", "true"}, - shouldError: false, - }, - { - name: "map", - source: map[string]interface{}{"foo": "bar"}, - expected: nil, - shouldError: true, - }, - { - name: "string array", - source: []interface{}{"foo", "bar"}, - expected: []string{"foo", "bar"}, - shouldError: false, - }, - { - name: "int array", - source: []interface{}{1, 2}, - expected: []string{"1", "2"}, - shouldError: false, - }, - } - - for _, tc := range testcases { - t.Run(tc.name, func(t *testing.T) { - actual, err := util.InterfaceToStringArray(tc.source) - if tc.shouldError { - require.Error(t, err) - return - } - - require.NoError(t, err) - require.Equal(t, tc.expected, actual) - }) - } -} diff --git 
a/pkg/util/idgen/short_id_test.go b/pkg/util/idgen/short_id_test.go index 36079088cc..230d08471b 100644 --- a/pkg/util/idgen/short_id_test.go +++ b/pkg/util/idgen/short_id_test.go @@ -1,5 +1,7 @@ //go:build unit || !integration +/* spell-checker: disable */ + package idgen import ( diff --git a/plugins/Makefile b/plugins/Makefile deleted file mode 100644 index 6c250ebdef..0000000000 --- a/plugins/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -TOPTARGETS := build clean - -SUBDIRS := executors - -$(TOPTARGETS): $(SUBDIRS) -$(SUBDIRS): - @echo "$@:" - @$(MAKE) -C $@ $(MAKECMDGOALS) - -.PHONY: $(TOPTARGETS) $(SUBDIRS) diff --git a/plugins/README.md b/plugins/README.md deleted file mode 100644 index f497a80a02..0000000000 --- a/plugins/README.md +++ /dev/null @@ -1,9 +0,0 @@ - -# Plugins - -This directory is the root directory for various plugins for Bacalhau. - - -## Executors - -The [executors plugins](./executors/README.md) implement the default pluggable executors for Bacalhau, along with the protobuf IDL required to implement new pluggable executors. diff --git a/plugins/executors/Makefile b/plugins/executors/Makefile deleted file mode 100644 index bc905f6934..0000000000 --- a/plugins/executors/Makefile +++ /dev/null @@ -1,26 +0,0 @@ -PLUGINS = -TMP_OUT = ../../ - -# As this is building Go specific libraries, it is expected -# that this will live inside each PLUGINS which is Go-based, -# but is implemented here to allow us to iteratively move the -# existing plugins. Requires the installation of -# -# $ go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28 -# $ go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2 -# -# We should move this task to the individual plugins that require -# it once they exist. -idl: executor.proto - @echo " Building protos" - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - protoc --proto_path=. --go-grpc_out=$(TMP_OUT) --go_out=$(TMP_OUT) executor.proto - -build: idl - @echo " No plugins to build yet" - -clean: - @echo " Executors: Nothing to clean" diff --git a/plugins/executors/README.md b/plugins/executors/README.md deleted file mode 100644 index d7e03af3c7..0000000000 --- a/plugins/executors/README.md +++ /dev/null @@ -1,19 +0,0 @@ - -# Executor plugins - -Executors are responsible for running jobs in Bacalhau, and this directory is the home of the builtin Bacalhau plugins for Docker and WebAssembly jobs. - - -## WIP: Building plugins - -To build the default plugins once they are implemented here, you can use the provided Makefile. - -```sh -# Build -make build - -# Cleanup -make clean -``` - -This will run the build, and clean, tasks respectively for each plugin that lives in this directory. diff --git a/plugins/executors/executor.proto b/plugins/executors/executor.proto deleted file mode 100644 index 5783cbb14b..0000000000 --- a/plugins/executors/executor.proto +++ /dev/null @@ -1,75 +0,0 @@ - -syntax = "proto3"; -package proto; - -option go_package = "./pkg/executor/plugins/grpc/proto"; - -// TODO: Complete these structure, rather than merely wrapping serialized JSON bytes in protobuf containers. 
-// Details in: https://github.com/bacalhau-project/bacalhau/issues/2700 - -message StartResponse { - -} - -message RunCommandRequest { - bytes Params = 1; -} - -message RunCommandResponse { - bytes Params = 1; -} - -message CancelCommandRequest { - string ExecutionID =1; -} - -message CancelCommandResponse { - -} - -message IsInstalledRequest { - -} - -message IsInstalledResponse { - bool Installed = 1; -} - -message ShouldBidRequest { - bytes BidRequest = 1; -} - -message ShouldBidBasedOnUsageRequest { - bytes BidRequest = 1; - bytes Usage = 2; -} - -// shared by both semantic and resource bid -message ShouldBidResponse { - bytes BidResponse = 1; -} - -message OutputStreamRequest { - string ExecutionID = 1; - bool History = 2; - bool Follow = 3; -} - -message OutputStreamResponse { - bytes Data =1; -} - -message WaitRequest { - string ExecutionID = 1; -} - -service Executor { - rpc Run(RunCommandRequest) returns (RunCommandResponse); - rpc Start(RunCommandRequest) returns (StartResponse); - rpc Wait(WaitRequest) returns (stream RunCommandResponse); - rpc Cancel(CancelCommandRequest) returns (CancelCommandResponse); - rpc IsInstalled(IsInstalledRequest) returns (IsInstalledResponse); - rpc ShouldBid(ShouldBidRequest) returns (ShouldBidResponse); - rpc ShouldBidBasedOnUsage(ShouldBidBasedOnUsageRequest) returns (ShouldBidResponse); - rpc GetOutputStream(OutputStreamRequest) returns (stream OutputStreamResponse); -} diff --git a/python/mkdocs.yml b/python/mkdocs.yml index 0b6a73d46a..3710c19320 100644 --- a/python/mkdocs.yml +++ b/python/mkdocs.yml @@ -1,4 +1,4 @@ -site_name: Bacalahu SDK +site_name: Bacalhau SDK site_url: https://github.com/bacalhau-project/bacalhau repo_url: https://github.com/bacalhau-project/bacalhau/python repo_name: bacalhau-project/bacalhau-sdk diff --git a/test-integration/Dockerfile-ClientNode b/test-integration/Dockerfile-ClientNode new file mode 100644 index 0000000000..da31f340f7 --- /dev/null +++ b/test-integration/Dockerfile-ClientNode @@ -0,0 +1,27 @@ +# Use the docker:dind image as the base image +FROM docker:dind + +# Set the working directory +WORKDIR /app + +# Install curl and bash +RUN apk update && apk add --no-cache curl bash + +# Install the ca-certificates package +RUN apk add --no-cache ca-certificates + +# Copy a root ca into the image +COPY certificates/generated_assets/bacalhau_test_root_ca.crt /usr/local/share/ca-certificates/bacalhau_test_root_ca.crt + +# Update CA certificates +RUN update-ca-certificates + +# Download and execute the Bash script from the given URL +RUN curl -sSL https://get.bacalhau.org/install.sh | bash + +# Download the binary, make it executable, and move it to /usr/local/bin +RUN curl -o /tmp/mc https://dl.min.io/client/mc/release/linux-amd64/mc \ + && chmod +x /tmp/mc \ + && mv /tmp/mc /usr/local/bin/ + +ENTRYPOINT ["dockerd-entrypoint.sh"] diff --git a/test-integration/Dockerfile-ComputeNode b/test-integration/Dockerfile-ComputeNode new file mode 100644 index 0000000000..7a6cc4ebaa --- /dev/null +++ b/test-integration/Dockerfile-ComputeNode @@ -0,0 +1,24 @@ +# Use the docker:dind image as the base image +FROM docker:dind + +# Set the working directory +WORKDIR /app + +# Install curl and bash +RUN apk update && apk add --no-cache curl bash + +# Install the ca-certificates package +RUN apk add --no-cache ca-certificates + +# Copy a root ca into the image +COPY certificates/generated_assets/bacalhau_test_root_ca.crt /usr/local/share/ca-certificates/bacalhau_test_root_ca.crt + +# Update CA certificates +RUN 
update-ca-certificates
+
+# Download and execute the Bash script from the given URL
+RUN curl -sSL https://get.bacalhau.org/install.sh | bash
+
+COPY compute_node_image_setup.sh compute_node_image_setup.sh
+ENTRYPOINT ["/usr/bin/env"]
+CMD ./compute_node_image_setup.sh
diff --git a/test-integration/Dockerfile-DockerImageRegistryNode b/test-integration/Dockerfile-DockerImageRegistryNode
new file mode 100644
index 0000000000..9c38ba886e
--- /dev/null
+++ b/test-integration/Dockerfile-DockerImageRegistryNode
@@ -0,0 +1,24 @@
+FROM registry:2
+
+# Install curl and bash
+RUN apk update && apk add --no-cache curl bash
+
+# Install the ca-certificates package
+RUN apk add --no-cache ca-certificates
+
+# Copy a root ca into the image
+COPY certificates/generated_assets/bacalhau_test_root_ca.crt /usr/local/share/ca-certificates/bacalhau_test_root_ca.crt
+
+# Create a directory to store certificates to be used by the registry
+RUN mkdir /certs
+
+# Copy the certificate and key from the local directory to /certs
+COPY certificates/generated_assets/bacalhau-container-img-registry-node.crt /certs/
+COPY certificates/generated_assets/bacalhau-container-img-registry-node.key /certs/
+
+# Ensure proper permissions for certs
+RUN chmod 600 /certs/bacalhau-container-img-registry-node.key
+RUN chmod 644 /certs/bacalhau-container-img-registry-node.crt
+
+# Expose the registry's default port
+EXPOSE 5000 443
diff --git a/test-integration/Dockerfile-RequesterNode b/test-integration/Dockerfile-RequesterNode
new file mode 100644
index 0000000000..cbbd207c32
--- /dev/null
+++ b/test-integration/Dockerfile-RequesterNode
@@ -0,0 +1,22 @@
+# Use the docker:dind image as the base image
+FROM docker:dind
+
+# Set the working directory
+WORKDIR /app
+
+# Install curl and bash
+RUN apk update && apk add --no-cache curl bash
+
+# Install the ca-certificates package
+RUN apk add --no-cache ca-certificates
+
+# Copy a root ca into the image
+COPY certificates/generated_assets/bacalhau_test_root_ca.crt /usr/local/share/ca-certificates/bacalhau_test_root_ca.crt
+
+# Update CA certificates
+RUN update-ca-certificates
+
+# Download and execute the Bash script from the given URL
+RUN curl -sSL https://get.bacalhau.org/install.sh | bash
+
+ENTRYPOINT ["dockerd-entrypoint.sh"]
diff --git a/test-integration/README.md b/test-integration/README.md
new file mode 100644
index 0000000000..48f9eabed4
--- /dev/null
+++ b/test-integration/README.md
@@ -0,0 +1,198 @@
+# Running Bacalhau on Docker
+
+## Overview
+
+Since Bacalhau is a distributed system with multiple components, it is critical to have a reliable method for end-to-end testing. Additionally, it's important that these tests closely resemble a real production environment without relying on mocks.
+
+This setup addresses those needs by running Bacalhau inside containers while also supporting Docker workloads within these containers (using Docker-in-Docker, or DinD).
+
+## Architecture
+
+- A Requester Docker container, running Bacalhau as a requester node.
+- A Compute Docker container, running Bacalhau as a compute node, configured to run Docker containers inside it.
+- A Bacalhau Client Docker container to act as a jumpbox to interact with this Bacalhau deployment.
+- A [Registry](https://github.com/distribution/distribution/) Docker container to act as the local container image registry.
+- A Minio Docker container to support running S3-compatible input/output jobs.
+- Docker Compose is used to create 5 services: the Requester Node, the Compute Node, the Client CLI Node, the Registry Node, and the Minio Node.
+- All the services are connected to the same Docker network, allowing them to communicate over the bridged network.
+- All the containers have an injected custom Certificate Authority, which is used for a portion of the internal TLS communication.
+  - TODO: Expand the TLS setup to more components. Currently it is used only for registry communication.
+
+## Setup
+
+---
+### Build the Docker Images
+
+Build the Requester Node image:
+```shell
+docker build -f Dockerfile-RequesterNode -t bacalhau-requester-node-image .
+```
+
+Build the Compute Node image:
+```shell
+docker build -f Dockerfile-ComputeNode -t bacalhau-compute-node-image .
+```
+
+Build the Client Node image:
+```shell
+docker build -f Dockerfile-ClientNode -t bacalhau-client-node-image .
+```
+
+Build the Registry Node image:
+```shell
+docker build -f Dockerfile-DockerImageRegistryNode -t bacalhau-container-img-registry-node-image .
+```
+
+After running these commands, you should see the above images created:
+```shell
+docker image ls
+```
+---
+### Running the Setup
+
+Run Docker Compose:
+```shell
+docker-compose up
+```
+
+Access the utility client container to use the Bacalhau CLI:
+```shell
+docker exec -it bacalhau-client-node-container /bin/bash
+```
+
+Once inside the container, you can run the following commands to verify the setup:
+```shell
+# You should see two nodes: a Requester node and a Compute node
+bacalhau node list
+```
+
+Run a test workload:
+```shell
+bacalhau docker run hello-world
+
+# Describe the job; it should have completed successfully.
+bacalhau job describe ........
+```
+
+In another terminal window, you can follow the logs of the Requester and Compute nodes:
+```shell
+docker logs bacalhau-requester-node-container -f
+docker logs bacalhau-compute-node-container -f
+```
+
+---
+### Setting Up Minio
+
+Access the utility client container to use the Bacalhau CLI:
+```shell
+docker exec -it bacalhau-client-node-container /bin/bash
+```
+
+Set up an alias for the Minio CLI:
+```shell
+# The environment variables are already injected into
+# the container; no need to replace them yourself.
+mc alias set bacalhau-minio "http://${BACALHAU_MINIO_NODE_HOST}:9000" "${MINIO_ROOT_USER}" "${MINIO_ROOT_PASSWORD}"
+mc admin info bacalhau-minio
+```
+
+Create a bucket and add some files:
+```shell
+mc mb bacalhau-minio/my-data-bucket
+mc ls bacalhau-minio/my-data-bucket/section1/
+echo "This is a sample text hello hello." > example.txt
+mc cp example.txt bacalhau-minio/my-data-bucket/section1/
+```
+
+Run a job with data input from the Minio bucket:
+
+```shell
+# Content of aws-test-job.yaml below
+bacalhau job run aws-test-job.yaml
+```
+
+```yaml
+Name: S3 Job Data Access Test
+Type: batch
+Count: 1
+Tasks:
+  - Name: main
+    Engine:
+      Type: docker
+      Params:
+        Image: ubuntu:latest
+        Entrypoint:
+          - /bin/bash
+        Parameters:
+          - "-c"
+          - "cat /put-my-s3-data-here/example.txt"
+    InputSources:
+      - Target: "/put-my-s3-data-here"
+        Source:
+          Type: s3
+          Params:
+            Bucket: "my-data-bucket"
+            Key: "section1/"
+            Endpoint: "http://bacalhau-minio-node:9000"
+            Region: "us-east-1" # If no region is added, the job fails, even for Minio
+```
+
+---
+### Setting Up the Private Registry
+
+This Docker Compose deployment has a private registry running on its own node. It allows us to
+create tests and experiment with Docker image jobs without needing to use DockerHub in any way.
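+
+Since the custom root CA is installed in every node image, the registry's TLS endpoint can be reached without any insecure-registry configuration. As a quick, optional sanity check (a suggested step, assuming the registry certificate was generated for the `bacalhau-container-img-registry-node` hostname, as the certificate file names suggest), you can query the registry catalog from inside the client container:
+
+```shell
+# A fresh deployment should return an empty repository list: {"repositories":[]}
+curl https://bacalhau-container-img-registry-node:5000/v2/_catalog
+```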
+
+From inside the client container, let's pull an image from DockerHub, push it to our own private registry,
+and then run a Docker job that uses the image from our private registry.
+
+```shell
+# pull from docker hub
+docker pull ubuntu
+
+# tag the image to prepare it to be pushed to our private registry
+docker image tag ubuntu bacalhau-container-img-registry-node:5000/firstbacalhauimage
+
+# push the image to our private registry
+docker push bacalhau-container-img-registry-node:5000/firstbacalhauimage
+```
+
+Now, let's create a job that references that image in the private registry:
+
+```shell
+# Content of private-registry-test-job.yaml below
+bacalhau job run private-registry-test-job.yaml
+```
+
+```yaml
+Name: Job to test using local registry images
+Type: batch
+Count: 1
+Tasks:
+  - Name: main
+    Engine:
+      Type: docker
+      Params:
+        Image: bacalhau-container-img-registry-node:5000/firstbacalhauimage
+        Entrypoint:
+          - /bin/bash
+        Parameters:
+          - "-c"
+          - "echo test-local-registry"
+```
+
+---
+### Notes
+
+If, after running `docker-compose up`, the image registry node fails to start, try removing the image registry Docker volume:
+
+```shell
+# Destroy the deployment
+docker-compose down
+
+# Remove registry volume
+docker volume rm test-integration_registry-volume
+
+# Create deployment again
+docker-compose up
+```
diff --git a/test-integration/certificates/README.md b/test-integration/certificates/README.md
new file mode 100644
index 0000000000..f993908841
--- /dev/null
+++ b/test-integration/certificates/README.md
@@ -0,0 +1,9 @@
+# Certificate Generation
+
+The script in this folder generates leaf certificates signed by the root CA, using the CN and SAN
+you provide. The generated certificates are written to the `generated_assets` directory.
+
+Usage: `./generate_leaf_certs.sh <common-name>`
+```shell
+./generate_leaf_certs.sh my-bacalhau-requester-node
+```
diff --git a/test-integration/certificates/generate_leaf_certs.sh b/test-integration/certificates/generate_leaf_certs.sh
new file mode 100755
index 0000000000..0411adc9d3
--- /dev/null
+++ b/test-integration/certificates/generate_leaf_certs.sh
@@ -0,0 +1,71 @@
+#!/bin/bash
+
+# Set variables
+ROOT_CA_CERT="generated_assets/bacalhau_test_root_ca.crt"
+ROOT_CA_KEY="generated_assets/bacalhau_test_root_ca.key"
+DAYS_VALID=1825 # 5 years
+
+# Organization name and country (same as before)
+ORG_NAME="Bacalhau"
+COUNTRY="US"
+
+# Check if the input argument is provided
+if [[ -z "$1" ]]; then
+    echo "Error: Please provide a string for the Common Name and Subject Alternative Names."
+    exit 1
+fi
+
+COMMON_NAME="$1"
+OUTPUT_CERT="generated_assets/${COMMON_NAME}.crt"
+OUTPUT_KEY="generated_assets/${COMMON_NAME}.key"
+CSR_PATH="generated_assets/${COMMON_NAME}.csr"
+CNF_PATH="generated_assets/${COMMON_NAME}.cnf"
+
+# Check if the files already exist
+if [[ -f "${OUTPUT_CERT}" ]] || [[ -f "${OUTPUT_KEY}" ]]; then
+    echo "Error: One or both of the following files already exist:"
+    [[ -f "${OUTPUT_CERT}" ]] && echo "  - ${OUTPUT_CERT}"
+    [[ -f "${OUTPUT_KEY}" ]] && echo "  - ${OUTPUT_KEY}"
+    echo "Please remove or rename the existing files before running this script."
+    exit 1
+fi
+
+# Generate a private key for the new certificate
+echo "Generating certificate signed by the root CA..."
+openssl genpkey -algorithm RSA -out "${OUTPUT_KEY}" -pkeyopt rsa_keygen_bits:4096
+
+# Create an OpenSSL configuration file for the SAN
+cat > "${CNF_PATH}" </dev/null 2>&1; then
+    echo "dockerd is available! 
Now Starting Bacalhau as a compute node" + bacalhau config set compute.auth.token="${NETWORK_AUTH_TOKEN}" + bacalhau serve --compute -c compute.orchestrators="nats://${REQUESTER_NODE_LINK}:4222" + # Wait for any process to exit + wait -n + + # Exit with status of process that exited first + exit $? + fi + + # Wait before retrying + echo "dockerd is not available yet. Retrying in ${RETRY_INTERVAL} seconds..." + sleep "${RETRY_INTERVAL}" + + # Increment attempt counter + attempt=$((attempt + 1)) +done + +echo "dockerd did not become available within ${TOTAL_WAIT_TIME_FOR_DOCKERD} seconds." +exit 1 diff --git a/test-integration/docker-compose.yml b/test-integration/docker-compose.yml new file mode 100644 index 0000000000..2340fba1a6 --- /dev/null +++ b/test-integration/docker-compose.yml @@ -0,0 +1,117 @@ +x-common-env-variables: &common-env-variables + NETWORK_AUTH_TOKEN: "i_am_very_secret_token" + BACALHAU_API_PORT: "1234" + MINIO_ROOT_USER: "minioadmin" + MINIO_ROOT_PASSWORD: "minioadminpass" + AWS_ACCESS_KEY_ID: "minioadmin" + AWS_SECRET_ACCESS_KEY: "minioadminpass" + +networks: + bacalhau-network: + driver: bridge + +volumes: + minio-volume: + driver: local + registry-volume: + driver: local + +services: + bacalhau-minio-node: + image: quay.io/minio/minio + container_name: bacalhau-minio-node-container + command: server /data --console-address ":9001" + volumes: + - minio-volume:/data + restart: always + networks: + - bacalhau-network + environment: *common-env-variables + healthcheck: + test: [ "CMD", "curl", "-f", "http://localhost:9000/minio/health/live" ] + interval: 1s + timeout: 5s + retries: 30 + start_period: 2s + + bacalhau-container-img-registry-node: + image: bacalhau-container-img-registry-node-image + container_name: bacalhau-container-img-registry-container + volumes: + - registry-volume:/var/lib/registry + restart: always + networks: + - bacalhau-network + environment: + REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY: /var/lib/registry + REGISTRY_HTTP_ADDR: "0.0.0.0:5000" + REGISTRY_HTTP_TLS_CERTIFICATE: "/certs/bacalhau-container-img-registry-node.crt" + REGISTRY_HTTP_TLS_KEY: "/certs/bacalhau-container-img-registry-node.key" + healthcheck: + test: [ "CMD-SHELL", "nc -zv localhost 5000" ] + interval: 1s + timeout: 5s + retries: 30 + start_period: 2s + + bacalhau-requester-node: + image: bacalhau-requester-node-image + container_name: bacalhau-requester-node-container + networks: + - bacalhau-network + environment: *common-env-variables + depends_on: + bacalhau-minio-node: + condition: service_healthy + privileged: true + command: + - /bin/bash + - -c + - | + bacalhau config set "orchestrator.auth.token" "$${NETWORK_AUTH_TOKEN}" && bacalhau serve --orchestrator -c api.port=$${BACALHAU_API_PORT} + healthcheck: + test: [ "CMD-SHELL", "nc -zv localhost 1234" ] + interval: 1s + timeout: 5s + retries: 30 + start_period: 2s + + bacalhau-compute-node: + image: bacalhau-compute-node-image + container_name: bacalhau-compute-node-container + privileged: true + networks: + - bacalhau-network + depends_on: + bacalhau-requester-node: + condition: service_healthy + bacalhau-container-img-registry-node: + condition: service_healthy + environment: + <<: *common-env-variables + REQUESTER_NODE_LINK: 'bacalhau-requester-node' + healthcheck: + test: [ "CMD-SHELL", "nc -zv localhost 1234" ] + interval: 1s + timeout: 5s + retries: 30 + start_period: 2s + + bacalhau-client-node: + image: bacalhau-client-node-image + container_name: bacalhau-client-node-container + privileged: true + networks: + - 
bacalhau-network + depends_on: + bacalhau-requester-node: + condition: service_healthy + bacalhau-compute-node: + condition: service_healthy + bacalhau-container-img-registry-node: + condition: service_healthy + environment: + <<: *common-env-variables + BACALHAU_API_HOST: 'bacalhau-requester-node' + BACALHAU_COMPUTE_NODE_HOST: 'bacalhau-compute-node' + BACALHAU_MINIO_NODE_HOST: 'bacalhau-minio-node'
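+
+# Bring-up sketch (assumes the four node images were built as described in test-integration/README.md):
+#
+#   docker-compose up -d
+#   docker exec -it bacalhau-client-node-container /bin/bash
+#   bacalhau node list   # run inside the client container; should list the requester and compute nodes
+#
+# Tear down with `docker-compose down`; add `-v` to also remove the minio and registry volumes.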