diff --git a/Dockerfile b/Dockerfile index 07d9c53..64e579e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,37 +1,46 @@ -FROM golang:1.20 as build-stage +# syntax=docker/dockerfile:1 -WORKDIR /fn - -COPY go.mod go.sum ./ -RUN go mod download - -COPY input/ ./input -COPY *.go ./ - -RUN CGO_ENABLED=0 go build -o /function . - -FROM debian:12.1-slim as package-stage - -# TODO(negz): Use a proper Crossplane package building tool. We're abusing the -# fact that this image won't have an io.crossplane.pkg: base annotation. This -# means Crossplane package manager will pull this entire ~100MB image, which -# also happens to contain a valid Function runtime. -# https://github.com/crossplane/crossplane/blob/v1.13.2/contributing/specifications/xpkg.md -WORKDIR /package -COPY package/ ./ +# We use the latest Go 1.x version unless asked to use something else. +# The GitHub Actions CI job sets this argument for a consistent Go version. +ARG GO_VERSION=1 -RUN cat crossplane.yaml > /package.yaml -RUN cat input/*.yaml >> /package.yaml +# Setup the base environment. The BUILDPLATFORM is set automatically by Docker. +# The --platform=${BUILDPLATFORM} flag tells Docker to build the function using +# the OS and architecture of the host running the build, not the OS and +# architecture that we're building the function for. +FROM --platform=${BUILDPLATFORM} golang:${GO_VERSION} AS build -FROM gcr.io/distroless/base-debian11 AS build-release-stage +WORKDIR /fn +# Most functions don't want or need CGo support, so we disable it. +ENV CGO_ENABLED=0 + +# We run go mod download in a separate step so that we can cache its results. +# This lets us avoid re-downloading modules if we don't need to. The type=target +# mount tells Docker to mount the current directory read-only in the WORKDIR. +# The type=cache mount tells Docker to cache the Go modules cache across builds. +RUN --mount=target=. \
--mount=type=cache,target=/go/pkg/mod go mod download + +# The TARGETOS and TARGETARCH args are set by docker. We set GOOS and GOARCH to +# these values to ask Go to compile a binary for these architectures. If +# TARGETOS and TARGETARCH are different from BUILDPLATFORM, Go will cross compile +# for us (e.g. compile a linux/amd64 binary on a linux/arm64 build machine). +ARG TARGETOS +ARG TARGETARCH + +# Build the function binary. The type=target mount tells Docker to mount the +# current directory read-only in the WORKDIR. The type=cache mount tells Docker +# to cache the Go modules cache across builds. +RUN --mount=target=. \ + --mount=type=cache,target=/go/pkg/mod \ + --mount=type=cache,target=/root/.cache/go-build \ + GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /function . + +# Produce the Function image. We use a very lightweight 'distroless' image that +# does not include any of the build tools used in previous stages. +FROM gcr.io/distroless/base-debian11 AS image WORKDIR / - -COPY --from=build-stage /function /function -COPY --from=package-stage /package.yaml /package.yaml - +COPY --from=build /function /function EXPOSE 9443 - USER nonroot:nonroot - ENTRYPOINT ["/function"] diff --git a/awsapi.go b/awsapi.go index 8389360..f6b2d15 100644 --- a/awsapi.go +++ b/awsapi.go @@ -133,11 +133,12 @@ func (f *Function) CreateAWSNodegroupSpec(cluster, namespace, region, providerCo } var object *unstructured.Unstructured - if object, err = composite.ToUnstructuredKubernetesObject(awsmmp, f.composite.Spec.KubernetesProviderConfigRef); err != nil { + if object, err = composite.ToUnstructuredKubernetesObject(awsmmp, f.composite.Spec.ClusterProviderConfigRef); err != nil { f.log.Debug(fmt.Sprintf("failed to convert nodegroup %q to kubernetes object for cluster %q.", nodegroup, *cluster), "error was", err) continue } + f.log.Info("Adding nodegroup to required resources", "nodegroup", nodegroupName) if err = f.composed.AddDesired(nodegroupName, object); err != nil { 
f.log.Info(composedName, "add machinepool", errors.Wrap(err, "cannot add composed object "+nodegroupName)) continue diff --git a/fn.go b/fn.go index 7e05297..78ed181 100644 --- a/fn.go +++ b/fn.go @@ -11,7 +11,7 @@ import ( "github.com/giantswarm/xfnlib/pkg/composite" ) -const composedName = "function-describe-nodegroups" +const composedName = "crossplane-fn-describe-nodegroups" // RunFunction Execute the desired reconcilliation state, creating any required resources func (f *Function) RunFunction(_ context.Context, req *fnv1beta1.RunFunctionRequest) (rsp *fnv1beta1.RunFunctionResponse, err error) { @@ -34,7 +34,7 @@ func (f *Function) RunFunction(_ context.Context, req *fnv1beta1.RunFunctionRequ namespace *string = &f.composite.Spec.ClaimRef.Namespace region *string = &f.composite.Spec.Region provider *string = &f.composite.Spec.CompositionSelector.MatchLabels.Provider - providerConfigRef *string = &f.composite.Spec.AwsProviderConfigRef + providerConfigRef *string = &f.composite.Spec.CloudProviderConfigRef labels map[string]string = f.composite.Metadata.Labels annotations map[string]string = map[string]string{ @@ -56,9 +56,9 @@ func (f *Function) RunFunction(_ context.Context, req *fnv1beta1.RunFunctionRequ return rsp, nil } case "azure": - break + f.log.Info("Azure provider is not yet implemented") case "gcp": - break + f.log.Info("GCP provider is not yet implemented") } if err = f.composed.ToResponse(rsp); err != nil { diff --git a/types.go b/types.go index a7d4d8c..a140cce 100644 --- a/types.go +++ b/types.go @@ -31,15 +31,15 @@ type EksImportXRObject struct { } type XRSpec struct { - KubernetesAdditionalLabels map[string]string `json:"kubernetesAdditionalLabels"` - Labels map[string]string `json:"labels"` - AwsProviderConfigRef string `json:"cloudProviderConfigRef"` - ClusterName string `json:"clusterName"` - DeletionPolicy string `json:"deletionPolicy"` - KubernetesProviderConfigRef string `json:"clusterProviderConfigRef"` - Region string 
`json:"regionOrLocation"` - ResourceGroupName string `json:"resourceGroupName,omitempty"` - ClaimRef struct { + KubernetesAdditionalLabels map[string]string `json:"kubernetesAdditionalLabels"` + Labels map[string]string `json:"labels"` + CloudProviderConfigRef string `json:"cloudProviderConfigRef"` + ClusterName string `json:"clusterName"` + DeletionPolicy string `json:"deletionPolicy"` + ClusterProviderConfigRef string `json:"clusterProviderConfigRef"` + Region string `json:"regionOrLocation"` + ResourceGroupName string `json:"resourceGroupName,omitempty"` + ClaimRef struct { Namespace string `json:"namespace"` } `json:"claimRef"`