chore: Preparation for incoming static code analysis CI check #15164

Open
wants to merge 20 commits into main
2 changes: 1 addition & 1 deletion clients/cmd/docker-driver/driver.go
@@ -88,7 +88,7 @@ func (d *driver) StartLogging(file string, logCtx logger.Info) error {

var jsonl logger.Logger
if !noFile {
- if err := os.MkdirAll(folder, 0755); err != nil {
+ if err := os.MkdirAll(folder, 0750); err != nil {
return errors.Wrap(err, "error setting up logger dir")
}
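For readers less familiar with octal modes, a quick illustrative sketch (not part of this diff) of what the tightening here and in the later hunks amounts to: 0750 keeps owner and group access and removes every permission bit for "other".

package main

import (
	"fmt"
	"os"
)

func main() {
	fmt.Println(os.FileMode(0o755)) // -rwxr-xr-x: "other" users can still enter and list the directory
	fmt.Println(os.FileMode(0o750)) // -rwxr-x---: what this PR switches to; "other" loses all access
}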

2 changes: 1 addition & 1 deletion clients/cmd/docker-driver/main.go
@@ -40,7 +40,7 @@ func main() {
pprofPort := os.Getenv("PPROF_PORT")
if pprofPort != "" {
go func() {
- err := http.ListenAndServe(fmt.Sprintf(":%s", pprofPort), nil)
+ err := http.ListenAndServe(fmt.Sprintf(":%s", pprofPort), nil) //#nosec G114 -- This is a debug feature that must be intentionally enabled and is not used in prod, DOS is not a concern.
logger.Log("msg", "http server stopped", "err", err)
}()
}
2 changes: 1 addition & 1 deletion clients/cmd/fluent-bit/dque.go
@@ -59,7 +59,7 @@ func newDque(cfg *config, logger log.Logger, metrics *client.Metrics) (client.Cl
logger: log.With(logger, "component", "queue", "name", cfg.bufferConfig.dqueConfig.queueName),
}

- err = os.MkdirAll(cfg.bufferConfig.dqueConfig.queueDir, 0644)
+ err = os.MkdirAll(cfg.bufferConfig.dqueConfig.queueDir, 0640)
if err != nil {
return nil, fmt.Errorf("cannot create queue directory: %s", err)
}
6 changes: 4 additions & 2 deletions clients/pkg/promtail/promtail.go
@@ -1,7 +1,6 @@
package promtail

import (
"crypto/md5"
"errors"
"fmt"
"os"
@@ -10,6 +9,8 @@ import (
"syscall"
"time"

"golang.org/x/crypto/sha3"

"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
@@ -130,7 +131,8 @@ func (p *Promtail) reloadConfig(cfg *config.Config) error {
return errConfigNotChange
}
newConf := cfg.String()
- level.Info(p.logger).Log("msg", "Reloading configuration file", "md5sum", fmt.Sprintf("%x", md5.Sum([]byte(newConf))))
+ hash := sha3.Sum256([]byte(newConf))
+ level.Info(p.logger).Log("msg", "Reloading configuration file", "sha3sum", fmt.Sprintf("%x", hash))
if p.targetManagers != nil {
p.targetManagers.Stop()
}
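A minimal, self-contained sketch of the hashing swap above (the helper name is mine, not from the PR): sha3.Sum256 returns a [32]byte digest, so the same %x formatting keeps working while the static-analysis warning about crypto/md5 goes away.

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

// configFingerprint is a hypothetical helper mirroring reloadConfig: it hashes
// the rendered configuration so a reload can be logged and compared.
func configFingerprint(rendered string) string {
	sum := sha3.Sum256([]byte(rendered)) // [32]byte, vs the [16]byte md5.Sum used before
	return fmt.Sprintf("%x", sum)
}

func main() {
	fmt.Println(configFingerprint("server:\n  http_listen_port: 9080\n"))
}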
2 changes: 1 addition & 1 deletion clients/pkg/promtail/targets/kafka/authentication.go
@@ -13,7 +13,7 @@ import (

func createTLSConfig(cfg promconfig.TLSConfig) (*tls.Config, error) {
tc := &tls.Config{
- InsecureSkipVerify: cfg.InsecureSkipVerify,
+ InsecureSkipVerify: cfg.InsecureSkipVerify, //#nosec G402 -- User has explicitly requested to disable TLS
ServerName: cfg.ServerName,
}
// load ca cert
2 changes: 1 addition & 1 deletion clients/pkg/promtail/targets/testutils/testutils.go
@@ -16,7 +16,7 @@ var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func RandName() string {
b := make([]rune, 10)
for i := range b {
- b[i] = letters[randomGenerator.Intn(len(letters))]
+ b[i] = letters[randomGenerator.Intn(len(letters))] //#nosec G404 -- Generating random test data, fine.
}
return string(b)
}
2 changes: 1 addition & 1 deletion cmd/chunks-inspect/main.go
@@ -112,7 +112,7 @@ func printFile(filename string, blockDetails, printLines, storeBlocks bool) {
}

func writeBlockToFile(data []byte, blockIndex int, filename string) {
- err := os.WriteFile(filename, data, 0644)
+ err := os.WriteFile(filename, data, 0640) // #nosec G306 -- this is fencing off the "other" permissions
if err != nil {
log.Println("Failed to store block", blockIndex, "to file", filename, "due to error:", err)
} else {
12 changes: 10 additions & 2 deletions cmd/loki-canary/main.go
@@ -208,8 +208,16 @@ func main() {
})
http.Handle("/metrics", promhttp.Handler())
go func() {
- err := http.ListenAndServe(":"+strconv.Itoa(*port), nil)
- if err != nil {
+ srv := &http.Server{
+ Addr: ":" + strconv.Itoa(*port),
+ Handler: nil, // uses default mux from http.Handle calls above
+ ReadTimeout: 120 * time.Second,
+ WriteTimeout: 120 * time.Second,
+ IdleTimeout: 120 * time.Second,
+ ReadHeaderTimeout: 120 * time.Second,
+ }
+ err := srv.ListenAndServe()
+ if err != nil && err != http.ErrServerClosed {
panic(err)
}
}()
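For context, a hedged sketch of why the explicit http.Server matters: gosec G114 flags the bare http.ListenAndServe helper because the default server has no timeouts, and ListenAndServe returns http.ErrServerClosed after a graceful Shutdown, which is why the new error check treats that value as a clean exit. Port and timeout values below are illustrative only.

package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr:              ":3500",            // illustrative port, not the canary's flag wiring
		ReadHeaderTimeout: 120 * time.Second,  // bounds slow header writes (Slowloris-style clients)
		ReadTimeout:       120 * time.Second,
		WriteTimeout:      120 * time.Second,
		IdleTimeout:       120 * time.Second,
	}

	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	// A graceful shutdown makes ListenAndServe return http.ErrServerClosed,
	// so that sentinel should not be treated as a failure.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_ = srv.Shutdown(ctx)
}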
2 changes: 1 addition & 1 deletion cmd/migrate/main.go
@@ -53,7 +53,7 @@ func main() {
flag.Parse()

go func() {
- log.Println(http.ListenAndServe("localhost:8080", nil))
+ log.Println(http.ListenAndServe("localhost:8080", nil)) //#nosec G114 -- This is only bound to localhost, not a plausible DOS vector.
}()

// Create a set of defaults
2 changes: 1 addition & 1 deletion integration/client/client.go
@@ -237,7 +237,7 @@ func (c *Client) Get(path string) (*http.Response, error) {
// Get all the metrics
func (c *Client) Metrics() (string, error) {
url := fmt.Sprintf("%s/metrics", c.baseURL)
- res, err := http.Get(url)
+ res, err := http.Get(url) //#nosec G107 -- Intentionally taking user input from config
if err != nil {
return "", err
}
6 changes: 3 additions & 3 deletions integration/cluster/cluster.go
@@ -176,7 +176,7 @@ func New(logLevel level.Value, opts ...func(*Cluster)) *Cluster {

overridesFile := filepath.Join(sharedPath, "loki-overrides.yaml")

- err = os.WriteFile(overridesFile, []byte(`overrides:`), 0o777)
+ err = os.WriteFile(overridesFile, []byte(`overrides:`), 0640) // #nosec G306 -- this is fencing off the "other" permissions
if err != nil {
panic(fmt.Errorf("error creating overrides file: %w", err))
}
@@ -348,7 +348,7 @@ func (c *Component) writeConfig() error {
return fmt.Errorf("error getting merged config: %w", err)
}

- if err := os.WriteFile(configFile.Name(), mergedConfig, 0o644); err != nil {
+ if err := os.WriteFile(configFile.Name(), mergedConfig, 0640); err != nil { // #nosec G306 -- this is fencing off the "other" permissions
return fmt.Errorf("error writing config file: %w", err)
}

@@ -525,7 +525,7 @@ func (c *Component) SetTenantLimits(tenant string, limits validation.Limits) err
return err
}

- return os.WriteFile(c.overridesFile, config, 0o777)
+ return os.WriteFile(c.overridesFile, config, 0640) // #nosec G306 -- this is fencing off the "other" permissions
}

func (c *Component) GetTenantLimits(tenant string) validation.Limits {
6 changes: 3 additions & 3 deletions integration/cluster/ruler.go
@@ -33,17 +33,17 @@ func (c *Component) WithTenantRules(tenantFilesMap map[string]map[string]string)
sharedPath := c.ClusterSharedPath()
rulesPath := filepath.Join(sharedPath, "rules")

- if err := os.Mkdir(rulesPath, 0755); err != nil {
+ if err := os.Mkdir(rulesPath, 0750); err != nil {
return fmt.Errorf("error creating rules path: %w", err)
}

for tenant, files := range tenantFilesMap {
for filename, file := range files {
path := filepath.Join(rulesPath, tenant)
- if err := os.Mkdir(path, 0755); err != nil {
+ if err := os.Mkdir(path, 0750); err != nil {
return fmt.Errorf("error creating tenant %s rules path: %w", tenant, err)
}
- if err := os.WriteFile(filepath.Join(path, filename), []byte(strings.TrimSpace(file)), 0644); err != nil {
+ if err := os.WriteFile(filepath.Join(path, filename), []byte(strings.TrimSpace(file)), 0640); err != nil { // #nosec G306 -- this is fencing off the "other" permissions
return fmt.Errorf("error creating rule file at path %s: %w", path, err)
}
}
18 changes: 9 additions & 9 deletions operator/internal/manifests/storage/var.go
@@ -10,15 +10,15 @@ const (
// EnvAWSAccessKeyID is the environment variable to specify the AWS client id to access S3.
EnvAWSAccessKeyID = "AWS_ACCESS_KEY_ID"
// EnvAWSAccessKeySecret is the environment variable to specify the AWS client secret to access S3.
- EnvAWSAccessKeySecret = "AWS_ACCESS_KEY_SECRET"
+ EnvAWSAccessKeySecret = "AWS_ACCESS_KEY_SECRET" //#nosec G101 -- False positive
// EnvAWSSseKmsEncryptionContext is the environment variable to specify the AWS KMS encryption context when using type SSE-KMS.
EnvAWSSseKmsEncryptionContext = "AWS_SSE_KMS_ENCRYPTION_CONTEXT"
// EnvAWSRoleArn is the environment variable to specify the AWS role ARN secret for the federated identity workflow.
EnvAWSRoleArn = "AWS_ROLE_ARN"
// EnvAWSWebIdentityTokenFile is the environment variable to specify the path to the web identity token file used in the federated identity workflow.
- EnvAWSWebIdentityTokenFile = "AWS_WEB_IDENTITY_TOKEN_FILE"
+ EnvAWSWebIdentityTokenFile = "AWS_WEB_IDENTITY_TOKEN_FILE" //#nosec G101 -- False positive
// EnvAWSCredentialsFile is the environment variable to specify the path to the shared credentials file
- EnvAWSCredentialsFile = "AWS_SHARED_CREDENTIALS_FILE"
+ EnvAWSCredentialsFile = "AWS_SHARED_CREDENTIALS_FILE" //#nosec G101 -- False positive
// EnvAWSSdkLoadConfig is the environment that enabled the AWS SDK to enable the shared credentials file to be loaded
EnvAWSSdkLoadConfig = "AWS_SDK_LOAD_CONFIG"
// EnvAzureStorageAccountName is the environment variable to specify the Azure storage account name to access the container.
@@ -34,7 +34,7 @@ const (
// EnvAzureFederatedTokenFile is the environment variable used to store the path to the Managed Identity token.
EnvAzureFederatedTokenFile = "AZURE_FEDERATED_TOKEN_FILE"
// EnvGoogleApplicationCredentials is the environment variable to specify path to key.json
- EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"
+ EnvGoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS" //#nosec G101 -- False positive
// EnvSwiftPassword is the environment variable to specify the OpenStack Swift password.
EnvSwiftPassword = "SWIFT_PASSWORD"
// EnvSwiftUsername is the environment variable to specify the OpenStack Swift username.
@@ -52,7 +52,7 @@ const (
// KeyAWSAccessKeyID is the secret data key for the AWS client id to access S3.
KeyAWSAccessKeyID = "access_key_id"
// KeyAWSAccessKeySecret is the secret data key for the AWS client secret to access S3.
- KeyAWSAccessKeySecret = "access_key_secret"
+ KeyAWSAccessKeySecret = "access_key_secret" //#nosec G101 -- False positive
// KeyAWSBucketNames is the secret data key for the AWS S3 bucket names.
KeyAWSBucketNames = "bucketnames"
// KeyAWSEndpoint is the secret data key for the AWS endpoint URL.
@@ -131,16 +131,16 @@ const (

saTokenVolumeName = "bound-sa-token"
saTokenExpiration int64 = 3600
- saTokenVolumeMountPath = "/var/run/secrets/storage/serviceaccount"
+ saTokenVolumeMountPath = "/var/run/secrets/storage/serviceaccount" //#nosec G101 -- False positive

ServiceAccountTokenFilePath = saTokenVolumeMountPath + "/token"

- secretDirectory = "/etc/storage/secrets"
+ secretDirectory = "/etc/storage/secrets" //#nosec G101 -- False positive
storageTLSVolume = "storage-tls"
caDirectory = "/etc/storage/ca"

- tokenAuthConfigVolumeName = "token-auth-config"
- tokenAuthConfigDirectory = "/etc/storage/token-auth"
+ tokenAuthConfigVolumeName = "token-auth-config" //#nosec G101 -- False positive
+ tokenAuthConfigDirectory = "/etc/storage/token-auth" //#nosec G101 -- False positive

awsDefaultAudience = "sts.amazonaws.com"
azureDefaultAudience = "api://AzureADTokenExchange"
4 changes: 2 additions & 2 deletions operator/internal/manifests/var.go
@@ -68,7 +68,7 @@ const (
// PrometheusCAFile declares the path for prometheus CA file for service monitors.
PrometheusCAFile string = "/etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt"
// BearerTokenFile declares the path for bearer token file for service monitors.
- BearerTokenFile string = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+ BearerTokenFile string = "/var/run/secrets/kubernetes.io/serviceaccount/token" //#nosec G101 -- False positive

// labelJobComponent is a ServiceMonitor.Spec.JobLabel.
labelJobComponent string = "loki.grafana.com/component"
@@ -80,7 +80,7 @@ const (
// AnnotationLokiObjectStoreHash stores the last SHA1 hash of the loki object storage credetials.
AnnotationLokiObjectStoreHash string = "loki.grafana.com/object-store-hash"
// AnnotationLokiTokenCCOAuthHash stores the SHA1 hash of the secret generated by the Cloud Credential Operator.
- AnnotationLokiTokenCCOAuthHash string = "loki.grafana.com/token-cco-auth-hash"
+ AnnotationLokiTokenCCOAuthHash string = "loki.grafana.com/token-cco-auth-hash" //#nosec G101 -- False positive

// LabelCompactorComponent is the label value for the compactor component
LabelCompactorComponent string = "compactor"
2 changes: 1 addition & 1 deletion pkg/canary/comparator/comparator.go
@@ -275,7 +275,7 @@ func (c *Comparator) run() {
t := time.NewTicker(c.pruneInterval)
// Use a random tick up to the interval for the first tick
firstMt := true
- randomGenerator := rand.New(rand.NewSource(time.Now().UnixNano()))
+ randomGenerator := rand.New(rand.NewSource(time.Now().UnixNano())) //#nosec G404 -- Random sampling for health testing purposes, does not require secure random.
mt := time.NewTicker(time.Duration(randomGenerator.Int63n(c.metricTestInterval.Nanoseconds())))
sc := time.NewTicker(c.spotCheckQueryRate)
ct := time.NewTicker(c.cacheTestInterval)
4 changes: 2 additions & 2 deletions pkg/canary/writer/writer.go
@@ -82,8 +82,8 @@ func (w *Writer) run() {
select {
case <-t.C:
t := time.Now()
- if i := rand.Intn(100); i < w.outOfOrderPercentage {
- n := rand.Intn(int(w.outOfOrderMax.Seconds()-w.outOfOrderMin.Seconds())) + int(w.outOfOrderMin.Seconds())
+ if i := rand.Intn(100); i < w.outOfOrderPercentage { //#nosec G404 -- Random sampling for testing purposes, does not require secure random.
+ n := rand.Intn(int(w.outOfOrderMax.Seconds()-w.outOfOrderMin.Seconds())) + int(w.outOfOrderMin.Seconds()) //#nosec G404 -- Random sampling for testing purposes, does not require secure random.
t = t.Add(-time.Duration(n) * time.Second)
}
ts := strconv.FormatInt(t.UnixNano(), 10)
2 changes: 1 addition & 1 deletion pkg/chunkenc/memchunk.go
@@ -1332,7 +1332,7 @@ func (hb *headBlock) SampleIterator(ctx context.Context, mint, maxt int64, extra
}

func unsafeGetBytes(s string) []byte {
- return unsafe.Slice(unsafe.StringData(s), len(s))
+ return unsafe.Slice(unsafe.StringData(s), len(s)) // #nosec G103 -- we know the string is not mutated
}

type bufferedIterator struct {
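A small illustrative note on the G103 suppression above (example mine, under the assumption that callers never write to the result): the conversion is zero-copy, so it is only safe while the returned bytes are treated as read-only.

package main

import (
	"fmt"
	"unsafe"
)

// unsafeGetBytes mirrors the helper above: it reuses the string's backing
// memory instead of copying it.
func unsafeGetBytes(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s)) // zero-copy; read-only by contract
}

func main() {
	b := unsafeGetBytes("label=value")
	fmt.Println(len(b), string(b[:5])) // reading is fine
	// Writing (e.g. b[0] = 'X') would mutate an immutable string, which is
	// exactly the hazard the #nosec justification says cannot happen here.
}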
2 changes: 1 addition & 1 deletion pkg/compactor/deletion/delete_requests_store.go
@@ -413,7 +413,7 @@ func splitUserIDAndRequestID(rangeValue string) (userID, requestID, seqID string

// unsafeGetString is like yolostring but with a meaningful name
func unsafeGetString(buf []byte) string {
- return *((*string)(unsafe.Pointer(&buf)))
+ return *((*string)(unsafe.Pointer(&buf))) // #nosec G103 -- we know the string is not mutated
}

func generateCacheGenNumber() []byte {
2 changes: 1 addition & 1 deletion pkg/compactor/retention/retention.go
@@ -484,7 +484,7 @@ func CopyMarkers(src string, dst string) error {
return fmt.Errorf("read marker file: %w", err)
}

- if err := os.WriteFile(filepath.Join(targetDir, marker.Name()), data, 0o666); err != nil {
+ if err := os.WriteFile(filepath.Join(targetDir, marker.Name()), data, 0640); err != nil { // #nosec G306 -- this is fencing off the "other" permissions
return fmt.Errorf("write marker file: %w", err)
}
}
2 changes: 1 addition & 1 deletion pkg/compactor/retention/util.go
@@ -13,7 +13,7 @@ import (

// unsafeGetString is like yolostring but with a meaningful name
func unsafeGetString(buf []byte) string {
- return *((*string)(unsafe.Pointer(&buf)))
+ return *((*string)(unsafe.Pointer(&buf))) // #nosec G103 -- we know the string is not mutated
}

func copyFile(src, dst string) (int64, error) {
4 changes: 2 additions & 2 deletions pkg/compactor/testutil.go
@@ -81,7 +81,7 @@ func SetupTable(t *testing.T, path string, commonDBsConfig IndexesConfig, perUse
idx := 0
for filename, content := range commonIndexes {
filePath := filepath.Join(path, strings.TrimSuffix(filename, ".gz"))
- require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
+ require.NoError(t, os.WriteFile(filePath, []byte(content), 0640)) // #nosec G306 -- this is fencing off the "other" permissions
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
@@ -92,7 +92,7 @@
require.NoError(t, util.EnsureDirectory(filepath.Join(path, userID)))
for filename, content := range files {
filePath := filepath.Join(path, userID, strings.TrimSuffix(filename, ".gz"))
- require.NoError(t, os.WriteFile(filePath, []byte(content), 0777))
+ require.NoError(t, os.WriteFile(filePath, []byte(content), 0640)) // #nosec G306 -- this is fencing off the "other" permissions
if strings.HasSuffix(filename, ".gz") {
compressFile(t, filePath)
}
2 changes: 1 addition & 1 deletion pkg/ingester/checkpoint.go
@@ -344,7 +344,7 @@ func (w *WALCheckpointWriter) Advance() (bool, error) {
}
}

- if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil {
+ if err := os.MkdirAll(checkpointDirTemp, 0750); err != nil {
return false, fmt.Errorf("create checkpoint dir: %w", err)
}

4 changes: 2 additions & 2 deletions pkg/ingester/ingester.go
@@ -335,7 +335,7 @@ func New(cfg Config, clientConfig client.Config, store Store, limits Limits, con
i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i})

if cfg.WAL.Enabled {
- if err := os.MkdirAll(cfg.WAL.Dir, os.ModePerm); err != nil {
+ if err := os.MkdirAll(cfg.WAL.Dir, 0750); err != nil {
// Best effort try to make path absolute for easier debugging.
path, _ := filepath.Abs(cfg.WAL.Dir)
if path == "" {
@@ -759,7 +759,7 @@ func (i *Ingester) loop() {
// flush at the same time. Flushing at the same time can cause concurrently
// writing the same chunk to object storage, which in AWS S3 leads to being
// rate limited.
- jitter := time.Duration(rand.Int63n(int64(float64(i.cfg.FlushCheckPeriod.Nanoseconds()) * 0.8)))
+ jitter := time.Duration(rand.Int63n(int64(float64(i.cfg.FlushCheckPeriod.Nanoseconds()) * 0.8))) //#nosec G404 -- Jitter does not require a CSPRNG.
initialDelay := time.NewTimer(jitter)
defer initialDelay.Stop()

3 changes: 2 additions & 1 deletion pkg/kafka/partitionring/partition_ring.go
@@ -62,8 +62,9 @@ func ExtractIngesterPartitionID(ingesterID string) (int32, error) {
if len(match) == 0 {
return 0, fmt.Errorf("ingester ID %s doesn't match regular expression %q", ingesterID, ingesterIDRegexp.String())
}

// Parse the ingester sequence number.
- ingesterSeq, err := strconv.Atoi(match[1])
+ ingesterSeq, err := strconv.ParseInt(match[1], 10, 32)
if err != nil {
return 0, fmt.Errorf("no ingester sequence number in ingester ID %s", ingesterID)
}
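A brief sketch of the overflow concern this switch addresses (the downstream int32 conversion is assumed from the function signature; it is not visible in this hunk): with bitSize 32, ParseInt rejects values that do not fit in an int32, so a later cast cannot silently wrap.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// strconv.Atoi would return 2147483648 unchanged on 64-bit platforms,
	// and converting that value to int32 would wrap to -2147483648.
	if _, err := strconv.ParseInt("2147483648", 10, 32); err != nil {
		fmt.Println("rejected:", err) // value out of range
	}

	v, _ := strconv.ParseInt("12", 10, 32) // v is int64, but guaranteed to fit in int32
	fmt.Println(int32(v))
}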
2 changes: 1 addition & 1 deletion pkg/loghttp/query.go
@@ -266,7 +266,7 @@ func (s Streams) ToProto() []logproto.Stream {
}
result := make([]logproto.Stream, 0, len(s))
for _, s := range s {
- entries := *(*[]logproto.Entry)(unsafe.Pointer(&s.Entries))
+ entries := *(*[]logproto.Entry)(unsafe.Pointer(&s.Entries)) // #nosec G103 -- we know the string is not mutated
result = append(result, logproto.Stream{
Labels: s.Labels.String(),
Entries: entries,
6 changes: 3 additions & 3 deletions pkg/logproto/compat.go
@@ -51,14 +51,14 @@ func ToWriteRequest(lbls []labels.Labels, samples []LegacySample, metadata []*Me
// Note: while resulting labels.Labels is supposedly sorted, this function
// doesn't enforce that. If input is not sorted, output will be wrong.
func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels {
- return *(*labels.Labels)(unsafe.Pointer(&ls))
+ return *(*labels.Labels)(unsafe.Pointer(&ls)) // #nosec G103 -- we know the string is not mutated
}

// FromLabelsToLabelAdapters casts labels.Labels to []LabelAdapter.
// It uses unsafe, but as LabelAdapter == labels.Label this should be safe.
// This allows us to use labels.Labels directly in protos.
func FromLabelsToLabelAdapters(ls labels.Labels) []LabelAdapter {
- return *(*[]LabelAdapter)(unsafe.Pointer(&ls))
+ return *(*[]LabelAdapter)(unsafe.Pointer(&ls)) // #nosec G103 -- we know the string is not mutated
}

// FromLabelAdaptersToMetric converts []LabelAdapter to a model.Metric.
@@ -155,7 +155,7 @@ func SampleJsoniterDecode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
}

bs := iter.ReadStringAsSlice()
- ss := *(*string)(unsafe.Pointer(&bs))
+ ss := *(*string)(unsafe.Pointer(&bs)) // #nosec G103 -- we know the string is not mutated
v, err := strconv.ParseFloat(ss, 64)
if err != nil {
iter.ReportError("logproto.LegacySample", err.Error())