diff --git a/hack/testing/test-viaq-data-model.py b/hack/testing/test-viaq-data-model.py index ad2903162..27c3ef4aa 100644 --- a/hack/testing/test-viaq-data-model.py +++ b/hack/testing/test-viaq-data-model.py @@ -41,7 +41,8 @@ def empty(thing): "undefined23": False }, "undefined4": "undefined4", - "undefined5": "undefined5" + "undefined5": "undefined5", + "undefined.6": "undefined6" }, "test2": {"undefined": {"undefined1": "undefined1", @@ -53,7 +54,8 @@ def empty(thing): "undefined23": False }, "undefined4": "undefined4", - "undefined5": "undefined5" + "undefined5": "undefined5", + "undefined.6": "undefined6" } }, "test3": {"undefined": @@ -64,7 +66,8 @@ def empty(thing): "undefined2": "undefined2", "undefined22": 2222, "undefined23": False - } + }, + "undefined.6": "undefined6" }, "undefined4": "undefined4", "undefined5": "undefined5" @@ -77,7 +80,8 @@ def empty(thing): "undefined2": "undefined2", "undefined22": 2222, "undefined23": False - } + }, + "undefined.6": "undefined6" }, "undefined4": "undefined4", "undefined5": "undefined5" @@ -90,7 +94,34 @@ def empty(thing): "undefined2": "undefined2", "undefined22": 2222, "undefined23": False - } + }, + "undefined.6": "undefined6" + }, + "undefined4": "undefined4", + "undefined5": "undefined5", + "empty1": "", + "undefined3": {"emptyvalue": ""} + }, + "test6": {"myname": + {"undefined1": "undefined1", + "undefined11": 1111, + "undefined12": True, + "undefined2": { + "undefined2": "undefined2", + "undefined22": 2222, + "undefined23": False + }, + "undefined_6": "undefined6" + }, + "undefined4": "undefined4", + "undefined5": "undefined5", + "empty1": "", + "undefined3": {"emptyvalue": ""} + }, + "test7": {"myname": + {"undefined1": "undefined1", + "undefined11": 1111, + "undefined12": True, }, "undefined4": "undefined4", "undefined5": "undefined5", diff --git a/hack/testing/util.sh b/hack/testing/util.sh index 3c6d34006..a97e465c1 100644 --- a/hack/testing/util.sh +++ b/hack/testing/util.sh @@ -433,6 +433,10 @@ 
function flush_fluentd_pos_files() { os::cmd::expect_success "oal_sudo rm -f /var/log/journal.pos /var/log/journal_pos.json" } +function flush_rsyslog_pos_files() { + os::cmd::expect_success "oal_sudo rm -f /var/lib/rsyslog.pod/imfile-state* /var/lib/rsyslog.pod/imjournal.state" +} + # return 0 if given file has size ge given size, otherwise, return 1 function file_has_size() { local f=$1 @@ -709,18 +713,6 @@ get_fluentd_pod_log() { fi } -# rsyslog may have pod logs and logs in the file -get_rsyslog_pod_log() { - local pod=${1:-$( get_running_pod rsyslog )} - local container=${2:-rsyslog} - oc logs -c $container $pod 2>&1 - if [ $container = rsyslog ] ; then - oc exec -c $container $pod -- logs 2>&1 || oal_sudo cat /var/log/rsyslog/rsyslog.log.* /var/log/rsyslog/rsyslog.log || : - else - oal_sudo cat /var/lib/rsyslog.pod/logrotate.log /var/log/rsyslog/logrotate.log || : - fi -} - get_mux_pod_log() { local pod=${1:-$( get_running_pod mux )} local logfile=${2:-/var/log/fluentd/fluentd.log} @@ -728,6 +720,22 @@ get_mux_pod_log() { oc exec $pod -- cat $logfile 2> /dev/null || : } +# rsyslog may have pod logs and logs in the file +get_rsyslog_pod_log() { + local pod=${1:-$( get_running_pod rsyslog )} + local logfile=${2:-/var/log/rsyslog/rsyslog.log} + oc logs $pod -c rsyslog 2>&1 + if oc exec $pod -c rsyslog -- logs --all 2>&1 ; then + : # done + elif oal_sudo test -f $logfile ; then + # can't read from the pod directly - see if we can get the log file + oal_sudo cat ${logfile}.log-* $logfile 2> /dev/null || : + oal_sudo cat /var/log/rsyslog/rsyslog_debug.log-* /var/log/rsyslog/rsyslog_debug.log 2> /dev/null || : + oal_sudo cat /var/log/rsyslog/logrotate.log-* /var/log/rsyslog/logrotate.log 2> /dev/null || : + oal_sudo cat /var/lib/rsyslog.pod/logrotate.log-* /var/lib/rsyslog.pod/logrotate.log 2> /dev/null || : + fi +} + # writes all pod logs to the given outdir or $ARTIFACT_DIR in the form # pod_name.container_name.log # it will get both the oc logs output and any log 
files produced by @@ -745,17 +753,18 @@ get_all_logging_pod_logs() { continue fi if [ $container = logrotate ] ; then - get_rsyslog_pod_log $p $container > $ARTIFACT_DIR/$p.$container.log 2>&1 + # we got the logs from the rsyslog container already continue fi case "$p" in logging-fluentd-*|fluentd-*) get_fluentd_pod_log $p > $ARTIFACT_DIR/$p.$container.log 2>&1 ;; logging-mux-*) get_mux_pod_log $p > $ARTIFACT_DIR/$p.$container.log 2>&1 ;; - *) oc logs -n ${LOGGING_NS} -c $container $p > $ARTIFACT_DIR/$p.$container.log 2>&1 - oc exec -n ${LOGGING_NS} -c $container $p -- logs >> $ARTIFACT_DIR/$p.$container.log 2>&1 || : - ;; + logging-es-*|elasticsearch-*) oc logs -n ${LOGGING_NS} -c $container $p > $ARTIFACT_DIR/$p.$container.log 2>&1 + oc exec -c elasticsearch -n ${LOGGING_NS} $p -- logs >> $ARTIFACT_DIR/$p.$container.log 2>&1 + ;; + *) oc logs -n ${LOGGING_NS} -c $container $p > $ARTIFACT_DIR/$p.$container.log 2>&1 ;; esac - done + done done } @@ -786,6 +795,32 @@ start_fluentd() { os::cmd::try_until_text "oc get pods -l component=fluentd" "^(logging-)*fluentd-.* Running " $wait_time } +stop_rsyslog() { + local rpod=${1:-$( get_running_pod rsyslog )} + local wait_time=${2:-$(( 2 * minute ))} + + oc label node -l logging-infra-rsyslog=true --overwrite logging-infra-rsyslog=false + os::cmd::try_until_text "oc get $rsyslog_ds -o jsonpath='{ .status.numberReady }'" "0" $wait_time + # not sure if it is a bug or a flake, but sometimes .status.numberReady is 0, the rsyslog pod hangs around + # in the Terminating state for many seconds, which seems to cause problems with subsequent tests + # so, we have to wait for the pod to completely disappear - we cannot rely on .status.numberReady == 0 + if [ -n "${rpod:-}" ] ; then + os::cmd::try_until_failure "oc get pod $rpod > /dev/null 2>&1" $wait_time + fi +} + +start_rsyslog() { + local cleanfirst=${1:-false} + local wait_time=${2:-$(( 2 * minute ))} + + if [ "$cleanfirst" != false ] ; then + flush_rsyslog_pos_files + oal_sudo 
rm -f /var/log/rsyslog/* /var/lib/rsyslog.pod/* + fi + oc label node -l logging-infra-rsyslog=false --overwrite logging-infra-rsyslog=true + os::cmd::try_until_text "oc get pods -l component=rsyslog" "^(logging-)*rsyslog-.* Running " $wait_time +} + get_fluentd_ds_name() { if oc -n ${LOGGING_NS} get daemonset fluentd -o name > /dev/null 2>&1 ; then echo daemonset/fluentd @@ -806,6 +841,9 @@ get_fluentd_cm_name() { fluentd_cm=${fluentd_cm:-$(get_fluentd_cm_name)} +# Hardcode daemonset/rsyslog for the case the rsyslog pod does not exist. +rsyslog_ds=${rsyslog_ds:-daemonset/rsyslog} + enable_cluster_logging_operator() { if oc -n ${LOGGING_NS} get deploy cluster-logging-operator > /dev/null 2>&1 ; then oc patch -n ${LOGGING_NS} clusterlogging instance --type=json --patch '[ diff --git a/openshift/ci-operator/build-image/Dockerfile.full b/openshift/ci-operator/build-image/Dockerfile.full index 527ba1305..851b3be59 100644 --- a/openshift/ci-operator/build-image/Dockerfile.full +++ b/openshift/ci-operator/build-image/Dockerfile.full @@ -14,3 +14,4 @@ ADD openshift/ /go/src/github.com/openshift/origin-aggregated-logging/openshift/ ADD hack/ /go/src/github.com/openshift/origin-aggregated-logging/hack/ ADD test/ /go/src/github.com/openshift/origin-aggregated-logging/test/ ADD fluentd/ /go/src/github.com/openshift/origin-aggregated-logging/fluentd/ +ADD rsyslog/ /go/src/github.com/openshift/origin-aggregated-logging/rsyslog/ diff --git a/rsyslog/README.md b/rsyslog/README.md index e98da5a57..3f18342e9 100644 --- a/rsyslog/README.md +++ b/rsyslog/README.md @@ -10,6 +10,15 @@ Following are the environment variables that can be modified to adjust the confi | `LOGGING_FILE_PATH` | The log file absolute path where Rsyslog is writting its logs. If you want rsyslog to output its logs as Rsyslog does by default (`STDOUT`) set this variable to `console` value. Default value is `/var/log/rsyslog/rsyslog.log`. 
| `LOGGING_FILE_PATH=console` | | `LOGGING_FILE_AGE` | Number of log files that Rsyslog keeps before deleting the oldest file. Default value is `10`. | `LOGGING_FILE_AGE=30` | | `LOGGING_FILE_SIZE` | Maximum size of a rsyslog log file in bytes. If the size of the log file is bigger, the log file gets rotated. Default is 1MB | `LOGGING_FILE_SIZE=1024000` +| `CDM_USE_UNDEFINED` | Parameter to switch whether undefined fields are moved to the undefined property or not. default to "false". | `CDM_USE_UNDEFINED=true` +| `CDM_DEFAULT_KEEP_FIELDS` | Default set of fields to be kept in the top level of json. default to "CEE,time,@timestamp,aushape,ci_job,collectd,docker,fedora-ci,file,foreman,geoip,hostname,ipaddr4,ipaddr6,kubernetes,level,message,namespace_name,namespace_uuid,offset,openstack,ovirt,pid,pipeline_metadata,rsyslog,service,systemd,tags,testcase,tlog,viaq_msg_id", | - +| `CDM_EXTRA_KEEP_FIELDS` | Extra set of fields to be kept in the top level of json. A field not included in ${CDM_DEFAULT_KEEP_FIELDS} nor ${CDM_EXTRA_KEEP_FIELDS} are moved to ${CDM_UNDEFINED_NAME} if CDM_USE_UNDEFINED is "true". default to "" | `CDM_EXTRA_KEEP_FIELDS="broker"` +| `CDM_UNDEFINED_NAME` | Undefined property name used when CDM_USE_UNDEFINED is set to "true". default to "undefined". | `CDM_UNDEFINED_NAME="undef"` +| `CDM_KEEP_EMPTY_FIELDS` | Empty fields are dropped except the fields which names are set to CDM_KEEP_EMPTY_FIELDS in the CSV format. default to "". | `CDM_KEEP_EMPTY_FIELDS="offset"` +| `CDM_UNDEFINED_TO_STRING` | If set to "true", when CDM_USE_UNDEFINED is "true" and undefined property with ${CDM_UNDEFINED_NAME} is created, the value is converted to the json string. default to "false". | `CDM_UNDEFINED_TO_STRING=true +| `CDM_UNDEFINED_DOT_REPLACE_CHAR` | A dot character '.' in a property name (key) is replaced with the specified character unless the value is not "UNUSED". default to "UNUSED". Effective when MERGE_JSON_LOG is true. 
| `CDM_UNDEFINED_DOT_REPLACE_CHAR="_"` +| `CDM_UNDEFINED_MAX_NUM_FIELDS` | If a positive value is set, undefined fields are dropped once the number of fields has reached the value. default to "-1". | `CDM_UNDEFINED_MAX_NUM_FIELDS=4` +| `UNDEFINED_DEBUG` | Debug flag used in undefined_field as well as the config file calling the plugin. A debug file /var/log/rsyslog/rsyslog_debug.log is generated. default to "false". | `UNDEFINED_DEBUG=true` ## Rsyslog logging to file Rsyslog by default writes its logs into a file given by `LOGGING_FILE_PATH` environment variable. You can change the maximum size of a single log file or number of log files to keep(age), by setting `LOGGING_FILE_SIZE` and `LOGGING_FILE_AGE` environment variables accordingly. diff --git a/rsyslog/undefined_field/undefined_field.go b/rsyslog/undefined_field/undefined_field.go index 3a410b62f..cbaef9285 100644 --- a/rsyslog/undefined_field/undefined_field.go +++ b/rsyslog/undefined_field/undefined_field.go @@ -76,285 +76,412 @@ package main import ( - "encoding/json" "bufio" - "strings" + "encoding/json" "fmt" "io/ioutil" "os" + "strings" ) const ( - initial_logging_file_path = "/var/log/rsyslog/rsyslog.log" - undefined_config = "/var/lib/rsyslog.pod/undefined.json" - noChanges = "{}" + initialLoggingFilePath = "/var/log/rsyslog/rsyslog.log" + defaultUndefinedConfig = "/var/lib/rsyslog.pod/undefined.json" + noChanges = "{}" ) -type UndefinedConfig struct { - Debug bool `json:"UNDEFINED_DEBUG"` - Merge_json_log bool `json:"MERGE_JSON_LOG"` - Use_undefined bool `json:"CDM_USE_UNDEFINED"` - Undefined_to_string bool `json:"CDM_UNDEFINED_TO_STRING"` - Default_keep_fields string `json:"CDM_DEFAULT_KEEP_FIELDS"` - Extra_keep_fields string `json:"CDM_EXTRA_KEEP_FIELDS"` - Undefined_name string `json:"CDM_UNDEFINED_NAME"` - Keep_empty_fields string `json:"CDM_KEEP_EMPTY_FIELDS"` - Undefined_dot_replace_char string `json:"CDM_UNDEFINED_DOT_REPLACE_CHAR"` - Undefined_max_num_fields int64 
`json:"CDM_UNDEFINED_MAX_NUM_FIELDS"` +type undefinedConfig struct { + Debug bool `json:"UNDEFINED_DEBUG"` + MergeJSONLog bool `json:"MERGE_JSON_LOG"` + UseUndefined bool `json:"CDM_USE_UNDEFINED"` + UndefinedToString bool `json:"CDM_UNDEFINED_TO_STRING"` + DefaultKeepFields string `json:"CDM_DEFAULT_KEEP_FIELDS"` + ExtraKeepFields string `json:"CDM_EXTRA_KEEP_FIELDS"` + UndefinedName string `json:"CDM_UNDEFINED_NAME"` + KeepEmptyFields string `json:"CDM_KEEP_EMPTY_FIELDS"` + UndefinedDotReplaceChar string `json:"CDM_UNDEFINED_DOT_REPLACE_CHAR"` + UndefinedMaxNumFields int64 `json:"CDM_UNDEFINED_MAX_NUM_FIELDS"` } var ( - undefined_debug bool - merge_json_log bool - use_undefined bool - keep_fields map[string]string - keep_empty_fields map[string]string - undefined_name string - undefined_to_string bool - undefined_dot_replace_char string - undefined_max_num_fields int64 - undefined_cur_num_fields int64 - logfile *os.File - noaction = false - replacer = &strings.Replacer{} + keepFields map[string]string + keepEmptyFields map[string]string + logfile *os.File + replacer = &strings.Replacer{} + checkMaxNumFields bool + cfg undefinedConfig ) -func getMapStringValue(m map[string]interface{}, key string) (string, bool) { - if val, ok := m[key]; ok { - return val.(string), ok - } else { - return "", ok - } -} - func onInit() { // opening the rsyslog log file - logging_file_path := initial_logging_file_path + loggingFilePath := initialLoggingFilePath if eval := os.Getenv("LOGGING_FILE_PATH"); eval != "" { - logging_file_path = eval + loggingFilePath = eval } - if f, err := os.OpenFile(logging_file_path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil { + if f, err := os.OpenFile(loggingFilePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err == nil { logfile = f } else { - panic(fmt.Errorf("Could not open file [%s]: [%v]", logging_file_path, err)) + panic(fmt.Errorf("Could not open file [%s]: [%v]", loggingFilePath, err)) } - var undefined_config_obj 
UndefinedConfig - var default_keep_fields string - var extra_keep_fields string - var tmp_keep_empty_fields string - if config, err := os.Open(undefined_config); err == nil { + undefinedConfigFile := defaultUndefinedConfig + if eval := os.Getenv("UNDEFINED_CONFIG"); eval != "" { + undefinedConfigFile = eval + } + if config, err := os.Open(undefinedConfigFile); err == nil { defer config.Close() + configRead, _ := ioutil.ReadAll(config) + json.Unmarshal(configRead, &cfg) + } else if err != os.ErrNotExist { + fmt.Fprintf(logfile, "ERROR: Could not open config file [%s]: [%v]\n", undefinedConfigFile, err) + panic(fmt.Errorf("Could not open config file [%s]: [%v]", undefinedConfigFile, err)) + } - config_read, _ := ioutil.ReadAll(config) - json.Unmarshal(config_read, &undefined_config_obj) + if cfg.UndefinedMaxNumFields == -1 { + checkMaxNumFields = false + cfg.UndefinedMaxNumFields = int64(^uint(0) >> 1) + } else { + checkMaxNumFields = true + } + if cfg.UseUndefined { + tmpDefault := strings.Split(cfg.DefaultKeepFields, ",") + tmpExtra := strings.Split(cfg.ExtraKeepFields, ",") + keepFields = make(map[string]string) + for _, str := range tmpDefault { + keepFields[str] = str + } + for _, str := range tmpExtra { + keepFields[str] = str + } + } + tmp := strings.Split(cfg.KeepEmptyFields, ",") + keepEmptyFields = make(map[string]string) + for _, str := range tmp { + keepEmptyFields[str] = str + } + + if cfg.UndefinedDotReplaceChar != "UNUSED" { + replacer = strings.NewReplacer(".", cfg.UndefinedDotReplaceChar) + } - undefined_debug = undefined_config_obj.Debug - merge_json_log = undefined_config_obj.Merge_json_log - use_undefined = undefined_config_obj.Use_undefined - default_keep_fields = undefined_config_obj.Default_keep_fields - extra_keep_fields = undefined_config_obj.Extra_keep_fields - undefined_name = undefined_config_obj.Undefined_name - tmp_keep_empty_fields = undefined_config_obj.Keep_empty_fields - undefined_to_string = undefined_config_obj.Undefined_to_string 
- undefined_dot_replace_char = undefined_config_obj.Undefined_dot_replace_char - undefined_max_num_fields = undefined_config_obj.Undefined_max_num_fields - } else if !strings.Contains(err.Error(), "no such file or directory") { - fmt.Fprintln(logfile, "ERROR: Could not open config file [%s]: [%v]", undefined_config, err) - panic(fmt.Errorf("Could not open config file [%s]: [%v]", undefined_config, err)) + if cfg.Debug { + fmt.Fprintln(logfile, "mmexternal: merge_json_log: ", cfg.MergeJSONLog) + fmt.Fprintln(logfile, "mmexternal: use_undefined: ", cfg.UseUndefined) + fmt.Fprintln(logfile, "mmexternal: default_keep_fields: ", cfg.DefaultKeepFields) + fmt.Fprintln(logfile, "mmexternal: extra_keep_fields: ", cfg.ExtraKeepFields) + fmt.Fprintln(logfile, "mmexternal: undefined_name: ", cfg.UndefinedName) + fmt.Fprintf(logfile, "mmexternal: keep_empty_fields: %v %v\n", cfg.KeepEmptyFields, keepEmptyFields) + fmt.Fprintln(logfile, "mmexternal: undefined_to_string: ", cfg.UndefinedToString) + fmt.Fprintln(logfile, "mmexternal: undefined_dot_replace_char: ", cfg.UndefinedDotReplaceChar) + fmt.Fprintln(logfile, "mmexternal: undefinedMaxNumFields: ", cfg.UndefinedMaxNumFields) } +} - if !use_undefined && tmp_keep_empty_fields == "" && !undefined_to_string && undefined_dot_replace_char == "UNUSED" { - noaction = true +// This function has two purposes. +// 1) Check if the number of undefined fields exceeds the maximum value, if any. +// If so, return a JSON string representation of a map of the undefined +// fields, suitable for returning in the record[$undefined] value, and +// the error, if any, returned from json.Marshal. +// 2) If use_undefined is true, move the undefined fields to a separate +// map. +// Return values: +// * The JSON string representation of the map of the undefined fields if the +// undefined field count exceeded the max, or "" +// * A map of the undefined fields if undefined field checking was enabled. +// Otherwise, nil. 
Map may be empty if there were no undefined fields. +// * err return from json.Marshal +// Side effects: +// The undefined fields are moved from the input map to the undefMap returned +func processUndefinedAndMaxNumFields(input map[string]interface{}) (string, map[string]interface{}, error) { + if !checkMaxNumFields && !cfg.UseUndefined { + return "", nil, nil // not used } - if undefined_max_num_fields == -1 { - undefined_max_num_fields = int64(^uint(0) >> 1) + count := cfg.UndefinedMaxNumFields + var undefMap map[string]interface{} + for field, val := range input { + if _, keep := keepFields[field]; !keep { + if undefMap == nil { + undefMap = make(map[string]interface{}) + } + undefMap[field] = val + count-- + } } - if use_undefined { - tmp_default := strings.Split(default_keep_fields, ",") - tmp_extra := strings.Split(extra_keep_fields, ",") - keep_fields = make(map[string]string) - for _, str := range tmp_default { - keep_fields[str] = str + if undefMap == nil || len(undefMap) == 0 { + return "", nil, nil // no undefined fields + } + if count < 0 || cfg.UseUndefined { + for field := range undefMap { + delete(input, field) } - for _, str := range tmp_extra { - keep_fields[str] = str + if count < 0 { + // undefined fields converted to string - no undefMap + b, err := json.Marshal(undefMap) + return string(b), nil, err } } - tmp := strings.Split(tmp_keep_empty_fields, ",") - keep_empty_fields = make(map[string]string) - for _, str := range tmp { - keep_empty_fields[str] = str + return "", nil, nil +} + +func isFieldUndefined(field string, hasDefinedFields, hasUndefinedFields bool) bool { + if !hasUndefinedFields { + return false // input contains only defined fields + } else if !hasDefinedFields { + return true // input contains only undefined fields + } else { + _, definedField := keepFields[field] // see if field is a keeper + return !definedField } +} - if undefined_dot_replace_char != "UNUSED" { - replacer = strings.NewReplacer(".", undefined_dot_replace_char) 
+// convert the given field value to a string if not already a string +// modifies the value in input in place - if the returned bool is +// true, the field value was changed, and the new value is returned in the +// interface{} return value +func processUndefinedToString(input map[string]interface{}, field string, val interface{}) (interface{}, bool) { + inputWasModified := false + var newval string + if _, isstring := val.(string); !isstring { + bval, err := json.Marshal(val) // convert to JSON string + if err == nil { + newval = string(bval) + input[field] = newval // replace val in-place + } else { + if cfg.Debug { + fmt.Fprintf(logfile, "Could not convert field [%s] value [%v] to JSON string: %v\n", field, val, err) + } + // fallback + newval = fmt.Sprintf("%v", val) + input[field] = newval // replace val in-place + } + inputWasModified = true } + return newval, inputWasModified +} - fmt.Fprintln(logfile, "mmexternal: merge_json_log: ", merge_json_log) - fmt.Fprintln(logfile, "mmexternal: use_undefined: ", use_undefined) - fmt.Fprintln(logfile, "mmexternal: default_keep_fields: ", default_keep_fields) - fmt.Fprintln(logfile, "mmexternal: extra_keep_fields: ", extra_keep_fields) - fmt.Fprintln(logfile, "mmexternal: undefined_name: ", undefined_name) - fmt.Fprintln(logfile, "mmexternal: keep_empty_fields: ", tmp_keep_empty_fields) - fmt.Fprintln(logfile, "mmexternal: undefined_to_string: ", undefined_to_string) - fmt.Fprintln(logfile, "mmexternal: undefined_dot_replace_char: ", undefined_dot_replace_char) - fmt.Fprintln(logfile, "mmexternal: undefined_max_num_fields: ", undefined_max_num_fields) - fmt.Fprintln(logfile, "mmexternal: noaction: ", noaction) +func processDotReplaceChar(field string, replacedFields map[string]string) (string, map[string]string) { + newfield := replacer.Replace(field) + if newfield != field { + if replacedFields == nil { + replacedFields = make(map[string]string) + } + replacedFields[field] = newfield + } + return newfield, 
replacedFields } -func replaceDotMoveUndefined(input map[string]interface{}, topPropLevel bool) (map[string]interface{},bool,bool) { - replace_me := false - has_undefined := false - cp := make(map[string]interface{}) - for origkey, value := range input { - key := origkey - if topPropLevel && merge_json_log && undefined_dot_replace_char != "UNUSED" { - // replace '.' with specified char (e.g., '_') - key = replacer.Replace(origkey) - if key != origkey { - replace_me = true +// a value is empty if +// * it is nil +// * it is an empty string +// * it is a zero length array or map +// * it is an array and all its elements are empty +// * it is a map and all of its values are empty +func isEmpty(val interface{}) bool { + switch tval := val.(type) { + case nil: + return true + case string: + return len(tval) == 0 + case []interface{}: + return len(tval) == 0 + case map[string]interface{}: + return len(tval) == 0 + default: + return false // no other type can have an empty value + } +} + +// go through val recursively deleting any empty elements found +// returns the value with the empty elements removed +func delEmpty(val interface{}) (interface{}, bool) { + changed := false + elemChanged := false + switch tval := val.(type) { + case []interface{}: + if len(tval) == 0 { + return val, changed + } + result := tval[:0] + for _, elem := range tval { + elem, elemChanged = delEmpty(elem) + if !isEmpty(elem) { + result = append(result, elem) + } else { + changed = true } + if elemChanged { + changed = true + } + } + return result, changed + case map[string]interface{}: + if len(tval) == 0 { + return val, changed } - // skip empty or not? 
- valuemap, ismap := value.(map[string]interface{}) - valuearraymap, isarraymap := value.([]interface{}) - if _, exists := keep_empty_fields[origkey]; !exists { - if !ismap && (value == nil || len(value.(string)) == 0) || - isarraymap && len(valuearraymap) == 0 || - ismap && len(valuemap) == 0 { - replace_me = true - continue + for key, mapval := range tval { + mapval, elemChanged = delEmpty(mapval) + if isEmpty(mapval) { + delete(tval, key) + changed = true + } else { + tval[key] = mapval + } + if elemChanged { + changed = true } } - // use_undefined and key is not in keep_fields? - _, keepit := keep_fields[origkey] - if topPropLevel && use_undefined && !keepit { - // if unmdefined_max_num_fields > 0, move the undefined item to undefined_name - if undefined_cur_num_fields > 0 { - if cp[undefined_name] == nil { - subcp := make(map[string]interface{}) - cp[undefined_name] = subcp + return val, changed + default: + return val, changed + } +} + +// process the undefined fields - convert to string value, convert +// "." 
in the field names to the undefined_dot_replace_char +// also remove empty fields +// The given input map may contain only undefined fields, or only +// defined fields, or a mix of both +// the given input map is modified in place +// Returns false if the input was unchanged +func processUndefinedAndEmpty(input map[string]interface{}, hasDefinedFields, hasUndefinedFields bool) bool { + var replacedFields map[string]string // map old to new name + inputWasModified := false + for field, val := range input { + newfield := field + if isFieldUndefined(field, hasDefinedFields, hasUndefinedFields) { + if cfg.UndefinedToString { + newval, changed := processUndefinedToString(input, field, val) + if changed { + inputWasModified = true + val = newval // use new val now } - if isarraymap { - rval := replaceDotMoveUndefinedArray(valuearraymap) - if len(rval) > 0 { - cp[undefined_name].(map[string]interface{})[key] = rval - } - } else if ismap { - rval, _, _ := replaceDotMoveUndefined(valuemap, false) - if len(rval) > 0 { - cp[undefined_name].(map[string]interface{})[key] = rval - } - } else { - cp[undefined_name].(map[string]interface{})[key] = value + } + if cfg.UndefinedDotReplaceChar != "UNUSED" && strings.Contains(field, ".") { + newfield, replacedFields = processDotReplaceChar(field, replacedFields) + } + } + // should be the newfield if using undefined_dot_replace_char + _, keepEmpty := keepEmptyFields[newfield] + if !keepEmpty { + changed := false + val, changed = delEmpty(val) + if isEmpty(val) { + inputWasModified = true + delete(input, field) + } else { + input[field] = val + if changed { + inputWasModified = true } - undefined_cur_num_fields-- - replace_me = true - has_undefined = true } - } else if isarraymap { - rval := replaceDotMoveUndefinedArray(valuearraymap) - cp[key] = rval - } else if ismap { - rval, _, _ := replaceDotMoveUndefined(valuemap, false) - cp[key] = rval - } else { - cp[key] = value } } - return cp, replace_me, has_undefined -} - -func 
replaceDotMoveUndefinedArray(inputs []interface{}) []interface{} { - cp := make([]interface{}, 0) - for _, input := range inputs { - valuemap, ismap := input.(map[string]interface{}) - valuearraymap, isarraymap := input.([]interface{}) - if ismap { - rval, _, _ := replaceDotMoveUndefined(valuemap, false) - cp = append(cp, rval) - } else if isarraymap { - rval := replaceDotMoveUndefinedArray(valuearraymap) - cp = append(cp, rval) - } else { - fmt.Fprintln(logfile, "Error:", input, " is not a map. Ignoring...") + if replacedFields != nil && len(replacedFields) > 0 { + inputWasModified = true + for oldfield, newfield := range replacedFields { + input[newfield] = input[oldfield] + delete(input, oldfield) } } - return cp + return inputWasModified } func main() { onInit() defer logfile.Close() - - reader := bufio.NewReader(os.Stdin) + var reader *bufio.Reader + var testInputFile *os.File + if ff := os.Getenv("TEST_INPUT_FILE"); ff != "" { + var err error + testInputFile, err = os.Open(ff) + if err != nil { + panic(fmt.Errorf("Could not open %v: %v", ff, err)) + } + reader = bufio.NewReader(testInputFile) + defer testInputFile.Close() + } else { + reader = bufio.NewReader(os.Stdin) + } scanner := bufio.NewScanner(reader) scanner.Split(bufio.ScanLines) for scanner.Scan() { - jsonCopyMap := make(map[string]interface{}) jsonMap := make(map[string]interface{}) rawStr := scanner.Text() - if noaction { - fmt.Fprintln(logfile, "No Action Needed for ", rawStr) - fmt.Println(noChanges) - continue - } - if undefined_debug { + if cfg.Debug { fmt.Fprintln(logfile, "Source: ", rawStr) } - if err := json.Unmarshal([]byte(rawStr), &jsonMap); err != nil { + if err := json.Unmarshal(scanner.Bytes(), &jsonMap); err != nil { fmt.Fprintln(logfile, "json.Unmarshal failed (", err, "): ", rawStr) fmt.Println(noChanges) continue } if jsonMap["$!"] == nil { + fmt.Fprintln(logfile, "Source contains no $! 
field: ", rawStr) continue } topval, ismap := jsonMap["$!"].(map[string]interface{}) if !ismap { - fmt.Fprintln(logfile, "Result is String: ", rawStr) + if cfg.Debug { + fmt.Fprintln(logfile, "Result is String: ", rawStr) + } fmt.Println(noChanges) continue } - if jsonCopyMap["$!"] == nil { - jsonCopyMap["$!"] = make(map[string]interface{}) - } - undefined_cur_num_fields = undefined_max_num_fields - all, replace_me, has_undefined := replaceDotMoveUndefined(topval, true) - if !replace_me { - fmt.Fprintln(logfile, "No Need to Replace for ", rawStr) + undefString, undefMap, err := processUndefinedAndMaxNumFields(topval) + if err != nil { + // error marshalling undefined fields to JSON + if cfg.Debug { + fmt.Fprintf(logfile, "Unable to convert undefined fields to JSON string: %v : rawStr: %s\n", err, rawStr) + } fmt.Println(noChanges) continue } - jsonCopyMap["$!"].(map[string]interface{})["openshift_logging_all"] = all - if tmp_val, err := json.Marshal(jsonCopyMap); err == nil { - if use_undefined && undefined_to_string && has_undefined { - if err := json.Unmarshal([]byte(tmp_val), &jsonCopyMap); err == nil { - // if unmarshal fails, giving up converting to string... 
- if undefined, err := json.Marshal(jsonCopyMap[undefined_name]); err == nil { - jsonCopyMap[undefined_name] = string(undefined) - if tmp_val0, err := json.Marshal(jsonCopyMap); err == nil { - tmp_val = tmp_val0 - } else { - fmt.Fprintln(logfile, "Marshaling undefined value converted to string failed (", err, "): ", tmp_val) - } - } else { - fmt.Fprintln(logfile, "Marshaling undefined value failed (", err, "): ", tmp_val) - } - } else { - fmt.Fprintln(logfile, "Parsing processed json failed (", err, "): ", tmp_val) - } + changes := false + if len(undefString) > 0 || undefMap != nil { + changes = true + } + if undefMap != nil { + if processUndefinedAndEmpty(undefMap, false, true) { + changes = true } - if undefined_debug { - fmt.Fprintln(logfile, "Result: ", string(tmp_val)) + } + if len(topval) > 0 { + if processUndefinedAndEmpty(topval, true, (undefMap == nil)) { + changes = true + } + } + if !changes { + if cfg.Debug { + fmt.Fprintln(logfile, "No Need to Replace for ", rawStr) } - fmt.Println(string(tmp_val)) - } else { - fmt.Fprintln(logfile, "Final Marshal failed (", err, "): ", rawStr) fmt.Println(noChanges) + continue + } + if len(undefString) > 0 { + topval[cfg.UndefinedName] = undefString + } else if undefMap != nil && len(undefMap) > 0 { + // if len is 0, means all fields were empty + topval[cfg.UndefinedName] = undefMap + } + loggingAll := map[string]interface{}{ + "openshift_logging_all": topval, + } + outputMap := map[string]interface{}{ + "$!": loggingAll, + } + outputString, err := json.Marshal(outputMap) + if err != nil { + if cfg.Debug { + fmt.Fprintln(logfile, "Final Marshal failed (", err, "): ", rawStr) + } + fmt.Println(noChanges) + } else { + if cfg.Debug { + fmt.Fprintln(logfile, "Result: ", string(outputString)) + } + fmt.Println(string(outputString)) } } } diff --git a/rsyslog/undefined_field/undefined_field_test.go b/rsyslog/undefined_field/undefined_field_test.go new file mode 100644 index 000000000..23965a466 --- /dev/null +++ 
b/rsyslog/undefined_field/undefined_field_test.go @@ -0,0 +1,299 @@ +package main + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "reflect" + "testing" +) + +var ( + testLogfile *os.File +) + +func setup(t *testing.T, cfg undefinedConfig) error { + var err error + testLogfile, err = ioutil.TempFile("", "log") + if err != nil { + return fmt.Errorf("Could not create temp log file: %v", err) + } + testCfgfile, err := ioutil.TempFile("", "cfg") + if err != nil { + os.Remove(testLogfile.Name()) + return fmt.Errorf("Could not create temp cfg file") + } + defer os.Remove(testCfgfile.Name()) + os.Setenv("LOGGING_FILE_PATH", testLogfile.Name()) + // write cfg options to testCfgfile + b, err := json.Marshal(cfg) + if err != nil { + os.Remove(testLogfile.Name()) + return fmt.Errorf("Could not marshal JSON config object: %v", err) + } + if _, err := testCfgfile.Write(b); err != nil { + os.Remove(testLogfile.Name()) + return fmt.Errorf("Could not write config to %v: %v", testCfgfile.Name(), err) + } + os.Setenv("UNDEFINED_CONFIG", testCfgfile.Name()) + testLogfile.Close() + onInit() + return nil +} + +func teardown(t *testing.T) { + tdLogfile, err := os.Open(testLogfile.Name()) + if err != nil { + t.Errorf("Could not open testLogfile: %v", err) + return + } + fi, err := tdLogfile.Stat() + if err != nil { + tdLogfile.Close() + t.Errorf("Could not seek to end of testLogfile: %v", err) + return + } + _, err = tdLogfile.Seek(0, 0) + if err != nil { + tdLogfile.Close() + t.Errorf("Could not rewind testLogfile: %v", err) + return + } + buf := make([]byte, fi.Size()) + _, err = tdLogfile.Read(buf) + if err != nil { + tdLogfile.Close() + t.Errorf("Could not read %v bytes from testLogfile: %v", fi.Size(), err) + return + } + tdLogfile.Close() + t.Logf("Test output: %s", buf) + os.Remove(testLogfile.Name()) +} + +func checkFieldsEqual(t *testing.T, expected, actual map[string]interface{}, fieldlist []string) error { + var err error + for _, field := range fieldlist { + if 
!reflect.DeepEqual(expected[field], actual[field]) { + t.Errorf("field [%s] expected value [%v] does not match actual value [%v]", + field, expected[field], actual[field]) + if err == nil { + err = fmt.Errorf("one or more field values did not match") + } + } + } + return err +} + +func TestKeepEmpty(t *testing.T) { + testcfg := undefinedConfig{ + Debug: true, + MergeJSONLog: true, + UseUndefined: true, + UndefinedToString: false, + DefaultKeepFields: "method,statusCode,type,@timestamp,req,res,CONTAINER_NAME,CONTAINER_ID_FULL", + ExtraKeepFields: "undefined4,undefined5,empty1,undefined3", + UndefinedName: "undefined", + KeepEmptyFields: "undefined4,undefined5,empty1,undefined3", + UndefinedDotReplaceChar: "UNUSED", + UndefinedMaxNumFields: -1, + } + err := setup(t, testcfg) + defer teardown(t) + if err != nil { + t.Errorf("test setup failed: %v", err) + } + inputString := `{"@timestamp": "2019-07-17T21:26:45.913217+00:00", ` + + `"undefined1": "undefined1", "undefined11": 1111, "undefined12": true, "empty1": "", ` + + `"undefined2": { "undefined2": "undefined2", "": "", "undefined22": 2222, "undefined23": false }, ` + + `"undefined3": { "emptyvalue": "" }, "undefined4": {}, "undefined5": "undefined5", ` + + `"undefined.6": "undefined6" }` + inputMap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &inputMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + origMap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &origMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + changed := processUndefinedAndEmpty(inputMap, true, true) + if !changed { + t.Errorf("Expected changes not performed on the input") + } + outputBytes, _ := json.Marshal(inputMap) + t.Logf("outputBytes [%s]", outputBytes) + fieldlist := []string{"@timestamp", "empty1", "undefined3", "undefined4", "undefined5"} + if err = checkFieldsEqual(t, 
origMap, inputMap, fieldlist); err != nil { + t.Error(err) + } + var val1 float64 = 1111 + var val2 float64 = 2222 + undefined2Map := map[string]interface{}{ + "undefined2": "undefined2", + "undefined22": val2, + "undefined23": false, + } + undefinedMap := map[string]interface{}{ + "undefined1": "undefined1", + "undefined11": val1, + "undefined12": true, + "undefined2": undefined2Map, + "undefined.6": "undefined6", + } + fieldlist = []string{"undefined1", "undefined11", "undefined12", "undefined2", "undefined.6"} + if err = checkFieldsEqual(t, undefinedMap, inputMap, fieldlist); err != nil { + t.Error(err) + } +} + +func TestUndefinedMaxNumFields(t *testing.T) { + cfg = undefinedConfig{ + Debug: true, + MergeJSONLog: true, + UseUndefined: true, + UndefinedToString: false, + DefaultKeepFields: "method,statusCode,type,@timestamp,req,res,CONTAINER_NAME,CONTAINER_ID_FULL", + ExtraKeepFields: "undefined4,undefined5,empty1,undefined3", + UndefinedName: "undefined", + KeepEmptyFields: "undefined4,undefined5,empty1,undefined3", + UndefinedDotReplaceChar: "UNUSED", + // the test should have 5 undefined fields - if UndefinedMaxNumFields == number of undefined fields - 1 + // this allows us to check for off-by-one errors as well + UndefinedMaxNumFields: 4, + } + err := setup(t, cfg) + defer teardown(t) + if err != nil { + t.Errorf("test setup failed: %v", err) + } + inputString := `{"@timestamp": "2019-07-17T21:26:45.913217+00:00", ` + + `"undefined1": "undefined1", "undefined11": 1111, "undefined12": true, "empty1": "", ` + + `"undefined2": { "undefined2": "undefined2", "": "", "undefined22": 2222, "undefined23": false }, ` + + `"undefined3": { "emptyvalue": "" }, "undefined4": {}, "undefined5": "undefined5", ` + + `"undefined.6": "undefined6" }` + inputMap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &inputMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + origMap := 
make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &origMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + expectedUndefString := `{"undefined.6":"undefined6","undefined1":"undefined1","undefined11":1111,"undefined12":true,"undefined2":{"":"","undefined2":"undefined2","undefined22":2222,"undefined23":false}}` + undefString, undefMap, _ := processUndefinedAndMaxNumFields(inputMap) + outputBytes, _ := json.Marshal(inputMap) + t.Logf("outputBytes [%s] undefString [%s] undefMap [%v]", outputBytes, undefString, undefMap) + if undefMap != nil { + t.Errorf("undefMap should be nil but has value %v", undefMap) + } + fieldlist := []string{"@timestamp", "empty1", "undefined3", "undefined4", "undefined5"} + if err = checkFieldsEqual(t, origMap, inputMap, fieldlist); err != nil { + t.Error(err) + } + if undefMap != nil { + t.Error("The undefMap is supposed to be nil") + } + // convert undefString back to map for comparison purposes + undefMap = make(map[string]interface{}) + if err = json.Unmarshal([]byte(undefString), &undefMap); err != nil { + t.Errorf("Could not convert undefString [%s] back to map: %v", undefString, err) + } + expectedUndefMap := make(map[string]interface{}) + if err = json.Unmarshal([]byte(expectedUndefString), &expectedUndefMap); err != nil { + t.Errorf("Could not convert expectedUndefString [%s] back to map: %v", expectedUndefString, err) + } + fieldlist = []string{"undefined1", "undefined11", "undefined12", "undefined2", "undefined.6"} + if err = checkFieldsEqual(t, expectedUndefMap, undefMap, fieldlist); err != nil { + t.Error(err) + } +} + +func TestUndefinedToString(t *testing.T) { + testcfg := undefinedConfig{ + Debug: true, + MergeJSONLog: true, + UseUndefined: true, + UndefinedToString: true, + DefaultKeepFields: "method,statusCode,type,@timestamp,req,res,CONTAINER_NAME,CONTAINER_ID_FULL", + ExtraKeepFields: "undefined4,undefined5,empty1,undefined3", + UndefinedName: 
"undefined", + KeepEmptyFields: "undefined4,undefined5,empty1,undefined3", + UndefinedDotReplaceChar: "UNUSED", + UndefinedMaxNumFields: -1, + } + err := setup(t, testcfg) + defer teardown(t) + if err != nil { + t.Errorf("test setup failed: %v", err) + } + inputString := `{"@timestamp": "2019-07-17T21:26:45.913217+00:00", ` + + `"undefined1": "undefined1", "undefined11": 1111, "undefined12": true, "empty1": "", ` + + `"undefined2": { "undefined2": "undefined2", "": "", "undefined22": 2222, "undefined23": false }, ` + + `"undefinedary": ["a",1,false,{"b":"c"},["d",2,true,{"e":"f"}]],` + + `"undefined3": { "emptyvalue": "" }, "undefined4": {}, "undefined5": "undefined5", ` + + `"undefined.6": "undefined6" }` + expectedOutputString := `{"@timestamp": "2019-07-17T21:26:45.913217+00:00", ` + + `"undefined1": "undefined1", "undefined11": "1111", "undefined12": "true", "empty1": "", ` + + `"undefined2": "{ \"undefined2\": \"undefined2\", \"\": \"\", \"undefined22\": 2222, \"undefined23\": false }", ` + + `"undefinedary": "[\"a\",1,false,{\"b\":\"c\"},[\"d\",2,true,{\"e\":\"f\"}]]",` + + `"undefined3": { "emptyvalue": "" }, "undefined4": {}, "undefined5": "undefined5", ` + + `"undefined.6": "undefined6" }` + inputMap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &inputMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + origMap := make(map[string]interface{}) + if err := json.Unmarshal([]byte(inputString), &origMap); err != nil { + t.Errorf("json.Unmarshal failed for inputString [%v]: %v", inputString, err) + } + changed := processUndefinedAndEmpty(inputMap, true, true) + if !changed { + t.Errorf("Expected changes not performed on the input") + } + outputBytes, _ := json.Marshal(inputMap) + t.Logf("outputBytes [%s]", outputBytes) + expectedOutputMap := make(map[string]interface{}) + if err = json.Unmarshal([]byte(expectedOutputString), &expectedOutputMap); err != nil { + t.Errorf("Could 
not convert expectedOutputString [%s] to map: %v", expectedOutputString, err) + } + t.Logf("using s [%s] using v [%v]", expectedOutputMap, expectedOutputMap) + expectedUndef2 := expectedOutputMap["undefined2"] + delete(expectedOutputMap, "undefined2") + expectedUndefAryStr := expectedOutputMap["undefinedary"] + delete(expectedOutputMap, "undefinedary") + actualUndef2 := inputMap["undefined2"] + delete(inputMap, "undefined2") + actualUndefAryStr := inputMap["undefinedary"] + delete(inputMap, "undefinedary") + t.Logf("expected undef2 [%s] undefary [%s] actual undef2 [%s] undefary [%s]", expectedUndef2, expectedUndefAryStr, actualUndef2, actualUndefAryStr) + + if !reflect.DeepEqual(expectedOutputMap, inputMap) { + t.Errorf("expected [%s] does not match actual [%s]", expectedOutputString, outputBytes) + } + + expectedUndef2Map := make(map[string]interface{}) + if err = json.Unmarshal([]byte(expectedUndef2.(string)), &expectedUndef2Map); err != nil { + t.Errorf("Could not convert expectedUndef2 [%s] to map: %v", expectedUndef2, err) + } + actualUndef2Map := make(map[string]interface{}) + if err = json.Unmarshal([]byte(actualUndef2.(string)), &actualUndef2Map); err != nil { + t.Errorf("Could not convert actualUndef2 [%s] to map: %v", actualUndef2, err) + } + if !reflect.DeepEqual(expectedUndef2Map, actualUndef2Map) { + t.Errorf("field undefined2 expected [%s] does not match actual [%s]", expectedUndef2, actualUndef2) + } + + var expectedUndefAry []interface{} + if err = json.Unmarshal([]byte(expectedUndefAryStr.(string)), &expectedUndefAry); err != nil { + t.Errorf("Could not convert expectedUndefAryStr [%s] to array: %v", expectedUndefAryStr, err) + } + var actualUndefAry []interface{} + if err = json.Unmarshal([]byte(actualUndefAryStr.(string)), &actualUndefAry); err != nil { + t.Errorf("Could not convert actualUndefAryStr [%s] to array: %v", actualUndefAryStr, err) + } + if !reflect.DeepEqual(expectedUndefAry, actualUndefAry) { + t.Errorf("field undefinedary expected 
[%s] does not match actual [%s]", expectedUndefAryStr, actualUndefAryStr) + } + +} diff --git a/rsyslog/utils/logs b/rsyslog/utils/logs index ba908f940..d9173b391 100755 --- a/rsyslog/utils/logs +++ b/rsyslog/utils/logs @@ -2,6 +2,7 @@ set -euo pipefail log_dir=`dirname ${LOGGING_FILE_PATH:-/var/log/rsyslog/rsyslog.log}` +pod_dir=${RSYSLOG_WORKDIRECTORY:-/var/lib/rsyslog.pod} cmd=cat helpMsg() { @@ -11,11 +12,13 @@ cat < /dev/null 2>&1 ; then + echo "" + echo "===== rsyslog debug logs =====" + for log_file in $( ls $log_dir/rsyslog_debug.* | sort -Vr ); do + cat $log_file + done + fi + if ls $log_dir/logrotate* > /dev/null 2>&1 ; then + echo "" + echo "===== logrotation logs ($log_dir) =====" + for log_file in $( ls $log_dir/logrotate* | sort -Vr ); do + cat $log_file + done + fi + if ls $pod_dir/logrotate* > /dev/null 2>&1 ; then + echo "" + echo "===== logrotation logs ($pod_dir) =====" + for log_file in $( ls $pod_dir/logrotate* | sort -Vr ); do + cat $log_file + done + fi + else + for log_file in $( ls $log_dir/rsyslog.* | sort -Vr ); do + cat $log_file + done + fi else log_file=`basename ${LOGGING_FILE_PATH:-/var/log/rsyslog/rsyslog.log}` exec tail "$tail_args" $log_dir/$log_file diff --git a/test/viaq-data-model.sh b/test/viaq-data-model.sh index 528b3d7f6..f6fc10853 100755 --- a/test/viaq-data-model.sh +++ b/test/viaq-data-model.sh @@ -85,6 +85,7 @@ cat > $cfg < remove_keys CONTAINER_TAG,logtag @@ -177,6 +178,47 @@ os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test.json | \ os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test-ops.json | \ python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test5 allow_empty" +# TEST 6 +# replace dot with underscore +stop_fluentd "${fpod:-}" $FLUENTD_WAIT_TIME 2>&1 | artifact_out +oc set env $fluentd_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,empty1,undefined3,$keep_fields CDM_KEEP_EMPTY_FIELDS=undefined4,undefined5,empty1,undefined3 MERGE_JSON_LOG=true CDM_UNDEFINED_DOT_REPLACE_CHAR=_ 2>&1 | 
artifact_out +start_fluentd false $FLUENTD_WAIT_TIME 2>&1 | artifact_out +# if using MUX_CLIENT_MODE=maximal, also have to tell mux to replace the dots +is_maximal=$( oc set env $fluentd_ds --list | grep ^MUX_CLIENT_MODE=maximal ) || : +if [ -n "$is_maximal" ] ; then + muxpod=$( get_running_pod mux ) + oc set env dc/logging-mux MERGE_JSON_LOG=true CDM_UNDEFINED_DOT_REPLACE_CHAR=_ + os::cmd::try_until_failure "oc get pod $muxpod" + os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " +fi +fpod=$( get_running_pod fluentd ) +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test.json | \ + python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test6 allow_empty" +os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test-ops.json | \ + python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test6 allow_empty" + +# TEST 7 +# set CDM_UNDEFINED_MAX_NUM_FIELDS to 3 +stop_fluentd "${fpod:-}" $FLUENTD_WAIT_TIME 2>&1 | artifact_out +oc set env $fluentd_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,empty1,undefined3,$keep_fields CDM_KEEP_EMPTY_FIELDS=undefined4,undefined5,empty1,undefined3 MERGE_JSON_LOG=true CDM_UNDEFINED_DOT_REPLACE_CHAR=_ CDM_UNDEFINED_MAX_NUM_FIELDS=3 2>&1 | artifact_out + +start_fluentd false $FLUENTD_WAIT_TIME 2>&1 | artifact_out +# if using MUX_CLIENT_MODE=maximal, also have to tell mux to replace the dots +is_maximal=$( oc set env $fluentd_ds --list | grep ^MUX_CLIENT_MODE=maximal ) || : +if [ -n "$is_maximal" ] ; then + muxpod=$( get_running_pod mux ) + oc set env dc/logging-mux CDM_UNDEFINED_MAX_NUM_FIELDS=3 + os::cmd::try_until_failure "oc get pod $muxpod" + os::cmd::try_until_text "oc get pods -l component=mux" "^logging-mux-.* Running " +fi +fpod=$( get_running_pod fluentd ) +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test.json | \ + python 
$OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test7 allow_empty" +os::cmd::expect_success "cat $ARTIFACT_DIR/viaq-data-model-test-ops.json | \ + python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test7 allow_empty" + if [ -n "$is_maximal" ] ; then muxpod=$( get_running_pod mux ) oc set env dc/logging-mux CDM_KEEP_EMPTY_FIELDS- diff --git a/test/zzz-rsyslog.sh b/test/zzz-rsyslog.sh index 33ecd7cb6..9b6218da0 100755 --- a/test/zzz-rsyslog.sh +++ b/test/zzz-rsyslog.sh @@ -9,6 +9,11 @@ os::util::environment::use_sudo os::test::junit::declare_suite_start "test/zzz-rsyslog" +pushd ${OS_O_A_L_DIR}/rsyslog/undefined_field > /dev/null +go test -v 2>&1 | artifact_out +popd > /dev/null + +LOGGING_NS=${LOGGING_NS:-openshift-logging} es_pod=$( get_es_pod es ) es_ops_pod=$( get_es_pod es-ops ) es_ops_pod=${es_ops_pod:-$es_pod} @@ -36,17 +41,11 @@ fi # clear the journal clear_and_restart_journal -restart_rsyslog_pod() { - local rpod=${1:-$( get_running_pod rsyslog )} - oc delete --force pod $rpod - os::cmd::try_until_failure "oc get pods $rpod > /dev/null 2>&1" - os::cmd::try_until_success "oc get pods 2> /dev/null | grep -q 'rsyslog.*Running'" -} - cleanup() { local return_code="$?" 
set +e get_all_logging_pod_logs + oc get cm rsyslog -o yaml > $ARTIFACT_DIR/rsyslog_configmap.yaml 2>&1 if [ "deploy_using_ansible" = "$deployfunc" ] ; then if [ -n "${tmpinv:-}" -a -f "${tmpinv:-}" ] ; then rm -f $tmpinv @@ -146,11 +145,14 @@ deploy_using_operators() { disable_cluster_logging_operator sleep 10 os::cmd::try_until_success "oc get cm rsyslog 2> /dev/null" + stop_rsyslog $rpod + oc set env $rsyslog_ds RSYSLOG_JOURNAL_READ_FROM_TAIL=on RSYSLOG_FILE_READ_FROM_TAIL=on RSYSLOG_USE_IMPSTATS_FILE=true # enable annotation_match oc get cm rsyslog -o yaml | \ sed -e 's/action(type="mmkubernetes"/action(type="mmkubernetes" annotation_match=["."]/' | \ oc replace --force -f - 2>&1 | artifact_out - restart_rsyslog_pod $rpod + start_rsyslog cleanfirst + rpod=$( get_running_pod rsyslog ) } get_logmessage() { @@ -273,11 +275,10 @@ if [ $logsize -gt 0 ]; then artifact_log logs before rotation oc exec $rpod -c rsyslog -- ls -l /var/log/rsyslog/ | artifact_out oc exec $rpod -c rsyslog -- cat /var/log/rsyslog/rsyslog.log > $ARTIFACT_DIR/rsyslog-before-rotation.log - # this will restart the rsyslog pod - oc set env daemonset/rsyslog LOGGING_FILE_SIZE=$maxsize LOGGING_FILE_AGE=$maxcount + stop_rsyslog $rpod + oc set env $rsyslog_ds LOGGING_FILE_SIZE=$maxsize LOGGING_FILE_AGE=$maxcount + start_rsyslog - os::cmd::try_until_failure "oc get pods $rpod > /dev/null 2>&1" - os::cmd::try_until_success "oc get pods 2> /dev/null | grep -q 'rsyslog.*Running'" # wait longer than ($maxcount + 1) * 60 seconds. 
sleep $( expr $( expr $maxcount + 1 ) \* 60 ) rpod=$( get_running_pod rsyslog ) @@ -312,7 +313,7 @@ if [ $logsize -gt 0 ]; then oc exec $rpod -c logrotate -- /usr/bin/cat /var/lib/rsyslog.pod/logrotate.log 2>&1 | artifact_out artifact_log "==========" os::cmd::expect_success "test $filecount -le $maxcount" - os::cmd::expect_success "test $filesize -le $(( maxsize + 1024 ))" + os::cmd::expect_success "test $filesize -le $(( maxsize + 2048 ))" oc apply --force -f $savecm else @@ -320,9 +321,120 @@ else fi # switch LOGGING_FILE_PATH to console -oc set env daemonset/rsyslog LOGGING_FILE_PATH=console -os::cmd::try_until_failure "oc get pods $rpod > /dev/null 2>&1" -os::cmd::try_until_success "oc get pods 2> /dev/null | grep -q 'rsyslog.*Running'" +stop_rsyslog $rpod +oc set env $rsyslog_ds LOGGING_FILE_PATH=console +start_rsyslog rpod=$( get_running_pod rsyslog ) oc logs $rpod -c rsyslog > $ARTIFACT_DIR/rsyslog.console.log 2>&1 os::cmd::expect_failure "grep 'oc exec -- logs' $ARTIFACT_DIR/rsyslog.console.log" + +# switch back LOGGING_FILE_PATH to default +stop_rsyslog $rpod +oc set env $rsyslog_ds LOGGING_FILE_PATH- +start_rsyslog +rpod=$( get_running_pod rsyslog ) + +artifact_log "************************" +artifact_log "Testing undefined fields" +artifact_log "************************" + +artifact_log "0. 
Preparing test data" +cfgdir=$( mktemp -d ) +oc extract cm/rsyslog --to=$cfgdir +cat > $cfgdir/66-debug.conf << EOF +set \$.ret = parse_json('{"undefined1":"undefined1","undefined11":1111,"undefined12":True,"empty1":"","undefined2":{"undefined2":"undefined2","":"","undefined22":2222,"undefined23":False},"undefined3":{"emptyvalue":""},"undefined4":"undefined4","undefined5":"undefined5","undefined.6":"undefined6"}', '\$!'); +unset \$!stream; +EOF +ls -l $cfgdir | artifact_out +stop_rsyslog $rpod +oc delete cm rsyslog +oc create cm rsyslog --from-file=$cfgdir +start_rsyslog +rpod=$( get_running_pod rsyslog ) + +oc exec $rpod -c rsyslog -- ls -l /etc/rsyslog.d/66-debug.conf | artifact_out +oc exec $rpod -c rsyslog -- cat /etc/rsyslog.d/66-debug.conf | artifact_out + +# UNDEFINED_DEBUG needs to be enabled for checking configs in the following tests. +stop_rsyslog $rpod +oc set env $rsyslog_ds UNDEFINED_DEBUG=true +start_rsyslog +rpod=$( get_running_pod rsyslog ) +os::cmd::try_until_success "oc exec $rpod -c rsyslog -- grep 'use_undefined:' /var/log/rsyslog/rsyslog.log" + +artifact_log "1. default values" +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'use_undefined:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}'" "false" +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'undefined_name:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}'" "undefined" +artifact_log "ok" + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test1" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test1" + +artifact_log "2. 
enable use_undefined - undefined fields are stored in 'undefined' field" +keep_fields="method,statusCode,type,@timestamp,req,res,CONTAINER_NAME,CONTAINER_ID_FULL" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_USE_UNDEFINED=true CDM_EXTRA_KEEP_FIELDS=$keep_fields 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'use_undefined:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}'" "true" + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test2" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test2" + +artifact_log "3. user specifies extra fields to keep" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,$keep_fields 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'extra_keep_fields:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}' | sed -e 's/.*\(undefined4\).*/\1/'" "undefined4" + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test3" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test3" + +artifact_log "4. 
user specifies alternate undefined name to use" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_UNDEFINED_NAME=myname 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'undefined_name:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}'" "myname" + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test4" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test4" + +artifact_log "5. reserve specified empty field as empty" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,empty1,undefined3,$keep_fields CDM_KEEP_EMPTY_FIELDS=undefined4,undefined5,empty1,undefined3 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) +os::cmd::expect_success_and_text "oc exec $rpod -c rsyslog -- grep 'keep_empty_fields:' /var/log/rsyslog/rsyslog.log | tail -n 1 | awk '{print \$3}' | sed -e 's/.*\(empty1\).*/\1/'" "empty1" + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test5 allow_empty" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test5 allow_empty" + +artifact_log "6. 
replace dot with underscore" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,empty1,undefined3,$keep_fields CDM_KEEP_EMPTY_FIELDS=undefined4,undefined5,empty1,undefined3 MERGE_JSON_LOG=true CDM_UNDEFINED_DOT_REPLACE_CHAR=_ 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test6 allow_empty" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test6 allow_empty" + +artifact_log "7. set CDM_UNDEFINED_MAX_NUM_FIELDS to 3" +stop_rsyslog $rpod +oc set env $rsyslog_ds CDM_EXTRA_KEEP_FIELDS=undefined4,undefined5,empty1,undefined3,$keep_fields CDM_KEEP_EMPTY_FIELDS=undefined4,undefined5,empty1,undefined3 MERGE_JSON_LOG=true CDM_UNDEFINED_DOT_REPLACE_CHAR=_ CDM_UNDEFINED_MAX_NUM_FIELDS=3 2>&1 | artifact_out +start_rsyslog +rpod=$( get_running_pod rsyslog ) + +wait_for_fluentd_to_catch_up get_logmessage get_logmessage2 +os::cmd::expect_success "cat $proj | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test7 allow_empty" +os::cmd::expect_success "cat $ops | python $OS_O_A_L_DIR/hack/testing/test-viaq-data-model.py test7 allow_empty" + +os::cmd::expect_success "oc exec $rpod -c rsyslog -- ls -l /var/log/rsyslog/rsyslog.log" +os::cmd::expect_failure "oc exec $rpod -c rsyslog -- grep 'panic: interface conversion' /var/log/rsyslog/rsyslog.log"