diff --git a/VERSION b/VERSION index 28af839..a53741c 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.2.5 \ No newline at end of file +0.2.6 \ No newline at end of file diff --git a/eventOutput/fileHandler.go b/eventOutput/fileHandler.go index 588ac36..7537029 100644 --- a/eventOutput/fileHandler.go +++ b/eventOutput/fileHandler.go @@ -85,6 +85,8 @@ type SemiElasticFileEvent struct { PrintJobName string `json:"print_job_name,omitempty"` PrinterName string `json:"printer_name,omitempty"` PrintedFilesBackupPath string `json:"printed_files_backup_path,omitempty"` + RemoteActivity string `json:"remote_activity,omitempty"` + Trusted *bool `json:"trusted,omitempty"` } type ElasticFileEvent struct { @@ -156,6 +158,8 @@ type File struct { IdentifiedExtensionMIMEType string `json:"identified_extension_mime_type,omitempty"` CurrentExtensionMIMEType string `json:"current_extension_mime_type,omitempty"` SuspiciousFileTypeMismatch *bool `json:"suspicious_file_type_mismatch,omitempty"` + RemoteActivity string `json:"remote_activity,omitempty"` + Trusted *bool `json:"trusted,omitempty"` } type User struct { diff --git a/ffsEvent/ffsFetcher.go b/ffsEvent/ffsFetcher.go index 3a8e519..d92332d 100644 --- a/ffsEvent/ffsFetcher.go +++ b/ffsEvent/ffsFetcher.go @@ -181,6 +181,8 @@ func queryFetcher(query config.FFSQuery, inProgressQueries *[]eventOutput.InProg IdentifiedExtensionMIMEType: ffsEvent.IdentifiedExtensionMIMEType, CurrentExtensionMIMEType: ffsEvent.CurrentExtensionMIMEType, SuspiciousFileTypeMismatch: ffsEvent.SuspiciousFileTypeMismatch, + RemoteActivity: ffsEvent.RemoteActivity, + Trusted: ffsEvent.Trusted, } var user *eventOutput.User @@ -422,6 +424,8 @@ func queryFetcher(query config.FFSQuery, inProgressQueries *[]eventOutput.InProg PrintJobName: ffsEvent.PrintJobName, PrinterName: ffsEvent.PrinterName, PrintedFilesBackupPath: ffsEvent.PrintedFilesBackupPath, + RemoteActivity: ffsEvent.RemoteActivity, + Trusted: ffsEvent.Trusted, } var semiElasticFFSEvent eventOutput.SemiElasticFFSEvent diff --git a/go.mod b/go.mod index 63196ae..d7a8996 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module github.com/BenB196/crashplan-ffs-puller go 1.14 require ( - github.com/BenB196/crashplan-ffs-go-pkg v0.1.7 + github.com/BenB196/crashplan-ffs-go-pkg v0.1.8 github.com/BenB196/ip-api-go-pkg v0.0.8 github.com/VictoriaMetrics/fastcache v1.5.7 - github.com/olivere/elastic/v7 v7.0.14 - github.com/prometheus/client_golang v1.5.1 + github.com/olivere/elastic/v7 v7.0.15 + github.com/prometheus/client_golang v1.6.0 golang.org/x/net v0.0.0-20200202094626-16171245cfb2 ) diff --git a/go.sum b/go.sum index 96fb965..854ca62 100644 --- a/go.sum +++ b/go.sum @@ -34,6 +34,8 @@ github.com/BenB196/crashplan-ffs-go-pkg v0.1.6 h1:q3T0ElfvId+TM2Qvf8p+L86GA08U2y github.com/BenB196/crashplan-ffs-go-pkg v0.1.6/go.mod h1:8ikK7PskHvmZupmN1uUnL3+z/CeIsa+7ibvfPuY34Dg= github.com/BenB196/crashplan-ffs-go-pkg v0.1.7 h1:rn60QBcqLswveJX/xP2da+E2T3XBh6v0LDH+Co7aTsE= github.com/BenB196/crashplan-ffs-go-pkg v0.1.7/go.mod h1:8ikK7PskHvmZupmN1uUnL3+z/CeIsa+7ibvfPuY34Dg= +github.com/BenB196/crashplan-ffs-go-pkg v0.1.8 h1:Nh1CVO9ARQmFD7rePsuQ2kwmsu9flP/4NaxoqrXcLSk= +github.com/BenB196/crashplan-ffs-go-pkg v0.1.8/go.mod h1:8ikK7PskHvmZupmN1uUnL3+z/CeIsa+7ibvfPuY34Dg= github.com/BenB196/ip-api-go-pkg v0.0.3 h1:FWmM7FkhT1N55jd4jPW7W9LVOQrG89DLlrgwMb5fosw= github.com/BenB196/ip-api-go-pkg v0.0.3/go.mod h1:ccPdkBNnzf/uvuk7qXgEO06TCS/qILNJQP/KETQG4jU= github.com/BenB196/ip-api-go-pkg v0.0.4 h1:BbWELxooG6l2gaXQ4i4gm6NsyikdbhalF+TKg08gDPQ= @@ -89,6 
+91,12 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -142,6 +150,8 @@ github.com/olivere/elastic/v7 v7.0.13 h1:rQdxt2duc9aoHkfa4p2PH3HOYkynjUygzmEPyec github.com/olivere/elastic/v7 v7.0.13/go.mod h1:14rWX28Pnh3qCKYRVnSGXWLf9MbLonYS/4FDCY3LAPo= github.com/olivere/elastic/v7 v7.0.14 h1:89dYPg6kD3WJx42ZtO4U6WDIzRy69FvQqz/yRiwekuM= github.com/olivere/elastic/v7 v7.0.14/go.mod h1:+FgncZ8ho1QF3NlBo77XbuoTKYHhvEOfFZKIAfHnnDE= +github.com/olivere/elastic/v7 v7.0.15 h1:v7kX5S+oMFfYKS4ZyzD37GH6lfZSpBo9atynRwBUywE= +github.com/olivere/elastic/v7 v7.0.15/go.mod h1:+FgncZ8ho1QF3NlBo77XbuoTKYHhvEOfFZKIAfHnnDE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -163,6 +173,8 @@ github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvls github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= @@ -188,6 +200,8 @@ github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQl github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= 
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -241,8 +255,11 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdO golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -261,6 +278,12 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/.gitignore b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/.gitignore deleted file mode 100644 index df30947..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ - -.idea/ diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/LICENSE b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
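The substantive change in this release is the pair of fields added to both `SemiElasticFileEvent` and `File` (and threaded through `ffsFetcher.go`): `remote_activity` and `trusted`. A minimal sketch of how they serialize, trimmed to just the new fields — the tags mirror the hunks in `eventOutput/fileHandler.go`, and the values are placeholders:

```
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed to the two fields this diff adds; tags match the
// hunks in eventOutput/fileHandler.go.
type fileEvent struct {
	RemoteActivity string `json:"remote_activity,omitempty"`
	Trusted        *bool  `json:"trusted,omitempty"`
}

func main() {
	trusted := false
	set, _ := json.Marshal(fileEvent{RemoteActivity: "TRUE", Trusted: &trusted})
	unset, _ := json.Marshal(fileEvent{})
	fmt.Println(string(set))   // {"remote_activity":"TRUE","trusted":false}
	fmt.Println(string(unset)) // {}
}
```

Keeping `Trusted` a `*bool` follows the neighboring `SuspiciousFileTypeMismatch` field: `omitempty` drops only a nil pointer, so an explicit `false` still reaches the output, whereas a plain `bool` could never distinguish "unset" from "false".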
diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/README.md b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/README.md deleted file mode 100644 index a51f481..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/README.md +++ /dev/null @@ -1,180 +0,0 @@ -# crashplan-ffs-go-pkg -A third-party Golang package for Code42's Crashplan Forensic File Search (FFS) API - -The goal of this Golang package is to provide an easy to use/integrate package for Code42's Crashplan FFS API within the Golang environment. There are two main functions that can be used within the package: - -1. GetAuthData -2. GetFileEvents - -These functions allow for someone to query the Crashplan FFS API and get the results returned in a Golang struct which can then be used for other purposes. - -## GetAuthData function -The GetAuthData is intended to get an API token for a user that will last for one (1) hour, which can then be used with the GetFileEvents function. - -Arguments: -- uri - This is the URL which will provide the API token. (I believe it will always be: https://www.crashplan.com/c42api/v3/auth/jwt?useBody=true) -- username - The username of the account that has permissions to access the FFS API. (This must be an email address according to the API) -- password - The password of the account that is set as the username. - -Returns: -- AuthData - Golang struct that contains the API token. -``` -#AuthData struct structure -AuthData - Data AuthToken - -AuthToken - V3UserToken string -``` -- error - Any errors. - -## GetFileEvents function - -The GetFileEvents is intended to gather all events for a passed query and return them as a Golang struct slice. - -Arguments: -- authData - This is the Golang struct which is gotten from the GetAuthData function. -- ffsURI - This is the URL which actually hosts the FFS API. (See Code42 documentation for URI, default is https://forensicsearch-default.prod.ffs.us2.code42.com/forensic-search/queryservice/api/v1/) -- query - This is the properly formatted FFS Query struct which is what is actually executed against the Code42 Crashplan FFS API. (See documentation for how to properly format these queries.) 
- - Example JSON query (Returns all events within a 5 second delta) - -``` -#Json Format -{ - "groups":[ - { - "filters":[ - { - "operator":"IS", - "term":"fileName", - "value":"*" - }, - { - "operator":"ON_OR_AFTER", - "term":"insertionTimestamp", - "value":"2019-08-18T20:31:48.728Z" - }, - { - "operator":"ON_OR_BEFORE", - "term":"insertionTimestamp", - "value":"2019-08-18T20:32:03.728Z" - } - ], - "filterClause":"AND" - } - ], - "groupClause":"AND", - "pgNum":1, - "pgSize":100, - "srtDir":"asc", - "srtKey":"insertionTimestamp" -} - -#Query Struct format -Query - Groups []Group - GroupClause string (optional) - PgNum int (optional) - PgSize int (optional) - SrtDir string (optional) - SrtKey string (optional) -} - -Group - Filters []Filter - FilterClause string (optional) -} - -Filter - Operator string - Term string - Value string -} - ``` -Returns: - -- []FileEvent - Golang struct slice that contains all events returned from the jsonQuery string - -``` -#FileEvent struct structure -FileEvent - EventId string - EventType string - EventTimestamp *time.Time (potentially empty) - InsertionTimestamp *time.Time (potentially empty) - FilePath string (potentially empty) - FileName string - FileType string (potentially empty) - FileCategory string (potentially empty) - IdentifiedExtensionCategory string (potentially empty) - CurrentExtensionCategory string (potentially empty) - FileSize *int - FileOwner []string (potentially empty) - Md5Checksum string (potentially empty) - Sha256Checksum string (potentially empty) - CreatedTimestamp *time.Time (potentially empty) - ModifyTimestamp *time.Time (potentially empty) - DeviceUsername string (potentially empty) - DeviceUid string (potentially empty) - UserUid string (potentially empty) - OsHostname string (potentially empty) - DomainName string (potentially empty) - PublicIpAddress string (potentially empty) - PrivateIpAddresses []string (potentially empty) - Actor string (potentially empty) - DirectoryId []string (potentially empty) - Source string (potentially empty) - Url string (potentially empty) - Shared *bool (potentially empty) - SharedWith []string (potentially empty) - SharingTypeAdded []string (potentially empty) - CloudDriveId string (potentially empty) - DetectionSourceAlias string (potentially empty) - FileId string (potentially empty) - Exposure []string (potentially empty) - ProcessOwner string (potentially empty) - ProcessName string (potentially empty) - TabWindowTitle string (potentially empty) - TabUrl string (potentially empty) - RemovableMediaVendor string (potentially empty) - RemovableMediaName string (potentially empty) - RemovableMediaSerialNumber string (potentially empty) - RemovableMediaCapacity *int (potentially empty) - RemovableMediaBusType string (potentially empty) - RemovableMediaMediaName string (potentially empty) - RemovableMediaVolumeName string (potentially empty) - RemovableMediaPartitionId string (potentially empty) - SyncDestination string (potentially empty) - EmailDLPPolicyNames []string (potentially empty) - EmailDLPSubject string (potentially empty) - EmailDLPSender string (potentially empty) - EmailDLPFrom string (potentially empty) - EmailDLPRecipients []string (potentially empty) - OutsideActiveHours *bool (potentially empty) - IdentifiedExtensionMIMEType string (potentially empty) - CurrentExtensionMIMEType string (potentially empty) - SuspiciousFileTypeMismatch *bool (potentially empty) - PrintJobName string (potentially empty) - PrinterName string (potentially empty) - PrintedFilesBackupPath string 
(potentially empty) -``` - -- error - Any errors. - -Limitations: - -Code42 Crashplan FFS API has limitations like most APIs, these limitations affect the GetFileEvents function: - -1. 120 Queries per minute, any additional queries will be dropped. (never actually bothered to test if/how this limit is actually enforced) -2. 200,000 results returned per query. This limitation is kind of annoying to handle as there is no easy way to handle it. The API does not support paging and the only way to figure out how many results there is for a query is to first query, count, then if over 200,000 results, break up the query into smaller time increments and perform multiple queries to get all of the results. -3. The GetFileEvents function only supports the /v1/fileevent/export API endpoint currently. This has to do with how the highly limited functionality of the /v1/fileevent endpoint which isn't well documented. - -## Code42 Documentation - -Links for Code42 Documentation - -- [Crashplan FFS API Documentation](https://support.code42.com/Administrator/Cloud/Monitoring_and_managing/Forensic_File_Search_API) - -## TODOs - -1. Figure out a way to build tests for these functions \ No newline at end of file diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/VERSION b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/VERSION deleted file mode 100644 index a1e1395..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.1.7 \ No newline at end of file diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/ffs.go b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/ffs.go deleted file mode 100644 index 4a3ac90..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/ffs.go +++ /dev/null @@ -1,640 +0,0 @@ -//Packages provide a module for using the Code42 Crashplan FFS API -package ffs - -import ( - "bytes" - "encoding/csv" - "encoding/json" - "errors" - "io/ioutil" - "log" - "net/http" - "strconv" - "strings" - "sync" - "time" -) - -//The main body of a file event record -type FileEvent struct { - EventId string `json:"eventId,omitempty"` - EventType string `json:"eventType,omitempty"` - EventTimestamp *time.Time `json:"eventTimestamp,omitempty"` - InsertionTimestamp *time.Time `json:"insertionTimestamp,omitempty"` - FilePath string `json:"filePath,omitempty"` - FileName string `json:"fileName,omitempty"` - FileType string `json:"fileType,omitempty"` - FileCategory string `json:"fileCategory,omitempty"` - IdentifiedExtensionCategory string `json:"identifiedExtensionCategory,omitempty"` - CurrentExtensionCategory string `json:"currentExtensionCategory,omitempty"` - FileSize *int `json:"fileSize,omitempty"` - FileOwner []string `json:"fileOwner,omitempty"` //Array of owners - Md5Checksum string `json:"md5Checksum,omitempty"` - Sha256Checksum string `json:"sha256Checksum,omitempty"` - CreatedTimestamp *time.Time `json:"createdTimestamp,omitempty"` - ModifyTimestamp *time.Time `json:"modifyTimestamp,omitempty"` - DeviceUsername string `json:"deviceUsername,omitempty"` - DeviceUid string `json:"deviceUid,omitempty"` - UserUid string `json:"userUid,omitempty"` - OsHostname string `json:"osHostname,omitempty"` - DomainName string `json:"domainName,omitempty"` - PublicIpAddress string `json:"publicIpAddress,omitempty"` - PrivateIpAddresses []string `json:"privateIpAddresses,omitempty"` //Array of IP address strings - Actor string `json:"actor,omitempty"` - DirectoryId []string `json:"directoryId,omitempty"` //An array of something, I am not sure - Source string 
`json:"source,omitempty"` - Url string `json:"url,omitempty"` - Shared *bool `json:"shared,omitempty"` - SharedWith []string `json:"sharedWith,omitempty"` //An array of strings (Mainly Email Addresses) - SharingTypeAdded []string `json:"sharingTypeAdded,omitempty"` - CloudDriveId string `json:"cloudDriveId,omitempty"` - DetectionSourceAlias string `json:"detectionSourceAlias,omitempty"` - FileId string `json:"fileId,omitempty"` - Exposure []string `json:"exposure,omitempty"` - ProcessOwner string `json:"processOwner,omitempty"` - ProcessName string `json:"processName,omitempty"` - TabWindowTitle string `json:"tabWindowTitle,omitempty"` - TabUrl string `json:"tabUrl,omitempty"` - RemovableMediaVendor string `json:"removableMediaVendor,omitempty"` - RemovableMediaName string `json:"removableMediaName,omitempty"` - RemovableMediaSerialNumber string `json:"removableMediaSerialNumber,omitempty"` - RemovableMediaCapacity *int `json:"removableMediaCapacity,omitempty"` - RemovableMediaBusType string `json:"removableMediaBusType,omitempty"` - RemovableMediaMediaName string `json:"removableMediaMediaName,omitempty"` - RemovableMediaVolumeName string `json:"removableMediaVolumeName,omitempty"` - RemovableMediaPartitionId string `json:"removableMediaPartitionId,omitempty"` - SyncDestination string `json:"syncDestination,omitempty"` - EmailDLPPolicyNames []string `json:"emailDLPPolicyNames,omitempty"` - EmailDLPSubject string `json:"emailDLPSubject,omitempty"` - EmailDLPSender string `json:"emailDLPSender,omitempty"` - EmailDLPFrom string `json:"emailDLPFrom,omitempty"` - EmailDLPRecipients []string `json:"emailDLPRecipients,omitempty"` - OutsideActiveHours *bool `json:"outsideActiveHours,omitempty"` - IdentifiedExtensionMIMEType string `json:"identifiedExtensionMimeType,omitempty"` - CurrentExtensionMIMEType string `json:"currentExtensionMimeType,omitempty"` - SuspiciousFileTypeMismatch *bool `json:"suspiciousFileTypeMismatch,omitempty"` - PrintJobName string `json:"printJobName,omitempty"` - PrinterName string `json:"printerName,omitempty"` - PrintedFilesBackupPath string `json:"printedFilesBackupPath,omitempty"` -} - -//Currently recognized csv headers -var csvHeaders = []string{"Event ID", "Event type", "Date Observed (UTC)", "Date Inserted (UTC)", "File path", "Filename", "File type", "File Category", "Identified Extension Category", "Current Extension Category", "File size (bytes)", "File Owner", "MD5 Hash", "SHA-256 Hash", "Create Date", "Modified Date", "Username", "Device ID", "User UID", "Hostname", "Fully Qualified Domain Name", "IP address (public)", "IP address (private)", "Actor", "Directory ID", "Source", "URL", "Shared", "Shared With", "File exposure changed to", "Cloud drive ID", "Detection Source Alias", "File Id", "Exposure Type", "Process Owner", "Process Name", "Tab/Window Title", "Tab URL", "Removable Media Vendor", "Removable Media Name", "Removable Media Serial Number", "Removable Media Capacity", "Removable Media Bus Type", "Removable Media Media Name", "Removable Media Volume Name", "Removable Media Partition Id", "Sync Destination", "Email DLP Policy Names", "Email DLP Subject", "Email DLP Sender", "Email DLP From", "Email DLP Recipients", "Outside Active Hours", "Identified Extension MIME Type", "Current Extension MIME Type", "Suspicious File Type Mismatch", "Print Job Name", "Printer Name", "Printed Files Backup Path"} - -//Structs of Crashplan FFS API Authentication Token Return -type AuthData struct { - Data AuthToken `json:"data"` - Error string `json:"error,omitempty"` 
- Warnings string `json:"warnings,omitempty"` -} -type AuthToken struct { - V3UserToken string `json:"v3_user_token"` -} - -//Structs for FFS Queries -type Query struct { - Groups []Group `json:"groups"` - GroupClause string `json:"groupClause,omitempty"` - PgNum int `json:"pgNum,omitempty"` - PgSize int `json:"pgSize,omitempty"` - SrtDir string `json:"srtDir,omitempty"` - SrtKey string `json:"srtKey,omitempty"` -} - -type Group struct { - Filters []Filter `json:"filters"` - FilterClause string `json:"filterClause,omitempty"` -} - -type Filter struct { - Operator string `json:"operator"` - Term string `json:"term"` - Value string `json:"value"` -} - -/* -GetAuthData - Function to get the Authentication data (mainly the authentication token) which will be needed for the rest of the API calls -The authentication token is good for up to 1 hour before it expires -*/ -func GetAuthData(uri string, username string, password string) (*AuthData, error) { - //Build HTTP GET request - req, err := http.NewRequest("GET", uri, nil) - - //Return nil and err if Building of HTTP GET request fails - if err != nil { - return nil, err - } - - //Set Basic Auth Header - req.SetBasicAuth(username, password) - //Set Accept Header - req.Header.Set("Accept", "application/json") - - //Make the HTTP Call - resp, err := http.DefaultClient.Do(req) - - //Return nil and err if Building of HTTP GET request fails - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - //Return err if status code != 200 - if resp.StatusCode != http.StatusOK { - return nil, errors.New("Error with Authentication Token GET: " + resp.Status) - } - - //Create AuthData variable - var authData AuthData - - respData := resp.Body - - responseBytes, _ := ioutil.ReadAll(respData) - - if strings.Contains(string(responseBytes), "Service Under Maintenance") { - return nil, errors.New("error: auth api service is under maintenance") - } - - //Decode the resp.Body into authData variable - err = json.Unmarshal(responseBytes, &authData) - - //Return nil and err if decoding of resp.Body fails - if err != nil { - return nil, err - } - - //Return AuthData - return &authData, nil -} - -//TODO create Global Function for calling getFileEvents with CSV url formatting (Priority, as will likely continue to be supported by Code42) -/* -csvLineToFileEvent - Converts a CSV Line into a File Event Struct -[]string - csv line. 
DO NOT PASS Line 0 (Headers) if they exist -This function contains panics in order to prevent messed up CSV parsing -*/ -func csvLineToFileEvent(csvLine []string) *FileEvent { - //Init variables - var fileEvent FileEvent - var err error - - //set eventId - fileEvent.EventId = csvLine[0] - - //set eventType - fileEvent.EventType = csvLine[1] - - //set eventTimestamp - //Convert eventTimeStamp to time - if csvLine[2] != "" { - var eventTimeStamp time.Time - eventTimeStamp, err = time.Parse(time.RFC3339Nano, csvLine[2]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing eventTimeStampString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.EventTimestamp = &eventTimeStamp - } else { - fileEvent.EventTimestamp = nil - } - - //set insertionTimestamp - //Convert insertionTimestamp to time - if csvLine[3] != "" { - var insertionTimestamp time.Time - insertionTimestamp, err = time.Parse(time.RFC3339Nano, csvLine[3]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing insertionTimestampString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.InsertionTimestamp = &insertionTimestamp - } else { - fileEvent.InsertionTimestamp = nil - } - - //set filePath - fileEvent.FilePath = csvLine[4] - - //set fileName - fileEvent.FileName = csvLine[5] - - //set fileType - fileEvent.FileType = csvLine[6] - - //set fileCategory - fileEvent.FileCategory = csvLine[7] - - //set identifiedExtensionCategory - fileEvent.IdentifiedExtensionCategory = csvLine[8] - - //set currentExtensionCategory - fileEvent.CurrentExtensionCategory = csvLine[9] - - //set fileSize - //Convert fileSizeString to int - if csvLine[10] != "" { - var fileSize int - fileSize, err = strconv.Atoi(csvLine[10]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing fileSizeString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.FileSize = &fileSize - } else { - fileEvent.FileSize = nil - } - - //set fileOwner - //Convert fileOwnerString to string slice - - if csvLine[11] != "" { - fileEvent.FileOwner = strings.Split(csvLine[11], ",") - } else { - fileEvent.FileOwner = nil - } - - //set md5Checksum - fileEvent.Md5Checksum = csvLine[12] - - //set sha256Checksum - fileEvent.Sha256Checksum = csvLine[13] - - //set createdTimestampString - //Convert createdTimestamp to time - if csvLine[14] != "" { - var createdTimestamp time.Time - createdTimestamp, err = time.Parse("2006-01-02 15:04:05", csvLine[14]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing createdTimestampString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - fileEvent.CreatedTimestamp = &createdTimestamp - } else { - fileEvent.CreatedTimestamp = nil - } - - //set modifyTimestampString - //Convert modifyTimestamp to time - if csvLine[15] != "" { - var modifyTimestamp time.Time - modifyTimestamp, err = time.Parse("2006-01-02 15:04:05", csvLine[15]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing modifyTimestampString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - fileEvent.ModifyTimestamp = &modifyTimestamp - } else { - 
fileEvent.ModifyTimestamp = nil - } - - //set deviceUserName - fileEvent.DeviceUsername = csvLine[16] - - //set deviceUid - fileEvent.DeviceUid = csvLine[17] - - //set userUid - fileEvent.UserUid = csvLine[18] - - //set osHostName - fileEvent.OsHostname = csvLine[19] - - //set domainName - fileEvent.DomainName = csvLine[20] - - //set publicIpAddress - fileEvent.PublicIpAddress = csvLine[21] - - //set privateIpAddresses - //Convert privateIpAddresses to string slice - if csvLine[22] != "" { - fileEvent.PrivateIpAddresses = strings.Split(csvLine[22], ",") - } else { - fileEvent.PrivateIpAddresses = nil - } - - //set actor - fileEvent.Actor = csvLine[23] - - //set directoryId - //Convert directoryId to string slice - if csvLine[24] != "" { - fileEvent.DirectoryId = strings.Split(csvLine[24], ",") - } else { - fileEvent.DirectoryId = nil - } - - //set source - fileEvent.Source = csvLine[25] - - //set url - fileEvent.Url = csvLine[26] - - //set shared - //Convert shared to bool - - if csvLine[27] != "" { - var shared bool - shared, err = strconv.ParseBool(csvLine[27]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing shared, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - fileEvent.Shared = &shared - } else { - fileEvent.Shared = nil - } - - //set sharedWith - //Convert sharedWith to string slice - if csvLine[28] != "" { - fileEvent.SharedWith = strings.Split(csvLine[28], ",") - } else { - fileEvent.SharedWith = nil - } - - //set sharingTypeAdded - //Convert sharingTypeAdded to string slice - if csvLine[29] != "" { - fileEvent.SharingTypeAdded = strings.Split(csvLine[29], ",") - } else { - fileEvent.SharingTypeAdded = nil - } - - //set cloudDriveId - fileEvent.CloudDriveId = csvLine[30] - - //set detectionSourceAlias - fileEvent.DetectionSourceAlias = csvLine[31] - - //set fileId - fileEvent.FileId = csvLine[32] - - //set exposure - //Convert exposure to string slice - if csvLine[33] != "" { - fileEvent.Exposure = strings.Split(csvLine[33], ",") - } else { - fileEvent.Exposure = nil - } - - //set processOwner - fileEvent.ProcessOwner = csvLine[34] - - //set processName - fileEvent.ProcessName = csvLine[35] - - //set tabWindowTitle - fileEvent.TabWindowTitle = csvLine[36] - - //set tabUrl - fileEvent.TabUrl = csvLine[37] - - //set removableMediaVendor - fileEvent.RemovableMediaVendor = csvLine[38] - - //set removableMediaName - fileEvent.RemovableMediaName = csvLine[39] - - //set removableMediaSerialNumber - fileEvent.RemovableMediaSerialNumber = csvLine[40] - - //set removableMediaCapacity - //Convert removableMediaCapacity to int - if csvLine[41] != "" { - var removableMediaCapacity int - removableMediaCapacity, err = strconv.Atoi(csvLine[41]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing removableMediaCapacityString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.RemovableMediaCapacity = &removableMediaCapacity - } else { - fileEvent.RemovableMediaCapacity = nil - } - - //set removableMediaBusType - fileEvent.RemovableMediaBusType = csvLine[42] - - //set removableMediaMediaName - fileEvent.RemovableMediaMediaName = csvLine[43] - - //set removableMediaVolumeName - fileEvent.RemovableMediaVolumeName = csvLine[44] - - //set removableMediaPartitionId - fileEvent.RemovableMediaPartitionId = csvLine[45] - - //set syncDestination - fileEvent.SyncDestination = 
csvLine[46] - - //set emailDLPPolicyNames - //Convert emailDLPPolicyNames to string slice - if csvLine[47] != "" { - fileEvent.EmailDLPPolicyNames = strings.Split(csvLine[47], ",") - } else { - fileEvent.EmailDLPPolicyNames = nil - } - - //set emailDLPSubject - fileEvent.EmailDLPSubject = csvLine[48] - - //set emailDLPSender - fileEvent.EmailDLPSender = csvLine[49] - - //set emailDLPFrom - fileEvent.EmailDLPFrom = csvLine[50] - - //set emailDLPRecipients - //Convert emailDLPRecipients to string slice - if csvLine[51] != "" { - fileEvent.EmailDLPRecipients = strings.Split(csvLine[51], ",") - } else { - fileEvent.EmailDLPRecipients = nil - } - - //set outsideActiveHours - if csvLine[52] != "" { - var outsideActiveHours bool - outsideActiveHours, err = strconv.ParseBool(csvLine[52]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing outsideActiveHoursString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.OutsideActiveHours = &outsideActiveHours - } else { - fileEvent.OutsideActiveHours = nil - } - - //set identifiedExtensionMimeType - fileEvent.IdentifiedExtensionMIMEType = csvLine[53] - - //set currentExtensionMimeType - fileEvent.CurrentExtensionMIMEType = csvLine[54] - - //set suspiciousFileTypeMismatch - if csvLine[55] != "" { - var suspiciousFileTypeMismatch bool - suspiciousFileTypeMismatch, err = strconv.ParseBool(csvLine[55]) - - //Panic if this fails, that means something is wrong with CSV handling - if err != nil { - log.Println("Error parsing suspiciousFileTypeMismatchString, something must be wrong with CSV parsing.") - log.Println(csvLine) - panic(err) - } - - fileEvent.SuspiciousFileTypeMismatch = &suspiciousFileTypeMismatch - } else { - fileEvent.SuspiciousFileTypeMismatch = nil - } - - //set printJobName - fileEvent.PrintJobName = csvLine[56] - - //set printerName - fileEvent.PrinterName = csvLine[57] - - //set printedFilesBackupPath - fileEvent.PrintedFilesBackupPath = csvLine[58] - - return &fileEvent -} - -//TODO create Global Function for calling getFileEvents with JSON url formatting (this may be not be needed, Code42 seems to frown upon using this for pulling large amounts of events.) - -/* -getFileEvents - Function to get the actual event records from FFS -authData - authData struct which contains the authentication API token -ffsURI - the URI for where to pull the FFS events -query - query struct which contains the actual FFS query and a golang valid form -This function contains a panic if the csv columns do not match the currently specified list. -This is to prevent data from being messed up during parsing. 
-*/ -func GetFileEvents(authData AuthData, ffsURI string, query Query) (*[]FileEvent, error) { - - //Validate jsonQuery is valid JSON - ffsQuery, err := json.Marshal(query) - if err != nil { - return nil, errors.New("jsonQuery is not in a valid json format") - } - - //Make sure authData token is not "" - if authData.Data.V3UserToken == "" { - return nil, errors.New("authData cannot be nil") - } - - //Query ffsURI with authData API token and jsonQuery body - req, err := http.NewRequest("POST", ffsURI, bytes.NewReader(ffsQuery)) - - //Handle request errors - if err != nil { - return nil, err - } - - //Set request headers - req.Header.Set("Content-Type", "application/json") - req.Header.Set("Authorization", "v3_user_token "+authData.Data.V3UserToken) - - //Get Response - resp, err := http.DefaultClient.Do(req) - - //Handle response errors - if err != nil { - return nil, err - } - - //defer body close - defer resp.Body.Close() - - //Make sure http status code is 200 - if resp.StatusCode != http.StatusOK { - return nil, errors.New("Error with gathering file events POST: " + resp.Status) - } - - //Read Response Body as CSV - reader := csv.NewReader(resp.Body) - reader.Comma = ',' - - //Read body into variable - data, err := reader.ReadAll() - - //Handle reader errors - if err != nil { - return nil, err - } - - var fileEvents []FileEvent - - //Loop through CSV lines - var wg sync.WaitGroup - wg.Add(len(data)) - go func() { - for lineNumber, lineContent := range data { - if lineNumber != 0 { - //Convert CSV line to file events and add to slice - fileEvents = append(fileEvents, *csvLineToFileEvent(lineContent)) - } else { - //Validate that the columns have not changed - equal := equal(lineContent, csvHeaders) - - if !equal { - panic(errors.New("number of columns in CSV file does not match expected number, API changed, panicking to keep data integrity. 
New CSV columns: " + strings.Join(lineContent, ","))) - } - } - wg.Done() - } - }() - - wg.Wait() - - return &fileEvents, nil -} - -/* -Calculate the difference between two different slices -Used in this case to tell if the csv columns have changed -*/ -func equal(slice1 []string, slice2 []string) bool { - if len(slice1) != len(slice2) { - return false - } - - for i, v := range slice1 { - if v != slice2[i] { - return false - } - } - - return true -} diff --git a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/go.mod b/vendor/github.com/BenB196/crashplan-ffs-go-pkg/go.mod deleted file mode 100644 index 89ef05e..0000000 --- a/vendor/github.com/BenB196/crashplan-ffs-go-pkg/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/BenB196/crashplan-ffs-go-pkg - -go 1.14 diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/.gitignore b/vendor/github.com/BenB196/ip-api-go-pkg/.gitignore deleted file mode 100644 index df30947..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ - -.idea/ diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/LICENSE b/vendor/github.com/BenB196/ip-api-go-pkg/LICENSE deleted file mode 100644 index 9cb2ff9..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 BenB196 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/README.md b/vendor/github.com/BenB196/ip-api-go-pkg/README.md deleted file mode 100644 index 5648e12..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/README.md +++ /dev/null @@ -1,132 +0,0 @@ -# ip-api-go-pkg - -A third party Golang package for integrating Golang projects with [IP-API's](http://ip-api.com/) API. - -The goal of this Golang package is to provide an easy to use package for integrating IP-API's API into the Golang environment. - -## Structs - -There are two main structs within this package: - -1. Query -2. Location - -### Query struct - -The query struct is design to be what is passed to the functions and eventually be executed against IP-API's API. - -``` -type Query struct { - Queries []QueryIP `json:"queries"` Slice of QueryIPs. NOTE: If executing the single query function, only 1 QueryIP can be passed. - Fields string `json:"fields,omitempty"` This is a string of comma separated fields. - Lang string `json:"lang,omitempty"` This is a string of the language you wish to have returned. 
-} - -type QueryIP struct { - Query string `json:"query"` This is a string of either the IP address or DNS name you wish to query. - Fields string `json:"fields,omitempty"` This is a string of comma separated fields. NOTE: Overwrites fields in Query struct. - Lang string `json:"lang,omitempty"` This is a string of the language you wish to have returned. NOTE: Overwrites lang in Query struct. -} -``` - -List of possible fields that can be passed: status, message, continent, continentCode, country, countryCode, region, regionName, city, district, zip, lat, lon, timezone, isp, org, as, asname, reverse, mobile, proxy, query [1](http://ip-api.com/docs/api:json) - -List of possible languages that can be passed: en, de, es, pt-BR, fr, ja, zh-CN, ru [2](http://ip-api.com/docs/api:json) - -### Location struct - -The location struct is designed to take the return of the IP-API query and provide it in an easy to use struct. - -``` -type Location struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` - Continent string `json:"continent,omitempty"` - ContinentCode string `json:"continentCode,omitempty"` - Country string `json:"country,omitempty"` - CountryCode string `json:"countryCode,omitempty"` - Region string `json:"region,omitempty"` - RegionName string `json:"regionName,omitempty"` - City string `json:"city,omitempty"` - District string `json:"district,omitempty"` - ZIP string `json:"zip,omitempty"` - Lat *float32 `json:"lat,omitempty"` - Lon *float32 `json:"lon,omitempty"` - Timezone string `json:"timezone,omitempty"` - Currency string `json:"currency,omitempty"` - ISP string `json:"isp,omitempty"` - Org string `json:"org,omitempty"` - AS string `json:"as,omitempty"` - ASName string `json:"asame,omitempty"` - Reverse string `json:"reverse,omitempty"` - Mobile bool `json:"mobile,omitempty"` - Proxy bool `json:"proxy,omitempty"` - Hosting bool `json:"hosting,omitempty"` - Query string `json:"query,omitempty"` -} -``` - -## Functions - -There are four (4) main functions within this package: - -1. SingleQuery -2. BatchQuery -3. ValidateFields -4. ValidateLang - -These functions allow someone to query IP-API's API within Golang and return the values as Golang structs to be used within other Golang applications. - -### SingleQuery function - -The SingleQuery function is designed to make a single request against the API. - -Arguments: -- query - This is a Golang struct which when passed to the function will be reformatted into a proper query to be executed against the IP-API API. -- apiKey - This is for when you are using the pro version of IP-API and which to have the [increased functionality](https://members.ip-api.com/). -- baseURL - This is really only intended to be used if you are going through some sort of IP-API proxy. Otherwise, this can be left blank, and the URL will be determined by whether an API Key is provided or not. - -Returns: -- Location - Golang struct that contains the results of the query. -- error - Any errors. - -### BatchQuery function - -The BatchQuery function is designed to take advantage of IP-API's [batch](http://ip-api.com/docs/api:batch) API. This is designed to allow for multiple queries to be executed at the same time to reduce overall query time. - -Arguments: -- query - This is a Golang struct which when passed to the function will be reformatted into a proper query to be executed against the IP-API API. 
-- apiKey - This is for when you are using the pro version of IP-API and which to have the [increased functionality](https://members.ip-api.com/). -- baseURL - This is really only intended to be used if you are going through some sort of IP-API proxy. Otherwise, this can be left blank, and the URL will be determined by whether an API Key is provided or not. - -Returns: -- []Location - Golang slice of location structs that contains the results of the query. -- error - Any errors. - -An observation about the batch query. It sometime appears to return less data then the single query on the exact same query even when the same fields are passed. This is something that appears to be on the IP-API end, not the package end. - -### ValidateFields function - -The ValidateFields function is designed to validate that the fields which are being passed to the IP-API are valid. - -Arguments: -- fields - This is a string which contains comma separated values of the fields. It will be checked against the AllowedAPIFields. - -Returns: -- string - The same string which was passed to it in the fields argument. -- error - Any errors. - -### ValidateLang function - -The validateLang function is designed to validate the lang string which is one of the languages supported by IP-API. - -Arguments: -- lang - the string which contains the desired language. - -Returns: -- string - The same string which was passed to it in the lang argument. -- error - Any errors. - -#Important - -I currently have not tested the functionality of the Pro-stuff as I don't currently have access to it. If you encounter any issues with it, please let me know so that I can fix them. \ No newline at end of file diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/VERSION b/vendor/github.com/BenB196/ip-api-go-pkg/VERSION deleted file mode 100644 index 7d6b3eb..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/VERSION +++ /dev/null @@ -1 +0,0 @@ -0.0.8 \ No newline at end of file diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/go.mod b/vendor/github.com/BenB196/ip-api-go-pkg/go.mod deleted file mode 100644 index 7e2da4a..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/BenB196/ip-api-go-pkg - -go 1.14 diff --git a/vendor/github.com/BenB196/ip-api-go-pkg/ip_api.go b/vendor/github.com/BenB196/ip-api-go-pkg/ip_api.go deleted file mode 100644 index 7b7209c..0000000 --- a/vendor/github.com/BenB196/ip-api-go-pkg/ip_api.go +++ /dev/null @@ -1,291 +0,0 @@ -package ip_api - -import ( - "bytes" - "encoding/json" - "errors" - "log" - "net/http" - "strconv" - "strings" -) - -//URI for the free IP-API -const FreeAPIURI = "http://ip-api.com/" - -//URI for the pro IP-API -const ProAPIURI = "https://pro.ip-api.com/" - -type Location struct { - Status string `json:"status,omitempty"` - Message string `json:"message,omitempty"` - Continent string `json:"continent,omitempty"` - ContinentCode string `json:"continentCode,omitempty"` - Country string `json:"country,omitempty"` - CountryCode string `json:"countryCode,omitempty"` - Region string `json:"region,omitempty"` - RegionName string `json:"regionName,omitempty"` - City string `json:"city,omitempty"` - District string `json:"district,omitempty"` - ZIP string `json:"zip,omitempty"` - Lat *float32 `json:"lat,omitempty"` - Lon *float32 `json:"lon,omitempty"` - Timezone string `json:"timezone,omitempty"` - Currency string `json:"currency,omitempty"` - ISP string `json:"isp,omitempty"` - Org string `json:"org,omitempty"` - AS string 
`json:"as,omitempty"` - ASName string `json:"asname,omitempty"` - Reverse string `json:"reverse,omitempty"` - Mobile *bool `json:"mobile,omitempty"` - Proxy *bool `json:"proxy,omitempty"` - Hosting *bool `json:"hosting,omitempty"` - Query string `json:"query,omitempty"` -} - -type Query struct { - Queries []QueryIP `json:"queries"` - Fields string `json:"fields,omitempty"` - Lang string `json:"lang,omitempty"` -} - -type QueryIP struct { - Query string `json:"query"` - Fields string `json:"fields,omitempty"` - Lang string `json:"lang,omitempty"` -} - -//Execute a single query (queries field should only contain 1 value -func SingleQuery(query Query, apiKey string, baseURL string, debugging bool) (*Location, error) { - //Make sure that there is only 1 query value - if len(query.Queries) != 1 { - return nil, errors.New("error: only 1 query can be passed to single query api") - } - - if debugging { - log.Println(query) - } - - //Build URI - uri := buildURI(query, "single",apiKey, baseURL) - - //Execute query - req, err := http.NewRequest("GET",uri,nil) - - if err != nil { - return nil, err - } - - //Set request headers - req.Header.Set("Accept","application/json") - - resp, err := http.DefaultClient.Do(req) - - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - //Check if invalid api key - if resp.StatusCode == 403 { - if strings.Contains(uri, "?key=") { - return nil, errors.New("error: invalid api key") - } else { - return nil, errors.New("error: exceeded api calls per minute, you need to un-blacklist yourself") - } - } - - if resp.StatusCode != http.StatusOK { - return nil, errors.New("error querying ip api: " + resp.Status + " " + strconv.Itoa(resp.StatusCode)) - } - - var location Location - - err = json.NewDecoder(resp.Body).Decode(&location) - - if err != nil { - return nil, err - } - - return &location, nil -} - -//Execute a batch query (queries field should contain 1 or more values -func BatchQuery(query Query, apiKey string, baseURL string, debugging bool) ([]Location, error) { - //Make sure that there are 1 or more query values - if len(query.Queries) < 1 { - return nil, errors.New("error: no queries passed to batch query") - } - - //Build URI - uri := buildURI(query,"batch",apiKey, baseURL) - - //Build queries list - queries, err := json.Marshal(query.Queries) - - if err != nil { - return nil, err - } - - if debugging { - log.Println(string(queries)) - } - - //Execute Query - req, err := http.NewRequest("POST",uri,bytes.NewReader(queries)) - - if err != nil { - return nil, err - } - - //Set request headers - req.Header.Set("Content-Type","application/json") - - resp, err := http.DefaultClient.Do(req) - - if err != nil { - return nil, err - } - - defer resp.Body.Close() - - //Check if invalid api key - if resp.StatusCode == 403 { - if strings.Contains(uri, "?key=") { - return nil, errors.New("error: invalid api key") - } else { - return nil, errors.New("error: exceeded api calls per minute, you need to un-blacklist yourself") - } - } - - if resp.StatusCode != http.StatusOK { - return nil, errors.New("error querying ip api: " + resp.Status + " " + strconv.Itoa(resp.StatusCode)) - } - - var locations []Location - - err = json.NewDecoder(resp.Body).Decode(&locations) - - if err != nil { - return nil, err - } - - return locations,nil -} - -func buildURI(query Query, queryType string, apiKey string, baseURL string) string { - var baseURI string - //Set base URI - if baseURL != "" { - baseURI = baseURL - } else { - switch apiKey { - case "": - baseURI = FreeAPIURI - 
default: - baseURI = ProAPIURI - } - } - - //Update base URI with query type - switch queryType { - case "single": - baseURI = baseURI + "json/" + query.Queries[0].Query - case "batch": - baseURI = baseURI + "batch" - } - - //Get fields list if fields len > 0 - var fieldsList string - if len(query.Fields) > 0 { - fieldsList = buildFieldList(query.Fields) - } - - //Get lang string if lang != "" - var lang string - if query.Lang != "" { - lang = buildLangString(query.Lang) - } - - //Update base URI with api key if not "" - switch apiKey { - case "": - if fieldsList != "" && lang != "" { - baseURI = baseURI + "?" + fieldsList + "&" + lang - } else if fieldsList != "" { - baseURI = baseURI + "?" + fieldsList - } else if lang != "" { - baseURI = baseURI + "?" + lang - } - default: - baseURI = baseURI + "?key=" + apiKey - if fieldsList != "" && lang != "" { - baseURI = baseURI + "&" + fieldsList + "&" + lang - } else if fieldsList != "" { - baseURI = baseURI + "&" + fieldsList - } else if lang != "" { - baseURI = baseURI + "&" + lang - } - } - return baseURI -} - -//Build fields string from slice -func buildFieldList(fields string) string { - return "fields=" + fields -} - -//Build lang string from lang value -func buildLangString(lang string) string { - return "lang=" + lang -} - -var AllowedAPIFields = []string{"status","message","continent","continentCode","country","countryCode","region","regionName","city","district","zip","lat","lon","timezone","isp","org","as","asname","reverse","mobile","proxy","hosting","query"} - -var AllowedLangs = []string{"en","de","es","pt-BR","fr","ja","zh-CN","ru"} - -/* -ValidateFields - validates the fields string to make sure it only has valid parameters -fields - string of comma separated values -*/ -func ValidateFields(fields string) (string, error) { - fieldsSlice := strings.Split(fields,",") - - for _, field := range fieldsSlice { - if !contains(AllowedAPIFields, field) { - return "", errors.New("error: illegal field provided: " + field) - } - } - - return fields, nil -} - -/* -ValidateLang - validates the lang string to make sure it is a valid lang option -lang - string with lang value -*/ -func ValidateLang(lang string) (string, error) { - if !contains(AllowedLangs,lang) { - return "", errors.New("error: illegal lang value provided: " + lang) - } - - return lang, nil -} - -/* -contains - checks a string slice to see if it contains a string -slice - string slice which you want to check -item - string which you want to see if exists in the string slice - -returns -bool - true if slice contains string, else false -*/ -func contains(slice []string, item string) bool { - for _, value := range slice { - if value == item { - return true - } - } - return false -} \ No newline at end of file diff --git a/vendor/github.com/VictoriaMetrics/fastcache/LICENSE b/vendor/github.com/VictoriaMetrics/fastcache/LICENSE deleted file mode 100644 index 9a8145e..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2018 VictoriaMetrics - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright 
notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/VictoriaMetrics/fastcache/README.md b/vendor/github.com/VictoriaMetrics/fastcache/README.md
deleted file mode 100644
index b353214..0000000
--- a/vendor/github.com/VictoriaMetrics/fastcache/README.md
+++ /dev/null
@@ -1,116 +0,0 @@
-[![Build Status](https://github.com/VictoriaMetrics/fastcache/workflows/main/badge.svg)](https://github.com/VictoriaMetrics/fastcache/actions)
-[![GoDoc](https://godoc.org/github.com/VictoriaMetrics/fastcache?status.svg)](http://godoc.org/github.com/VictoriaMetrics/fastcache)
-[![Go Report](https://goreportcard.com/badge/github.com/VictoriaMetrics/fastcache)](https://goreportcard.com/report/github.com/VictoriaMetrics/fastcache)
-[![codecov](https://codecov.io/gh/VictoriaMetrics/fastcache/branch/master/graph/badge.svg)](https://codecov.io/gh/VictoriaMetrics/fastcache)
-
-# fastcache - fast thread-safe in-memory cache for a big number of entries in Go
-
-### Features
-
-* Fast. Performance scales on multi-core CPUs. See benchmark results below.
-* Thread-safe. Concurrent goroutines may read and write into a single
- cache instance.
-* Fastcache is designed for storing a big number of entries without
- [GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
-* Fastcache automatically evicts old entries when reaching the maximum cache size
- set on its creation.
-* [Simple API](http://godoc.org/github.com/VictoriaMetrics/fastcache).
-* Simple source code.
-* Cache may be [saved to file](https://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SaveToFile)
- and [loaded from file](https://godoc.org/github.com/VictoriaMetrics/fastcache#LoadFromFile).
-* Works on [Google AppEngine](https://cloud.google.com/appengine/docs/go/).
-
-
-### Benchmarks
-
-`Fastcache` performance is compared with [BigCache](https://github.com/allegro/bigcache), standard Go map
-and [sync.Map](https://golang.org/pkg/sync/#Map).
-
-```
-GOMAXPROCS=4 go test github.com/VictoriaMetrics/fastcache -bench='Set|Get' -benchtime=10s
-goos: linux
-goarch: amd64
-pkg: github.com/VictoriaMetrics/fastcache
-BenchmarkBigCacheSet-4     2000  10566656 ns/op   6.20 MB/s  4660369 B/op       6 allocs/op
-BenchmarkBigCacheGet-4     2000   6902694 ns/op   9.49 MB/s   684169 B/op  131076 allocs/op
-BenchmarkBigCacheSetGet-4  1000  17579118 ns/op   7.46 MB/s  5046744 B/op  131083 allocs/op
-BenchmarkCacheSet-4        5000   3808874 ns/op  17.21 MB/s     1142 B/op       2 allocs/op
-BenchmarkCacheGet-4        5000   3293849 ns/op  19.90 MB/s     1140 B/op       2 allocs/op
-BenchmarkCacheSetGet-4     2000   8456061 ns/op  15.50 MB/s     2857 B/op       5 allocs/op
-BenchmarkStdMapSet-4       2000  10559382 ns/op   6.21 MB/s   268413 B/op   65537 allocs/op
-BenchmarkStdMapGet-4       5000   2687404 ns/op  24.39 MB/s     2558 B/op      13 allocs/op
-BenchmarkStdMapSetGet-4     100 154641257 ns/op   0.85 MB/s   387405 B/op   65558 allocs/op
-BenchmarkSyncMapSet-4       500  24703219 ns/op   2.65 MB/s  3426543 B/op  262411 allocs/op
-BenchmarkSyncMapGet-4      5000   2265892 ns/op  28.92 MB/s     2545 B/op      79 allocs/op
-BenchmarkSyncMapSetGet-4   1000  14595535 ns/op   8.98 MB/s  3417190 B/op  262277 allocs/op
-```
-
-The `MB/s` column here actually means `millions of operations per second`.
-As you can see, `fastcache` is faster than `BigCache` in all cases.
-`fastcache` is also faster than the standard Go map and `sync.Map` on workloads
-with inserts.
-
-
-### Limitations
-
-* Keys and values must be byte slices. Other types must be marshaled before
- storing them in the cache.
-* Big entries with sizes exceeding 64KB must be stored via a [distinct API](http://godoc.org/github.com/VictoriaMetrics/fastcache#Cache.SetBig).
-* There is no cache expiration. Entries are evicted from the cache only
- on cache size overflow. An entry deadline may be stored inside the value in order
- to implement cache expiration.
-
-
-### Architecture details
-
-The cache uses ideas from [BigCache](https://github.com/allegro/bigcache):
-
-* The cache consists of many buckets, each with its own lock.
- This helps scale performance on multi-core CPUs, since multiple
- CPUs may concurrently access distinct buckets.
-* Each bucket consists of a `hash(key) -> (key, value) position` map
- and 64KB-sized byte slices (chunks) holding encoded `(key, value)` entries.
- Each bucket contains only `O(chunksCount)` pointers. For instance, a 64GB cache
- would contain ~1M pointers, while a similarly-sized `map[string][]byte`
- would contain ~1B pointers for short keys and values. This would lead to
- [huge GC overhead](https://syslog.ravelin.com/further-dangers-of-large-heaps-in-go-7a267b57d487).
-
-64KB-sized chunks reduce memory fragmentation and the total memory usage compared
-to a single big chunk per bucket.
-Chunks are allocated off-heap if possible. This reduces total memory usage because
-the GC collects unused memory more frequently without the need for `GOGC` tweaking.
-
-
-### Users
-
-* `Fastcache` has been extracted from [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) sources.
- See [this article](https://medium.com/devopslinks/victoriametrics-creating-the-best-remote-storage-for-prometheus-5d92d66787ac)
- for more info about `VictoriaMetrics`.
-
-
-### FAQ
-
-#### What is the difference between `fastcache` and other similar caches like [BigCache](https://github.com/allegro/bigcache) or [FreeCache](https://github.com/coocood/freecache)?
-
-* `Fastcache` is faster. See benchmark results above.
-* `Fastcache` uses less memory due to lower heap fragmentation. This allows
- saving many GBs of memory on multi-GB caches.
-* The `Fastcache` API [is simpler](http://godoc.org/github.com/VictoriaMetrics/fastcache).
- The API is designed to be used in zero-allocation mode.
-
-
-#### Why doesn't `fastcache` support cache expiration?
-
-Because we don't need cache expiration in [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics).
-Cached entries inside `VictoriaMetrics` never expire. They are automatically evicted on cache size overflow.
-
-It is easy to implement cache expiration on top of `fastcache` by caching values
-with marshaled deadlines and verifying deadlines after reading these values
-from the cache.
-
-
-#### Why doesn't `fastcache` support advanced features such as [thundering herd protection](https://en.wikipedia.org/wiki/Thundering_herd_problem) or callbacks on entries' eviction?
-
-Because these features would complicate the code and would make it slower.
-`Fastcache` source code is simple - just copy-paste it and implement the feature you want
-on top of it.
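To ground the README above, here is a minimal usage sketch of the package being removed from the vendor tree. The cache size and the `/tmp` file path are illustrative; the calls themselves (`New`, `Set`, `Get`, `SaveToFile`, `LoadFromFileOrNew`) appear in the deleted sources below:

```
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	// A 32 MB cache; the oldest entries are evicted automatically
	// once the configured size is reached.
	c := fastcache.New(32 * 1024 * 1024)

	// Keys and values must be byte slices (see Limitations above).
	c.Set([]byte("answer"), []byte("42"))

	// Get appends the value to the supplied buffer; passing nil
	// allocates a new one.
	v := c.Get(nil, []byte("answer"))
	fmt.Println(string(v)) // 42

	// The cache can be persisted and restored; LoadFromFileOrNew
	// falls back to an empty cache if loading fails.
	if err := c.SaveToFile("/tmp/fastcache.state"); err != nil {
		fmt.Println("save failed:", err)
	}
	c2 := fastcache.LoadFromFileOrNew("/tmp/fastcache.state", 32*1024*1024)
	fmt.Println(string(c2.Get(nil, []byte("answer"))))
}
```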
diff --git a/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go b/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go
deleted file mode 100644
index 7ca6f48..0000000
--- a/vendor/github.com/VictoriaMetrics/fastcache/bigcache.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package fastcache
-
-import (
-	"sync"
-	"sync/atomic"
-
-	xxhash "github.com/cespare/xxhash/v2"
-)
-
-// maxSubvalueLen is the maximum size of subvalue chunk.
-//
-// - 16 bytes are for subkey encoding
-// - 4 bytes are for len(key)+len(value) encoding inside fastcache
-// - 1 byte is implementation detail of fastcache
-const maxSubvalueLen = chunkSize - 16 - 4 - 1
-
-// maxKeyLen is the maximum size of key.
-//
-// - 16 bytes are for (hash + valueLen)
-// - 4 bytes are for len(key)+len(subkey)
-// - 1 byte is implementation detail of fastcache
-const maxKeyLen = chunkSize - 16 - 4 - 1
-
-// SetBig sets (k, v) to c where len(v) may exceed 64KB.
-//
-// GetBig must be used for reading stored values.
-//
-// The stored entry may be evicted at any time either due to cache
-// overflow or due to unlikely hash collision.
-// Pass higher maxBytes value to New if the added items disappear
-// frequently.
-//
-// It is safe to store entries smaller than 64KB with SetBig.
-//
-// k and v contents may be modified after returning from SetBig.
-func (c *Cache) SetBig(k, v []byte) {
-	atomic.AddUint64(&c.bigStats.SetBigCalls, 1)
-	if len(k) > maxKeyLen {
-		atomic.AddUint64(&c.bigStats.TooBigKeyErrors, 1)
-		return
-	}
-	valueLen := len(v)
-	valueHash := xxhash.Sum64(v)
-
-	// Split v into chunks with up to 64Kb each.
-	subkey := getSubkeyBuf()
-	var i uint64
-	for len(v) > 0 {
-		subkey.B = marshalUint64(subkey.B[:0], valueHash)
-		subkey.B = marshalUint64(subkey.B, uint64(i))
-		i++
-		subvalueLen := maxSubvalueLen
-		if len(v) < subvalueLen {
-			subvalueLen = len(v)
-		}
-		subvalue := v[:subvalueLen]
-		v = v[subvalueLen:]
-		c.Set(subkey.B, subvalue)
-	}
-
-	// Write metavalue, which consists of valueHash and valueLen.
-	subkey.B = marshalUint64(subkey.B[:0], valueHash)
-	subkey.B = marshalUint64(subkey.B, uint64(valueLen))
-	c.Set(k, subkey.B)
-	putSubkeyBuf(subkey)
-}
-
-// GetBig searches for the value for the given k, appends it to dst
-// and returns the result.
-//
-// GetBig returns only values stored via SetBig. It doesn't work
-// with values stored via other methods.
-//
-// k contents may be modified after returning from GetBig.
-func (c *Cache) GetBig(dst, k []byte) []byte { - atomic.AddUint64(&c.bigStats.GetBigCalls, 1) - subkey := getSubkeyBuf() - defer putSubkeyBuf(subkey) - - // Read and parse metavalue - subkey.B = c.Get(subkey.B[:0], k) - if len(subkey.B) == 0 { - // Nothing found. - return dst - } - if len(subkey.B) != 16 { - atomic.AddUint64(&c.bigStats.InvalidMetavalueErrors, 1) - return dst - } - valueHash := unmarshalUint64(subkey.B) - valueLen := unmarshalUint64(subkey.B[8:]) - - // Collect result from chunks. - dstLen := len(dst) - if n := dstLen + int(valueLen) - cap(dst); n > 0 { - dst = append(dst[:cap(dst)], make([]byte, n)...) - } - dst = dst[:dstLen] - var i uint64 - for uint64(len(dst)-dstLen) < valueLen { - subkey.B = marshalUint64(subkey.B[:0], valueHash) - subkey.B = marshalUint64(subkey.B, uint64(i)) - i++ - dstNew := c.Get(dst, subkey.B) - if len(dstNew) == len(dst) { - // Cannot find subvalue - return dst[:dstLen] - } - dst = dstNew - } - - // Verify the obtained value. - v := dst[dstLen:] - if uint64(len(v)) != valueLen { - atomic.AddUint64(&c.bigStats.InvalidValueLenErrors, 1) - return dst[:dstLen] - } - h := xxhash.Sum64(v) - if h != valueHash { - atomic.AddUint64(&c.bigStats.InvalidValueHashErrors, 1) - return dst[:dstLen] - } - return dst -} - -func getSubkeyBuf() *bytesBuf { - v := subkeyPool.Get() - if v == nil { - return &bytesBuf{} - } - return v.(*bytesBuf) -} - -func putSubkeyBuf(bb *bytesBuf) { - bb.B = bb.B[:0] - subkeyPool.Put(bb) -} - -var subkeyPool sync.Pool - -type bytesBuf struct { - B []byte -} - -func marshalUint64(dst []byte, u uint64) []byte { - return append(dst, byte(u>>56), byte(u>>48), byte(u>>40), byte(u>>32), byte(u>>24), byte(u>>16), byte(u>>8), byte(u)) -} - -func unmarshalUint64(src []byte) uint64 { - _ = src[7] - return uint64(src[0])<<56 | uint64(src[1])<<48 | uint64(src[2])<<40 | uint64(src[3])<<32 | uint64(src[4])<<24 | uint64(src[5])<<16 | uint64(src[6])<<8 | uint64(src[7]) -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go b/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go deleted file mode 100644 index 20a3c02..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/fastcache.go +++ /dev/null @@ -1,415 +0,0 @@ -// Package fastcache implements fast in-memory cache. 
-// -// The package has been extracted from https://victoriametrics.com/ -package fastcache - -import ( - "fmt" - "sync" - "sync/atomic" - - xxhash "github.com/cespare/xxhash/v2" -) - -const bucketsCount = 512 - -const chunkSize = 64 * 1024 - -const bucketSizeBits = 40 - -const genSizeBits = 64 - bucketSizeBits - -const maxGen = 1<= maxBucketSize { - panic(fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize)) - } - maxChunks := (maxBytes + chunkSize - 1) / chunkSize - b.chunks = make([][]byte, maxChunks) - b.m = make(map[uint64]uint64) - b.Reset() -} - -func (b *bucket) Reset() { - b.mu.Lock() - chunks := b.chunks - for i := range chunks { - putChunk(chunks[i]) - chunks[i] = nil - } - bm := b.m - for k := range bm { - delete(bm, k) - } - b.idx = 0 - b.gen = 1 - atomic.StoreUint64(&b.getCalls, 0) - atomic.StoreUint64(&b.setCalls, 0) - atomic.StoreUint64(&b.misses, 0) - atomic.StoreUint64(&b.collisions, 0) - atomic.StoreUint64(&b.corruptions, 0) - b.mu.Unlock() -} - -func (b *bucket) Clean() { - b.mu.Lock() - bGen := b.gen & ((1 << genSizeBits) - 1) - bIdx := b.idx - bm := b.m - for k, v := range bm { - gen := v >> bucketSizeBits - idx := v & ((1 << bucketSizeBits) - 1) - if gen == bGen && idx < bIdx || gen+1 == bGen && idx >= bIdx || gen == maxGen && bGen == 1 && idx >= bIdx { - continue - } - delete(bm, k) - } - b.mu.Unlock() -} - -func (b *bucket) UpdateStats(s *Stats) { - s.GetCalls += atomic.LoadUint64(&b.getCalls) - s.SetCalls += atomic.LoadUint64(&b.setCalls) - s.Misses += atomic.LoadUint64(&b.misses) - s.Collisions += atomic.LoadUint64(&b.collisions) - s.Corruptions += atomic.LoadUint64(&b.corruptions) - - b.mu.RLock() - s.EntriesCount += uint64(len(b.m)) - for _, chunk := range b.chunks { - s.BytesSize += uint64(cap(chunk)) - } - b.mu.RUnlock() -} - -func (b *bucket) Set(k, v []byte, h uint64) { - setCalls := atomic.AddUint64(&b.setCalls, 1) - if setCalls%(1<<14) == 0 { - b.Clean() - } - - if len(k) >= (1<<16) || len(v) >= (1<<16) { - // Too big key or value - its length cannot be encoded - // with 2 bytes (see below). Skip the entry. - return - } - var kvLenBuf [4]byte - kvLenBuf[0] = byte(uint16(len(k)) >> 8) - kvLenBuf[1] = byte(len(k)) - kvLenBuf[2] = byte(uint16(len(v)) >> 8) - kvLenBuf[3] = byte(len(v)) - kvLen := uint64(len(kvLenBuf) + len(k) + len(v)) - if kvLen >= chunkSize { - // Do not store too big keys and values, since they do not - // fit a chunk. - return - } - - b.mu.Lock() - idx := b.idx - idxNew := idx + kvLen - chunkIdx := idx / chunkSize - chunkIdxNew := idxNew / chunkSize - if chunkIdxNew > chunkIdx { - if chunkIdxNew >= uint64(len(b.chunks)) { - idx = 0 - idxNew = kvLen - chunkIdx = 0 - b.gen++ - if b.gen&((1< 0 { - gen := v >> bucketSizeBits - idx := v & ((1 << bucketSizeBits) - 1) - if gen == bGen && idx < b.idx || gen+1 == bGen && idx >= b.idx || gen == maxGen && bGen == 1 && idx >= b.idx { - chunkIdx := idx / chunkSize - if chunkIdx >= uint64(len(b.chunks)) { - // Corrupted data during the load from file. Just skip it. - atomic.AddUint64(&b.corruptions, 1) - goto end - } - chunk := b.chunks[chunkIdx] - idx %= chunkSize - if idx+4 >= chunkSize { - // Corrupted data during the load from file. Just skip it. - atomic.AddUint64(&b.corruptions, 1) - goto end - } - kvLenBuf := chunk[idx : idx+4] - keyLen := (uint64(kvLenBuf[0]) << 8) | uint64(kvLenBuf[1]) - valLen := (uint64(kvLenBuf[2]) << 8) | uint64(kvLenBuf[3]) - idx += 4 - if idx+keyLen+valLen >= chunkSize { - // Corrupted data during the load from file. Just skip it. 
- atomic.AddUint64(&b.corruptions, 1) - goto end - } - if string(k) == string(chunk[idx:idx+keyLen]) { - idx += keyLen - if returnDst { - dst = append(dst, chunk[idx:idx+valLen]...) - } - found = true - } else { - atomic.AddUint64(&b.collisions, 1) - } - } - } -end: - b.mu.RUnlock() - if !found { - atomic.AddUint64(&b.misses, 1) - } - return dst, found -} - -func (b *bucket) Del(h uint64) { - b.mu.Lock() - delete(b.m, h) - b.mu.Unlock() -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/file.go b/vendor/github.com/VictoriaMetrics/fastcache/file.go deleted file mode 100644 index bab5484..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/file.go +++ /dev/null @@ -1,419 +0,0 @@ -package fastcache - -import ( - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "runtime" - - "github.com/golang/snappy" -) - -// SaveToFile atomically saves cache data to the given filePath using a single -// CPU core. -// -// SaveToFile may be called concurrently with other operations on the cache. -// -// The saved data may be loaded with LoadFromFile*. -// -// See also SaveToFileConcurrent for faster saving to file. -func (c *Cache) SaveToFile(filePath string) error { - return c.SaveToFileConcurrent(filePath, 1) -} - -// SaveToFileConcurrent saves cache data to the given filePath using concurrency -// CPU cores. -// -// SaveToFileConcurrent may be called concurrently with other operations -// on the cache. -// -// The saved data may be loaded with LoadFromFile*. -// -// See also SaveToFile. -func (c *Cache) SaveToFileConcurrent(filePath string, concurrency int) error { - // Create dir if it doesn't exist. - dir := filepath.Dir(filePath) - if _, err := os.Stat(dir); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("cannot stat %q: %s", dir, err) - } - if err := os.MkdirAll(dir, 0755); err != nil { - return fmt.Errorf("cannot create dir %q: %s", dir, err) - } - } - - // Save cache data into a temporary directory. - tmpDir, err := ioutil.TempDir(dir, "fastcache.tmp.") - if err != nil { - return fmt.Errorf("cannot create temporary dir inside %q: %s", dir, err) - } - defer func() { - if tmpDir != "" { - _ = os.RemoveAll(tmpDir) - } - }() - gomaxprocs := runtime.GOMAXPROCS(-1) - if concurrency <= 0 || concurrency > gomaxprocs { - concurrency = gomaxprocs - } - if err := c.save(tmpDir, concurrency); err != nil { - return fmt.Errorf("cannot save cache data to temporary dir %q: %s", tmpDir, err) - } - - // Remove old filePath contents, since os.Rename may return - // error if filePath dir exists. - if err := os.RemoveAll(filePath); err != nil { - return fmt.Errorf("cannot remove old contents at %q: %s", filePath, err) - } - if err := os.Rename(tmpDir, filePath); err != nil { - return fmt.Errorf("cannot move temporary dir %q to %q: %s", tmpDir, filePath, err) - } - tmpDir = "" - return nil -} - -// LoadFromFile loads cache data from the given filePath. -// -// See SaveToFile* for saving cache data to file. -func LoadFromFile(filePath string) (*Cache, error) { - return load(filePath, 0) -} - -// LoadFromFileOrNew tries loading cache data from the given filePath. -// -// The function falls back to creating new cache with the given maxBytes -// capacity if error occurs during loading the cache from file. 
-func LoadFromFileOrNew(filePath string, maxBytes int) *Cache { - c, err := load(filePath, maxBytes) - if err == nil { - return c - } - return New(maxBytes) -} - -func (c *Cache) save(dir string, workersCount int) error { - if err := saveMetadata(c, dir); err != nil { - return err - } - - // Save buckets by workersCount concurrent workers. - workCh := make(chan int, workersCount) - results := make(chan error) - for i := 0; i < workersCount; i++ { - go func(workerNum int) { - results <- saveBuckets(c.buckets[:], workCh, dir, workerNum) - }(i) - } - // Feed workers with work - for i := range c.buckets[:] { - workCh <- i - } - close(workCh) - - // Read results. - var err error - for i := 0; i < workersCount; i++ { - result := <-results - if result != nil && err == nil { - err = result - } - } - return err -} - -func load(filePath string, maxBytes int) (*Cache, error) { - maxBucketChunks, err := loadMetadata(filePath) - if err != nil { - return nil, err - } - if maxBytes > 0 { - maxBucketBytes := uint64((maxBytes + bucketsCount - 1) / bucketsCount) - expectedBucketChunks := (maxBucketBytes + chunkSize - 1) / chunkSize - if maxBucketChunks != expectedBucketChunks { - return nil, fmt.Errorf("cache file %s contains maxBytes=%d; want %d", filePath, maxBytes, expectedBucketChunks*chunkSize*bucketsCount) - } - } - - // Read bucket files from filePath dir. - d, err := os.Open(filePath) - if err != nil { - return nil, fmt.Errorf("cannot open %q: %s", filePath, err) - } - defer func() { - _ = d.Close() - }() - fis, err := d.Readdir(-1) - if err != nil { - return nil, fmt.Errorf("cannot read files from %q: %s", filePath, err) - } - results := make(chan error) - workersCount := 0 - var c Cache - for _, fi := range fis { - fn := fi.Name() - if fi.IsDir() || !dataFileRegexp.MatchString(fn) { - continue - } - workersCount++ - go func(dataPath string) { - results <- loadBuckets(c.buckets[:], dataPath, maxBucketChunks) - }(filePath + "/" + fn) - } - err = nil - for i := 0; i < workersCount; i++ { - result := <-results - if result != nil && err == nil { - err = result - } - } - if err != nil { - return nil, err - } - // Initialize buckets, which could be missing due to incomplete or corrupted files in the cache. - // It is better initializing such buckets instead of returning error, since the rest of buckets - // contain valid data. 
- for i := range c.buckets[:] { - b := &c.buckets[i] - if len(b.chunks) == 0 { - b.chunks = make([][]byte, maxBucketChunks) - b.m = make(map[uint64]uint64) - } - } - return &c, nil -} - -func saveMetadata(c *Cache, dir string) error { - metadataPath := dir + "/metadata.bin" - metadataFile, err := os.Create(metadataPath) - if err != nil { - return fmt.Errorf("cannot create %q: %s", metadataPath, err) - } - defer func() { - _ = metadataFile.Close() - }() - maxBucketChunks := uint64(cap(c.buckets[0].chunks)) - if err := writeUint64(metadataFile, maxBucketChunks); err != nil { - return fmt.Errorf("cannot write maxBucketChunks=%d to %q: %s", maxBucketChunks, metadataPath, err) - } - return nil -} - -func loadMetadata(dir string) (uint64, error) { - metadataPath := dir + "/metadata.bin" - metadataFile, err := os.Open(metadataPath) - if err != nil { - return 0, fmt.Errorf("cannot open %q: %s", metadataPath, err) - } - defer func() { - _ = metadataFile.Close() - }() - maxBucketChunks, err := readUint64(metadataFile) - if err != nil { - return 0, fmt.Errorf("cannot read maxBucketChunks from %q: %s", metadataPath, err) - } - if maxBucketChunks == 0 { - return 0, fmt.Errorf("invalid maxBucketChunks=0 read from %q", metadataPath) - } - return maxBucketChunks, nil -} - -var dataFileRegexp = regexp.MustCompile(`^data\.\d+\.bin$`) - -func saveBuckets(buckets []bucket, workCh <-chan int, dir string, workerNum int) error { - dataPath := fmt.Sprintf("%s/data.%d.bin", dir, workerNum) - dataFile, err := os.Create(dataPath) - if err != nil { - return fmt.Errorf("cannot create %q: %s", dataPath, err) - } - defer func() { - _ = dataFile.Close() - }() - zw := snappy.NewBufferedWriter(dataFile) - for bucketNum := range workCh { - if err := writeUint64(zw, uint64(bucketNum)); err != nil { - return fmt.Errorf("cannot write bucketNum=%d to %q: %s", bucketNum, dataPath, err) - } - if err := buckets[bucketNum].Save(zw); err != nil { - return fmt.Errorf("cannot save bucket[%d] to %q: %s", bucketNum, dataPath, err) - } - } - if err := zw.Close(); err != nil { - return fmt.Errorf("cannot close snappy.Writer for %q: %s", dataPath, err) - } - return nil -} - -func loadBuckets(buckets []bucket, dataPath string, maxChunks uint64) error { - dataFile, err := os.Open(dataPath) - if err != nil { - return fmt.Errorf("cannot open %q: %s", dataPath, err) - } - defer func() { - _ = dataFile.Close() - }() - zr := snappy.NewReader(dataFile) - for { - bucketNum, err := readUint64(zr) - if err == io.EOF { - // Reached the end of file. - return nil - } - if bucketNum >= uint64(len(buckets)) { - return fmt.Errorf("unexpected bucketNum read from %q: %d; must be smaller than %d", dataPath, bucketNum, len(buckets)) - } - if err := buckets[bucketNum].Load(zr, maxChunks); err != nil { - return fmt.Errorf("cannot load bucket[%d] from %q: %s", bucketNum, dataPath, err) - } - } -} - -func (b *bucket) Save(w io.Writer) error { - b.Clean() - - b.mu.RLock() - defer b.mu.RUnlock() - - // Store b.idx, b.gen and b.m to w. - - bIdx := b.idx - bGen := b.gen - chunksLen := 0 - for _, chunk := range b.chunks { - if chunk == nil { - break - } - chunksLen++ - } - kvs := make([]byte, 0, 2*8*len(b.m)) - var u64Buf [8]byte - for k, v := range b.m { - binary.LittleEndian.PutUint64(u64Buf[:], k) - kvs = append(kvs, u64Buf[:]...) - binary.LittleEndian.PutUint64(u64Buf[:], v) - kvs = append(kvs, u64Buf[:]...) 
- } - - if err := writeUint64(w, bIdx); err != nil { - return fmt.Errorf("cannot write b.idx: %s", err) - } - if err := writeUint64(w, bGen); err != nil { - return fmt.Errorf("cannot write b.gen: %s", err) - } - if err := writeUint64(w, uint64(len(kvs))/2/8); err != nil { - return fmt.Errorf("cannot write len(b.m): %s", err) - } - if _, err := w.Write(kvs); err != nil { - return fmt.Errorf("cannot write b.m: %s", err) - } - - // Store b.chunks to w. - if err := writeUint64(w, uint64(chunksLen)); err != nil { - return fmt.Errorf("cannot write len(b.chunks): %s", err) - } - for chunkIdx := 0; chunkIdx < chunksLen; chunkIdx++ { - chunk := b.chunks[chunkIdx][:chunkSize] - if _, err := w.Write(chunk); err != nil { - return fmt.Errorf("cannot write b.chunks[%d]: %s", chunkIdx, err) - } - } - - return nil -} - -func (b *bucket) Load(r io.Reader, maxChunks uint64) error { - if maxChunks == 0 { - return fmt.Errorf("the number of chunks per bucket cannot be zero") - } - bIdx, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read b.idx: %s", err) - } - bGen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read b.gen: %s", err) - } - kvsLen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read len(b.m): %s", err) - } - kvsLen *= 2 * 8 - kvs := make([]byte, kvsLen) - if _, err := io.ReadFull(r, kvs); err != nil { - return fmt.Errorf("cannot read b.m: %s", err) - } - m := make(map[uint64]uint64, kvsLen/2/8) - for len(kvs) > 0 { - k := binary.LittleEndian.Uint64(kvs) - kvs = kvs[8:] - v := binary.LittleEndian.Uint64(kvs) - kvs = kvs[8:] - m[k] = v - } - - maxBytes := maxChunks * chunkSize - if maxBytes >= maxBucketSize { - return fmt.Errorf("too big maxBytes=%d; should be smaller than %d", maxBytes, maxBucketSize) - } - chunks := make([][]byte, maxChunks) - chunksLen, err := readUint64(r) - if err != nil { - return fmt.Errorf("cannot read len(b.chunks): %s", err) - } - if chunksLen > uint64(maxChunks) { - return fmt.Errorf("chunksLen=%d cannot exceed maxChunks=%d", chunksLen, maxChunks) - } - currChunkIdx := bIdx / chunkSize - if currChunkIdx > 0 && currChunkIdx >= chunksLen { - return fmt.Errorf("too big bIdx=%d; should be smaller than %d", bIdx, chunksLen*chunkSize) - } - for chunkIdx := uint64(0); chunkIdx < chunksLen; chunkIdx++ { - chunk := getChunk() - chunks[chunkIdx] = chunk - if _, err := io.ReadFull(r, chunk); err != nil { - // Free up allocated chunks before returning the error. - for _, chunk := range chunks { - if chunk != nil { - putChunk(chunk) - } - } - return fmt.Errorf("cannot read b.chunks[%d]: %s", chunkIdx, err) - } - } - // Adjust len for the chunk pointed by currChunkIdx. 
- if chunksLen > 0 { - chunkLen := bIdx % chunkSize - chunks[currChunkIdx] = chunks[currChunkIdx][:chunkLen] - } - - b.mu.Lock() - for _, chunk := range b.chunks { - putChunk(chunk) - } - b.chunks = chunks - b.m = m - b.idx = bIdx - b.gen = bGen - b.mu.Unlock() - - return nil -} - -func writeUint64(w io.Writer, u uint64) error { - var u64Buf [8]byte - binary.LittleEndian.PutUint64(u64Buf[:], u) - _, err := w.Write(u64Buf[:]) - return err -} - -func readUint64(r io.Reader) (uint64, error) { - var u64Buf [8]byte - if _, err := io.ReadFull(r, u64Buf[:]); err != nil { - return 0, err - } - u := binary.LittleEndian.Uint64(u64Buf[:]) - return u, nil -} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/go.mod b/vendor/github.com/VictoriaMetrics/fastcache/go.mod deleted file mode 100644 index 1b53092..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module github.com/VictoriaMetrics/fastcache - -require ( - github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 - github.com/cespare/xxhash/v2 v2.1.1 - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/golang/snappy v0.0.1 - github.com/stretchr/testify v1.3.0 // indirect -) diff --git a/vendor/github.com/VictoriaMetrics/fastcache/go.sum b/vendor/github.com/VictoriaMetrics/fastcache/go.sum deleted file mode 100644 index 4afe0e7..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/go.sum +++ /dev/null @@ -1,14 +0,0 @@ -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8= -github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go b/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go deleted file mode 100644 index 79a7183..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/malloc_heap.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine windows - -package fastcache - -func getChunk() []byte { - return make([]byte, chunkSize) -} - -func putChunk(chunk []byte) { - // No-op. 
-} diff --git a/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go b/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go deleted file mode 100644 index 424b79b..0000000 --- a/vendor/github.com/VictoriaMetrics/fastcache/malloc_mmap.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build !appengine,!windows - -package fastcache - -import ( - "fmt" - "sync" - "syscall" - "unsafe" -) - -const chunksPerAlloc = 1024 - -var ( - freeChunks []*[chunkSize]byte - freeChunksLock sync.Mutex -) - -func getChunk() []byte { - freeChunksLock.Lock() - if len(freeChunks) == 0 { - // Allocate offheap memory, so GOGC won't take into account cache size. - // This should reduce free memory waste. - data, err := syscall.Mmap(-1, 0, chunkSize*chunksPerAlloc, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE) - if err != nil { - panic(fmt.Errorf("cannot allocate %d bytes via mmap: %s", chunkSize*chunksPerAlloc, err)) - } - for len(data) > 0 { - p := (*[chunkSize]byte)(unsafe.Pointer(&data[0])) - freeChunks = append(freeChunks, p) - data = data[chunkSize:] - } - } - n := len(freeChunks) - 1 - p := freeChunks[n] - freeChunks[n] = nil - freeChunks = freeChunks[:n] - freeChunksLock.Unlock() - return p[:] -} - -func putChunk(chunk []byte) { - if chunk == nil { - return - } - chunk = chunk[:chunkSize] - p := (*[chunkSize]byte)(unsafe.Pointer(&chunk[0])) - - freeChunksLock.Lock() - freeChunks = append(freeChunks, p) - freeChunksLock.Unlock() -} diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE deleted file mode 100644 index 339177b..0000000 --- a/vendor/github.com/beorn7/perks/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (C) 2013 Blake Mizerany - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
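As a usage note for the SetBig/GetBig API deleted in bigcache.go above: values larger than 64KB cannot go through plain Set, so SetBig splits them into sub-chunks keyed by the value's xxhash, and GetBig reassembles and verifies them on read. A minimal sketch of how a caller would use that pair (the key name and size are illustrative):

```
package main

import (
	"bytes"
	"fmt"

	"github.com/VictoriaMetrics/fastcache"
)

func main() {
	c := fastcache.New(64 * 1024 * 1024)

	// A 1 MB value is far above the 64KB per-entry limit of Set,
	// so it has to go through SetBig, which splits it into sub-chunks.
	v := bytes.Repeat([]byte("x"), 1<<20)
	c.SetBig([]byte("big-key"), v)

	// GetBig reassembles the sub-chunks and verifies the stored
	// length and xxhash before returning the value.
	got := c.GetBig(nil, []byte("big-key"))
	fmt.Println(len(got) == len(v)) // true
}
```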
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
deleted file mode 100644
index 1602287..0000000
--- a/vendor/github.com/beorn7/perks/quantile/exampledata.txt
+++ /dev/null
@@ -1,2388 +0,0 @@
[2,388 deleted lines of numeric sample data omitted]
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
deleted file mode 100644
index d7d14f8..0000000
--- a/vendor/github.com/beorn7/perks/quantile/stream.go
+++ /dev/null
@@ -1,316 +0,0 @@
-// Package quantile computes approximate quantiles over an unbounded data
-// stream within low memory and CPU bounds.
-//
-// A small amount of accuracy is traded to achieve the above properties.
-//
-// Multiple streams can be merged before calling Query to generate a single set
-// of results. This is meaningful when the streams represent the same type of
-// data. See Merge and Samples.
-//
-// For more detailed information about the algorithm used, see:
-//
-// Effective Computation of Biased Quantiles over Data Streams
-//
-// http://www.cs.rutgers.edu/~muthu/bquant.pdf
-package quantile
-
-import (
-	"math"
-	"sort"
-)
-
-// Sample holds an observed value and meta information for compression. JSON
-// tags have been added for convenience.
-type Sample struct {
-	Value float64 `json:",string"`
-	Width float64 `json:",string"`
-	Delta float64 `json:",string"`
-}
-
-// Samples represents a slice of samples. It implements sort.Interface.
-type Samples []Sample
-
-func (a Samples) Len() int { return len(a) }
-func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
-func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-
-type invariant func(s *stream, r float64) float64
-
-// NewLowBiased returns an initialized Stream for low-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the lower ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewLowBiased(epsilon float64) *Stream {
-	ƒ := func(s *stream, r float64) float64 {
-		return 2 * epsilon * r
-	}
-	return newStream(ƒ)
-}
-
-// NewHighBiased returns an initialized Stream for high-biased quantiles
-// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
-// error guarantees can still be given even for the higher ranks of the data
-// distribution.
-//
-// The provided epsilon is a relative error, i.e. the true quantile of a value
-// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
-//
-// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
-// properties.
-func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targetMap map[float64]float64) *Stream { - // Convert map to slice to avoid slow iterations on a map. - // ƒ is called on the hot path, so converting the map to a slice - // beforehand results in significant CPU savings. - targets := targetMapToSlice(targetMap) - - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for _, t := range targets { - if t.quantile*s.n <= r { - f = (2 * t.epsilon * r) / t.quantile - } else { - f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -type target struct { - quantile float64 - epsilon float64 -} - -func targetMapToSlice(targetMap map[float64]float64) []target { - targets := make([]target, 0, len(targetMap)) - - for quantile, epsilon := range targetMap { - t := target{ - quantile: quantile, - epsilon: epsilon, - } - targets = append(targets, t) - } - - return targets -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentiles value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data. - l := len(s.b) - if l == 0 { - return 0 - } - i := int(math.Ceil(float64(l) * q)) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying streams samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. 
-func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i. - copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/github.com/cespare/xxhash/v2/.travis.yml b/vendor/github.com/cespare/xxhash/v2/.travis.yml deleted file mode 100644 index c516ea8..0000000 --- a/vendor/github.com/cespare/xxhash/v2/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - "1.x" - - master -env: - - TAGS="" - - TAGS="-tags purego" -script: go test $TAGS -v ./... diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md deleted file mode 100644 index 2fd8693..0000000 --- a/vendor/github.com/cespare/xxhash/v2/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) -[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| --- | --- | --- | -| 5 B | 979.66 MB/s | 1291.17 MB/s | -| 100 B | 7475.26 MB/s | 7973.40 MB/s | -| 4 KB | 17573.46 MB/s | 17602.65 MB/s | -| 10 MB | 17131.46 MB/s | 17142.16 MB/s | - -These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using -the following commands under Go 1.11.2: - -``` -$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes' -$ go test -benchtime 10s -bench '/xxhash,direct,bytes' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [FreeCache](https://github.com/coocood/freecache) diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod deleted file mode 100644 index 49f6760..0000000 --- a/vendor/github.com/cespare/xxhash/v2/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/cespare/xxhash/v2 - -go 1.11 diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum deleted file mode 100644 index e69de29..0000000 diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go deleted file mode 100644 index db0b35f..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash.go +++ /dev/null @@ -1,236 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. 
-package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = prime1v + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -prime1v - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(d.mem[d.n:], b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. - copy(d.mem[d.n:], b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[32-d.n:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - i, end := 0, d.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(d.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(d.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(d.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) 
- b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - b = b[len(d.mem):] - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) -} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go deleted file mode 100644 index ad14b80..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(d *Digest, b []byte) int diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s deleted file mode 100644 index d580e32..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s +++ /dev/null @@ -1,215 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. 
-#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the d pointer. - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT, $0-40 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from d. - MOVQ d+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to d. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // The number of bytes written is CX minus the old base pointer. - SUBQ b_base+8(FP), CX - MOVQ CX, ret+32(FP) - - RET diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go deleted file mode 100644 index 4a5a821..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go deleted file mode 100644 index fc9bea7..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go deleted file mode 100644 index 53bf76e..0000000 --- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Notes: -// -// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ -// for some discussion about these unsafe conversions. -// -// In the future it's possible that compiler optimizations will make these -// unsafe operations unnecessary: https://golang.org/issue/2205. -// -// Both of these wrapper functions still incur function call overhead since they -// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write -// for strings to squeeze out a bit more speed. Mid-stack inlining should -// eventually fix this. - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -func Sum64String(s string) uint64 { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} - -// WriteString adds more data to d. It always returns len(s), nil. 
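The deleted README and sources above cover the full xxhash/v2 surface; a short sketch of the two call styles, one-shot and streaming, using the import path from the deleted go.mod:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing. Sum64String avoids the []byte copy on
	// non-appengine builds via the unsafe conversion shown above.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello")))
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming: Digest implements hash.Hash64, so input can arrive
	// in pieces and be finalized once.
	d := xxhash.New()
	d.Write([]byte("hel"))
	d.Write([]byte("lo"))
	fmt.Printf("%016x\n", d.Sum64()) // equals the one-shot results
}
```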
-// It may be faster than Write([]byte(s)) by avoiding a copy. -func (d *Digest) WriteString(s string) (n int, err error) { - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return d.Write(b) -} diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/github.com/golang/protobuf/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/github.com/golang/protobuf/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE deleted file mode 100644 index 0f64693..0000000 --- a/vendor/github.com/golang/protobuf/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go deleted file mode 100644 index 3cd3249..0000000 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ /dev/null @@ -1,253 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer deep copy and merge. -// TODO: RawMessage. - -package proto - -import ( - "fmt" - "log" - "reflect" - "strings" -) - -// Clone returns a deep copy of a protocol buffer. -func Clone(src Message) Message { - in := reflect.ValueOf(src) - if in.IsNil() { - return src - } - out := reflect.New(in.Type().Elem()) - dst := out.Interface().(Message) - Merge(dst, src) - return dst -} - -// Merger is the interface representing objects that can merge messages of the same type. -type Merger interface { - // Merge merges src into this message. - // Required and optional fields that are set in src will be set to that value in dst. - // Elements of repeated fields will be appended. - // - // Merge may panic if called with a different argument type than the receiver. - Merge(src Message) -} - -// generatedMerger is the custom merge method that generated protos will have. -// We must add this method since a generate Merge method will conflict with -// many existing protos that have a Merge data field already defined. -type generatedMerger interface { - XXX_Merge(src Message) -} - -// Merge merges src into dst. -// Required and optional fields that are set in src will be set to that value in dst. -// Elements of repeated fields will be appended. -// Merge panics if src and dst are not the same type, or if dst is nil. 
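A sketch of how Clone and Merge behave per the doc comments above. Event is a hand-written stand-in for a protoc-generated message, assumed here only to keep the sketch self-contained, and the semantics shown are those of the reflection-based implementation being removed in this diff:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Event is a hand-written stand-in for a generated message type,
// assumed for illustration only.
type Event struct {
	Name string   `protobuf:"bytes,1,opt,name=name"`
	Tags []string `protobuf:"bytes,2,rep,name=tags"`
}

func (e *Event) Reset()         { *e = Event{} }
func (e *Event) String() string { return fmt.Sprintf("%+v", *e) }
func (*Event) ProtoMessage()    {}

func main() {
	src := &Event{Name: "open", Tags: []string{"a"}}

	// Clone returns a deep copy that shares no memory with src.
	dst := proto.Clone(src).(*Event)

	// Merge copies set fields from its source; repeated fields append.
	proto.Merge(dst, &Event{Tags: []string{"b"}})
	fmt.Println(dst) // &{Name:open Tags:[a b]}
}
```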
-func Merge(dst, src Message) { - if m, ok := dst.(Merger); ok { - m.Merge(src) - return - } - - in := reflect.ValueOf(src) - out := reflect.ValueOf(dst) - if out.IsNil() { - panic("proto: nil destination") - } - if in.Type() != out.Type() { - panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) - } - if in.IsNil() { - return // Merge from nil src is a noop - } - if m, ok := dst.(generatedMerger); ok { - m.XXX_Merge(src) - return - } - mergeStruct(out.Elem(), in.Elem()) -} - -func mergeStruct(out, in reflect.Value) { - sprop := GetProperties(in.Type()) - for i := 0; i < in.NumField(); i++ { - f := in.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) - } - - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - uf := in.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return - } - uin := uf.Bytes() - if len(uin) > 0 { - out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) - } -} - -// mergeAny performs a merge between two values of the same type. -// viaPtr indicates whether the values were indirected through a pointer (implying proto2). -// prop is set if this is a struct field (it may be nil). -func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { - if in.Type() == protoMessageType { - if !in.IsNil() { - if out.IsNil() { - out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) - } else { - Merge(out.Interface().(Message), in.Interface().(Message)) - } - } - return - } - switch in.Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - if !viaPtr && isProto3Zero(in) { - return - } - out.Set(in) - case reflect.Interface: - // Probably a oneof field; copy non-nil values. - if in.IsNil() { - return - } - // Allocate destination if it is not set, or set to a different type. - // Otherwise we will merge as normal. - if out.IsNil() || out.Elem().Type() != in.Elem().Type() { - out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) - } - mergeAny(out.Elem(), in.Elem(), false, nil) - case reflect.Map: - if in.Len() == 0 { - return - } - if out.IsNil() { - out.Set(reflect.MakeMap(in.Type())) - } - // For maps with value types of *T or []byte we need to deep copy each value. - elemKind := in.Type().Elem().Kind() - for _, key := range in.MapKeys() { - var val reflect.Value - switch elemKind { - case reflect.Ptr: - val = reflect.New(in.Type().Elem().Elem()) - mergeAny(val, in.MapIndex(key), false, nil) - case reflect.Slice: - val = in.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - default: - val = in.MapIndex(key) - } - out.SetMapIndex(key, val) - } - case reflect.Ptr: - if in.IsNil() { - return - } - if out.IsNil() { - out.Set(reflect.New(in.Elem().Type())) - } - mergeAny(out.Elem(), in.Elem(), true, nil) - case reflect.Slice: - if in.IsNil() { - return - } - if in.Type().Elem().Kind() == reflect.Uint8 { - // []byte is a scalar bytes field, not a repeated field. - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value, and should not - // be merged. - if prop != nil && prop.proto3 && in.Len() == 0 { - return - } - - // Make a deep copy. 
- // Append to []byte{} instead of []byte(nil) so that we never end up - // with a nil result. - out.SetBytes(append([]byte{}, in.Bytes()...)) - return - } - n := in.Len() - if out.IsNil() { - out.Set(reflect.MakeSlice(in.Type(), 0, n)) - } - switch in.Type().Elem().Kind() { - case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, - reflect.String, reflect.Uint32, reflect.Uint64: - out.Set(reflect.AppendSlice(out, in)) - default: - for i := 0; i < n; i++ { - x := reflect.Indirect(reflect.New(in.Type().Elem())) - mergeAny(x, in.Index(i), false, nil) - out.Set(reflect.Append(out, x)) - } - } - case reflect.Struct: - mergeStruct(out, in) - default: - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to copy %v", in) - } -} - -func mergeExtension(out, in map[int32]Extension) { - for extNum, eIn := range in { - eOut := Extension{desc: eIn.desc} - if eIn.value != nil { - v := reflect.New(reflect.TypeOf(eIn.value)).Elem() - mergeAny(v, reflect.ValueOf(eIn.value), false, nil) - eOut.value = v.Interface() - } - if eIn.enc != nil { - eOut.enc = make([]byte, len(eIn.enc)) - copy(eOut.enc, eIn.enc) - } - - out[extNum] = eOut - } -} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go deleted file mode 100644 index 63b0f08..0000000 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ /dev/null @@ -1,427 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for decoding protocol buffer data to construct in-memory representations. - */ - -import ( - "errors" - "fmt" - "io" -) - -// errOverflow is returned when an integer is too large to be represented. -var errOverflow = errors.New("proto: integer overflow") - -// ErrInternalBadWireType is returned by generated code when an incorrect -// wire type is encountered. 
It does not get returned to user code. -var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") - -// DecodeVarint reads a varint-encoded integer from the slice. -// It returns the integer and the number of bytes consumed, or -// zero if there is not enough. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func DecodeVarint(buf []byte) (x uint64, n int) { - for shift := uint(0); shift < 64; shift += 7 { - if n >= len(buf) { - return 0, 0 - } - b := uint64(buf[n]) - n++ - x |= (b & 0x7F) << shift - if (b & 0x80) == 0 { - return x, n - } - } - - // The number is too large to represent in a 64-bit value. - return 0, 0 -} - -func (p *Buffer) decodeVarintSlow() (x uint64, err error) { - i := p.index - l := len(p.buf) - - for shift := uint(0); shift < 64; shift += 7 { - if i >= l { - err = io.ErrUnexpectedEOF - return - } - b := p.buf[i] - i++ - x |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - p.index = i - return - } - } - - // The number is too large to represent in a 64-bit value. - err = errOverflow - return -} - -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - i := p.index - buf := p.buf - - if i >= len(buf) { - return 0, io.ErrUnexpectedEOF - } else if buf[i] < 0x80 { - p.index++ - return uint64(buf[i]), nil - } else if len(buf)-i < 10 { - return p.decodeVarintSlow() - } - - var b uint64 - // we already checked the first byte - x = uint64(buf[i]) - 0x80 - i++ - - b = uint64(buf[i]) - i++ - x += b << 7 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 7 - - b = uint64(buf[i]) - i++ - x += b << 14 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 14 - - b = uint64(buf[i]) - i++ - x += b << 21 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 21 - - b = uint64(buf[i]) - i++ - x += b << 28 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 28 - - b = uint64(buf[i]) - i++ - x += b << 35 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 35 - - b = uint64(buf[i]) - i++ - x += b << 42 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 42 - - b = uint64(buf[i]) - i++ - x += b << 49 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 49 - - b = uint64(buf[i]) - i++ - x += b << 56 - if b&0x80 == 0 { - goto done - } - x -= 0x80 << 56 - - b = uint64(buf[i]) - i++ - x += b << 63 - if b&0x80 == 0 { - goto done - } - - return 0, errOverflow - -done: - p.index = i - return x, nil -} - -// DecodeFixed64 reads a 64-bit integer from the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) DecodeFixed64() (x uint64, err error) { - // x, err already 0 - i := p.index + 8 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-8]) - x |= uint64(p.buf[i-7]) << 8 - x |= uint64(p.buf[i-6]) << 16 - x |= uint64(p.buf[i-5]) << 24 - x |= uint64(p.buf[i-4]) << 32 - x |= uint64(p.buf[i-3]) << 40 - x |= uint64(p.buf[i-2]) << 48 - x |= uint64(p.buf[i-1]) << 56 - return -} - -// DecodeFixed32 reads a 32-bit integer from the Buffer. -// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. 
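A round trip through the varint decoders above; 0xAC 0x02 is the two-byte varint encoding of 300:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 = 0b1_0010_1100, emitted low 7 bits first:
	// 0xAC (continuation bit set), then 0x02.
	buf := []byte{0xAC, 0x02}

	// Package-level form: returns the value and bytes consumed.
	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	// Buffer form: keeps an internal read cursor.
	p := proto.NewBuffer(buf)
	y, err := p.DecodeVarint()
	fmt.Println(y, err) // 300 <nil>
}
```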
-func (p *Buffer) DecodeFixed32() (x uint64, err error) { - // x, err already 0 - i := p.index + 4 - if i < 0 || i > len(p.buf) { - err = io.ErrUnexpectedEOF - return - } - p.index = i - - x = uint64(p.buf[i-4]) - x |= uint64(p.buf[i-3]) << 8 - x |= uint64(p.buf[i-2]) << 16 - x |= uint64(p.buf[i-1]) << 24 - return -} - -// DecodeZigzag64 reads a zigzag-encoded 64-bit integer -// from the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) DecodeZigzag64() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) - return -} - -// DecodeZigzag32 reads a zigzag-encoded 32-bit integer -// from the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) DecodeZigzag32() (x uint64, err error) { - x, err = p.DecodeVarint() - if err != nil { - return - } - x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) - return -} - -// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { - n, err := p.DecodeVarint() - if err != nil { - return nil, err - } - - nb := int(n) - if nb < 0 { - return nil, fmt.Errorf("proto: bad byte length %d", nb) - } - end := p.index + nb - if end < p.index || end > len(p.buf) { - return nil, io.ErrUnexpectedEOF - } - - if !alloc { - // todo: check if can get more uses of alloc=false - buf = p.buf[p.index:end] - p.index += nb - return - } - - buf = make([]byte, nb) - copy(buf, p.buf[p.index:]) - p.index += nb - return -} - -// DecodeStringBytes reads an encoded string from the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) DecodeStringBytes() (s string, err error) { - buf, err := p.DecodeRawBytes(false) - if err != nil { - return - } - return string(buf), nil -} - -// Unmarshaler is the interface representing objects that can -// unmarshal themselves. The argument points to data that may be -// overwritten, so implementations should not keep references to the -// buffer. -// Unmarshal implementations should not clear the receiver. -// Any unmarshaled data should be merged into the receiver. -// Callers of Unmarshal that do not want to retain existing data -// should Reset the receiver before calling Unmarshal. -type Unmarshaler interface { - Unmarshal([]byte) error -} - -// newUnmarshaler is the interface representing objects that can -// unmarshal themselves. The semantics are identical to Unmarshaler. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newUnmarshaler interface { - XXX_Unmarshal([]byte) error -} - -// Unmarshal parses the protocol buffer representation in buf and places the -// decoded result in pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// Unmarshal resets pb before starting to unmarshal, so any -// existing data in pb is always removed. Use UnmarshalMerge -// to preserve and append to existing data. 
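The count-delimited format that DecodeRawBytes and DecodeStringBytes parse is simply a varint length followed by that many payload bytes:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// Varint length 2, then the two payload bytes "hi".
	p := proto.NewBuffer([]byte{0x02, 'h', 'i'})
	s, err := p.DecodeStringBytes()
	fmt.Println(s, err) // hi <nil>
}
```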
-func Unmarshal(buf []byte, pb Message) error { - pb.Reset() - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// UnmarshalMerge parses the protocol buffer representation in buf and -// writes the decoded result to pb. If the struct underlying pb does not match -// the data in buf, the results can be unpredictable. -// -// UnmarshalMerge merges into existing data in pb. -// Most code should use Unmarshal instead. -func UnmarshalMerge(buf []byte, pb Message) error { - if u, ok := pb.(newUnmarshaler); ok { - return u.XXX_Unmarshal(buf) - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - return u.Unmarshal(buf) - } - return NewBuffer(buf).Unmarshal(pb) -} - -// DecodeMessage reads a count-delimited message from the Buffer. -func (p *Buffer) DecodeMessage(pb Message) error { - enc, err := p.DecodeRawBytes(false) - if err != nil { - return err - } - return NewBuffer(enc).Unmarshal(pb) -} - -// DecodeGroup reads a tag-delimited group from the Buffer. -// StartGroup tag is already consumed. This function consumes -// EndGroup tag. -func (p *Buffer) DecodeGroup(pb Message) error { - b := p.buf[p.index:] - x, y := findEndGroup(b) - if x < 0 { - return io.ErrUnexpectedEOF - } - err := Unmarshal(b[:x], pb) - p.index += y - return err -} - -// Unmarshal parses the protocol buffer representation in the -// Buffer and places the decoded result in pb. If the struct -// underlying pb does not match the data in the buffer, the results can be -// unpredictable. -// -// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. -func (p *Buffer) Unmarshal(pb Message) error { - // If the object can unmarshal itself, let it. - if u, ok := pb.(newUnmarshaler); ok { - err := u.XXX_Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - if u, ok := pb.(Unmarshaler); ok { - // NOTE: The history of proto have unfortunately been inconsistent - // whether Unmarshaler should or should not implicitly clear itself. - // Some implementations do, most do not. - // Thus, calling this here may or may not do what people want. - // - // See https://github.com/golang/protobuf/issues/424 - err := u.Unmarshal(p.buf[p.index:]) - p.index = len(p.buf) - return err - } - - // Slow workaround for messages that aren't Unmarshalers. - // This includes some hand-coded .pb.go files and - // bootstrap protos. - // TODO: fix all of those and then add Unmarshal to - // the Message interface. Then: - // The cast above and code below can be deleted. - // The old unmarshaler can be deleted. - // Clients can call Unmarshal directly (can already do that, actually). - var info InternalMessageInfo - err := info.Unmarshal(pb, p.buf[p.index:]) - p.index = len(p.buf) - return err -} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go deleted file mode 100644 index 35b882c..0000000 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ /dev/null @@ -1,63 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. 
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import "errors" - -// Deprecated: do not use. -type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } - -// Deprecated: do not use. -func GetStats() Stats { return Stats{} } - -// Deprecated: do not use. -func MarshalMessageSet(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSet([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func MarshalMessageSetJSON(interface{}) ([]byte, error) { - return nil, errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func UnmarshalMessageSetJSON([]byte, interface{}) error { - return errors.New("proto: not implemented") -} - -// Deprecated: do not use. -func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go deleted file mode 100644 index dea2617..0000000 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ /dev/null @@ -1,350 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - -// DiscardUnknown recursively discards all unknown fields from this message -// and all embedded messages. -// -// When unmarshaling a message with unrecognized fields, the tags and values -// of such fields are preserved in the Message. This allows a later call to -// marshal to be able to produce a message that continues to have those -// unrecognized fields. To avoid this, DiscardUnknown is used to -// explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. -func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di - } - return di -} - -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. 
- emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) - } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } - } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - vf := v.Field(i) - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } - } -} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go deleted file mode 100644 index 3abfed2..0000000 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ /dev/null @@ -1,203 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
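A sketch of DiscardUnknown against the legacy path above (discardLegacy). Event is a hand-written stand-in carrying an XXX_unrecognized field the way legacy generated messages do; it is assumed for illustration only:

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Event mimics a legacy generated message with preserved unknown fields;
// hand-written here only to keep the sketch self-contained.
type Event struct {
	Name             string `protobuf:"bytes,1,opt,name=name"`
	XXX_unrecognized []byte `json:"-"`
}

func (e *Event) Reset()         { *e = Event{} }
func (e *Event) String() string { return fmt.Sprintf("%+v", *e) }
func (*Event) ProtoMessage()    {}

func main() {
	m := &Event{Name: "open", XXX_unrecognized: []byte{0x10, 0x01}}
	proto.DiscardUnknown(m) // recursively drops preserved unknown fields
	fmt.Println(m.XXX_unrecognized == nil) // true
}
```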
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "errors" - "reflect" -) - -var ( - // errRepeatedHasNil is the error returned if Marshal is called with - // a struct with a repeated field containing a nil element. - errRepeatedHasNil = errors.New("proto: repeated field has nil element") - - // errOneofHasNil is the error returned if Marshal is called with - // a struct with a oneof field containing a nil element. - errOneofHasNil = errors.New("proto: oneof field has nil value") - - // ErrNil is the error returned if Marshal is called with nil. - ErrNil = errors.New("proto: Marshal called with nil") - - // ErrTooLarge is the error returned if Marshal is called with a - // message that encodes to >2GB. - ErrTooLarge = errors.New("proto: message encodes to over 2 GB") -) - -// The fundamental encoders that put bytes on the wire. -// Those that take integer types all accept uint64 and are -// therefore of type valueEncoder. - -const maxVarintBytes = 10 // maximum length of a varint - -// EncodeVarint returns the varint encoding of x. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -// Not used by the package itself, but helpful to clients -// wishing to use the same encoding. -func EncodeVarint(x uint64) []byte { - var buf [maxVarintBytes]byte - var n int - for n = 0; x > 127; n++ { - buf[n] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - buf[n] = uint8(x) - n++ - return buf[0:n] -} - -// EncodeVarint writes a varint-encoded integer to the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) EncodeVarint(x uint64) error { - for x >= 1<<7 { - p.buf = append(p.buf, uint8(x&0x7f|0x80)) - x >>= 7 - } - p.buf = append(p.buf, uint8(x)) - return nil -} - -// SizeVarint returns the varint encoding size of an integer. -func SizeVarint(x uint64) int { - switch { - case x < 1<<7: - return 1 - case x < 1<<14: - return 2 - case x < 1<<21: - return 3 - case x < 1<<28: - return 4 - case x < 1<<35: - return 5 - case x < 1<<42: - return 6 - case x < 1<<49: - return 7 - case x < 1<<56: - return 8 - case x < 1<<63: - return 9 - } - return 10 -} - -// EncodeFixed64 writes a 64-bit integer to the Buffer. -// This is the format for the -// fixed64, sfixed64, and double protocol buffer types. -func (p *Buffer) EncodeFixed64(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24), - uint8(x>>32), - uint8(x>>40), - uint8(x>>48), - uint8(x>>56)) - return nil -} - -// EncodeFixed32 writes a 32-bit integer to the Buffer. 
-// This is the format for the -// fixed32, sfixed32, and float protocol buffer types. -func (p *Buffer) EncodeFixed32(x uint64) error { - p.buf = append(p.buf, - uint8(x), - uint8(x>>8), - uint8(x>>16), - uint8(x>>24)) - return nil -} - -// EncodeZigzag64 writes a zigzag-encoded 64-bit integer -// to the Buffer. -// This is the format used for the sint64 protocol buffer type. -func (p *Buffer) EncodeZigzag64(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} - -// EncodeZigzag32 writes a zigzag-encoded 32-bit integer -// to the Buffer. -// This is the format used for the sint32 protocol buffer type. -func (p *Buffer) EncodeZigzag32(x uint64) error { - // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - -// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. -// This is the format used for the bytes protocol buffer -// type and for embedded messages. -func (p *Buffer) EncodeRawBytes(b []byte) error { - p.EncodeVarint(uint64(len(b))) - p.buf = append(p.buf, b...) - return nil -} - -// EncodeStringBytes writes an encoded string to the Buffer. -// This is the format used for the proto2 string type. -func (p *Buffer) EncodeStringBytes(s string) error { - p.EncodeVarint(uint64(len(s))) - p.buf = append(p.buf, s...) - return nil -} - -// Marshaler is the interface representing objects that can marshal themselves. -type Marshaler interface { - Marshal() ([]byte, error) -} - -// EncodeMessage writes the protocol buffer to the Buffer, -// prefixed by a varint-encoded length. -func (p *Buffer) EncodeMessage(pb Message) error { - siz := Size(pb) - p.EncodeVarint(uint64(siz)) - return p.Marshal(pb) -} - -// All protocol buffer fields are nillable, but be careful. -func isNil(v reflect.Value) bool { - switch v.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return v.IsNil() - } - return false -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go deleted file mode 100644 index f9b6e41..0000000 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ /dev/null @@ -1,301 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2011 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
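EncodeZigzag64 and EncodeZigzag32 above exist because a plain varint makes every negative number ten bytes long; zigzag interleaves signs so small magnitudes stay short in either direction. A self-contained sketch of the transform and its inverse:

```go
package main

import "fmt"

// zigzag64 matches the deleted EncodeZigzag64 transform:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... The arithmetic right
// shift smears the sign bit across the whole word.
func zigzag64(v int64) uint64 {
	return uint64(v<<1) ^ uint64(v>>63)
}

func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		u := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag64(u))
	}
}
```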
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Protocol buffer comparison. - -package proto - -import ( - "bytes" - "log" - "reflect" - "strings" -) - -/* -Equal returns true iff protocol buffers a and b are equal. -The arguments must both be pointers to protocol buffer structs. - -Equality is defined in this way: - - Two messages are equal iff they are the same type, - corresponding fields are equal, unknown field sets - are equal, and extensions sets are equal. - - Two set scalar fields are equal iff their values are equal. - If the fields are of a floating-point type, remember that - NaN != x for all x, including NaN. If the message is defined - in a proto3 .proto file, fields are not "set"; specifically, - zero length proto3 "bytes" fields are equal (nil == {}). - - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal. Note a "bytes" field, - although represented by []byte, is not a repeated field and the - rule for the scalar fields described above applies. - - Two unset fields are equal. - - Two unknown field sets are equal if their current - encoded state is equal. - - Two extension sets are equal iff they have corresponding - elements that are pairwise equal. - - Two map fields are equal iff their lengths are the same, - and they contain the same set of elements. Zero-length map - fields are equal. - - Every other combination of things are not equal. - -The return value is undefined if a and b are not protocol buffers. -*/ -func Equal(a, b Message) bool { - if a == nil || b == nil { - return a == b - } - v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) - if v1.Type() != v2.Type() { - return false - } - if v1.Kind() == reflect.Ptr { - if v1.IsNil() { - return v2.IsNil() - } - if v2.IsNil() { - return false - } - v1, v2 = v1.Elem(), v2.Elem() - } - if v1.Kind() != reflect.Struct { - return false - } - return equalStruct(v1, v2) -} - -// v1 and v2 are known to have the same type. 
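One subtlety the Equal contract above calls out: NaN != x for all x, and equalAny (below) compares float fields with ==, so a set float field holding NaN makes a message unequal even to itself. That is plain IEEE-754 behavior, visible without any proto types:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	nan := math.NaN()
	// Float fields are compared with v1.Float() == v2.Float(), so a
	// message whose float field holds NaN never Equals another message
	// with the same bytes -- or even itself.
	fmt.Println(nan == nan)      // false
	fmt.Println(math.IsNaN(nan)) // true: detect NaN explicitly instead
}
```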
-func equalStruct(v1, v2 reflect.Value) bool { - sprop := GetProperties(v1.Type()) - for i := 0; i < v1.NumField(); i++ { - f := v1.Type().Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - f1, f2 := v1.Field(i), v2.Field(i) - if f.Type.Kind() == reflect.Ptr { - if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { - // both unset - continue - } else if n1 != n2 { - // set/unset mismatch - return false - } - f1, f2 = f1.Elem(), f2.Elem() - } - if !equalAny(f1, f2, sprop.Prop[i]) { - return false - } - } - - if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_InternalExtensions") - if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { - return false - } - } - - if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { - em2 := v2.FieldByName("XXX_extensions") - if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { - return false - } - } - - uf := v1.FieldByName("XXX_unrecognized") - if !uf.IsValid() { - return true - } - - u1 := uf.Bytes() - u2 := v2.FieldByName("XXX_unrecognized").Bytes() - return bytes.Equal(u1, u2) -} - -// v1 and v2 are known to have the same type. -// prop may be nil. -func equalAny(v1, v2 reflect.Value, prop *Properties) bool { - if v1.Type() == protoMessageType { - m1, _ := v1.Interface().(Message) - m2, _ := v2.Interface().(Message) - return Equal(m1, m2) - } - switch v1.Kind() { - case reflect.Bool: - return v1.Bool() == v2.Bool() - case reflect.Float32, reflect.Float64: - return v1.Float() == v2.Float() - case reflect.Int32, reflect.Int64: - return v1.Int() == v2.Int() - case reflect.Interface: - // Probably a oneof field; compare the inner values. - n1, n2 := v1.IsNil(), v2.IsNil() - if n1 || n2 { - return n1 == n2 - } - e1, e2 := v1.Elem(), v2.Elem() - if e1.Type() != e2.Type() { - return false - } - return equalAny(e1, e2, nil) - case reflect.Map: - if v1.Len() != v2.Len() { - return false - } - for _, key := range v1.MapKeys() { - val2 := v2.MapIndex(key) - if !val2.IsValid() { - // This key was not found in the second map. - return false - } - if !equalAny(v1.MapIndex(key), val2, nil) { - return false - } - } - return true - case reflect.Ptr: - // Maps may have nil values in them, so check for nil. - if v1.IsNil() && v2.IsNil() { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return equalAny(v1.Elem(), v2.Elem(), prop) - case reflect.Slice: - if v1.Type().Elem().Kind() == reflect.Uint8 { - // short circuit: []byte - - // Edge case: if this is in a proto3 message, a zero length - // bytes field is considered the zero value. - if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { - return true - } - if v1.IsNil() != v2.IsNil() { - return false - } - return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) - } - - if v1.Len() != v2.Len() { - return false - } - for i := 0; i < v1.Len(); i++ { - if !equalAny(v1.Index(i), v2.Index(i), prop) { - return false - } - } - return true - case reflect.String: - return v1.Interface().(string) == v2.Interface().(string) - case reflect.Struct: - return equalStruct(v1, v2) - case reflect.Uint32, reflect.Uint64: - return v1.Uint() == v2.Uint() - } - - // unknown type, so not a protocol buffer - log.Printf("proto: don't know how to compare %v", v1) - return false -} - -// base is the struct type that the extensions are based on. -// x1 and x2 are InternalExtensions. 
-func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { - em1, _ := x1.extensionsRead() - em2, _ := x2.extensionsRead() - return equalExtMap(base, em1, em2) -} - -func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { - if len(em1) != len(em2) { - return false - } - - for extNum, e1 := range em1 { - e2, ok := em2[extNum] - if !ok { - return false - } - - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) - - if m1 == nil && m2 == nil { - // Both have only encoded form. - if bytes.Equal(e1.enc, e2.enc) { - continue - } - // The bytes are different, but the extensions might still be - // equal. We need to decode them to compare. - } - - if m1 != nil && m2 != nil { - // Both are unencoded. - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - continue - } - - // At least one is encoded. To do a semantically correct comparison - // we need to unmarshal them first. - var desc *ExtensionDesc - if m := extensionMaps[base]; m != nil { - desc = m[extNum] - } - if desc == nil { - // If both have only encoded form and the bytes are the same, - // it is handled above. We get here when the bytes are different. - // We don't know how to decode it, so just compare them as byte - // slices. - log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - return false - } - var err error - if m1 == nil { - m1, err = decodeExtension(e1.enc, desc) - } - if m2 == nil && err == nil { - m2, err = decodeExtension(e2.enc, desc) - } - if err != nil { - // The encoded form is invalid. - log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) - return false - } - if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { - return false - } - } - - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go deleted file mode 100644 index fa88add..0000000 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ /dev/null @@ -1,607 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Types and routines for supporting protocol buffer extensions. - */ - -import ( - "errors" - "fmt" - "io" - "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} - -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} - -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} - -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} - -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} - -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} - -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} - -var errNotExtendable = errors.New("proto: not an extendable proto.Message") - -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
-// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension - } -} - -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension - }) - e.p.extensionMap = make(map[int32]Extension) - } - return e.p.extensionMap -} - -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil - } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte -} - -// SetRawExtension is for testing only. 
-func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { - return - } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} - -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { - return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. - if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false - } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok -} - -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { - return - } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) -} - -// GetExtension retrieves a proto2 extended field from pb. -// -// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), -// then GetExtension parses the encoded field and returns a Go value of the specified type. -// If the field is not present, then the default value is returned (if one is specified), -// otherwise ErrMissingExtension is reported. -// -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. 
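The extensionProperties helper above uses the classic double-checked RWMutex cache: a read-locked fast path, then a write lock with a re-check before populating. A generic standalone sketch of the same pattern — `propCache` and its string key scheme are hypothetical stand-ins, not the proto internals:

```go
package main

import (
	"fmt"
	"sync"
)

// propCache mirrors the extProp pattern: RLock fast path, then Lock,
// re-check, and populate, so concurrent first lookups compute at most
// one redundant value and later lookups never contend on the write lock.
type propCache struct {
	mu sync.RWMutex
	m  map[string]int
}

func (c *propCache) get(key string, compute func() int) int {
	c.mu.RLock()
	v, ok := c.m[key]
	c.mu.RUnlock()
	if ok {
		return v
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if v, ok := c.m[key]; ok { // re-check: another goroutine may have won
		return v
	}
	v = compute()
	c.m[key] = v
	return v
}

func main() {
	c := &propCache{m: make(map[string]int)}
	fmt.Println(c.get("field:7", func() int { return 42 }))
	fmt.Println(c.get("field:7", func() int { panic("cached, never called") }))
}
```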
-func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err - } - } - - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil - } - - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil - } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err - } - - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default - return nil, ErrMissingExtension - } - - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension - } - - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. - return sf.value, nil - } - - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) - } - return value.Interface(), nil -} - -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } - } - return value.Interface(), nil -} - -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. 
-// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } - if err != nil { - return - } - } - return -} - -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. -func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - registeredExtensions := RegisteredExtensions(pb) - - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil - } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } - } - - extensions = append(extensions, desc) - } - return extensions, nil -} - -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) - } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} - return nil -} - -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { - return - } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) - } -} - -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. - -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) - -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m - } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) - } - m[desc.Field] = desc -} - -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. 
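RegisterExtension above keys a two-level map by extended struct type and field number, and panics on duplicates so conflicts surface at init time rather than at marshal time. A hypothetical stripped-down registry with the same fail-fast shape — string keys stand in here for reflect.Type:

```go
package main

import (
	"fmt"
	"strconv"
)

// registry mimics extensionMaps: owner type -> field number -> descriptor.
var registry = make(map[string]map[int32]string)

// register panics on double registration, matching RegisterExtension's
// init-time fail-fast behavior.
func register(owner string, field int32, name string) {
	m := registry[owner]
	if m == nil {
		m = make(map[int32]string)
		registry[owner] = m
	}
	if _, dup := m[field]; dup {
		panic("duplicate extension registered: " + owner + " " + strconv.Itoa(int(field)))
	}
	m[field] = name
}

func main() {
	register("example.Test", 100, "example.ext_flag")
	fmt.Println(registry["example.Test"][100]) // example.ext_flag
}
```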
-func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] -} - -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - } - return v -} - -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - } - } - return v -} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go deleted file mode 100644 index fdd328b..0000000 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ /dev/null @@ -1,965 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package proto converts data structures to and from the wire format of -protocol buffers. It works in concert with the Go source code generated -for .proto files by the protocol compiler. - -A summary of the properties of the protocol buffer interface -for a protocol buffer variable v: - - - Names are turned from camel_case to CamelCase for export. - - There are no methods on v to set fields; just treat - them as structure fields. - - There are getters that return a field's value if set, - and return the field's default value if unset. - The getters work even if the receiver is a nil message. - - The zero value for a struct is its correct initialization state. - All desired fields must be set before marshaling. - - A Reset() method will restore a protobuf struct to its zero state. - - Non-repeated fields are pointers to the values; nil means unset. - That is, optional or required field int32 f becomes F *int32. - - Repeated fields are slices. - - Helper functions are available to aid the setting of fields. - msg.Foo = proto.String("hello") // set field - - Constants are defined to hold the default values of all fields that - have them. They have the form Default_StructName_FieldName. - Because the getter methods handle defaulted values, - direct use of these constants should be rare. - - Enums are given type names and maps from names to values. - Enum values are prefixed by the enclosing message's name, or by the - enum's type name if it is a top-level enum. Enum types have a String - method, and a Enum method to assist in message construction. - - Nested messages, groups and enums have type names prefixed with the name of - the surrounding message type. - - Extensions are given descriptor names that start with E_, - followed by an underscore-delimited list of the nested messages - that contain it (if any) followed by the CamelCased name of the - extension field itself. HasExtension, ClearExtension, GetExtension - and SetExtension are functions for manipulating extensions. - - Oneof field sets are given a single field in their message, - with distinguished wrapper types for each possible field value. - - Marshal and Unmarshal are functions to encode and decode the wire format. - -When the .proto file specifies `syntax="proto3"`, there are some differences: - - - Non-repeated fields of non-message type are values instead of pointers. - - Enum types do not get an Enum method. - -The simplest way to describe this is to see an example. 
-Given file test.proto, containing - - package example; - - enum FOO { X = 17; } - - message Test { - required string label = 1; - optional int32 type = 2 [default=77]; - repeated int64 reps = 3; - optional group OptionalGroup = 4 { - required string RequiredField = 5; - } - oneof union { - int32 number = 6; - string name = 7; - } - } - -The resulting file, test.pb.go, is: - - package example - - import proto "github.com/golang/protobuf/proto" - import math "math" - - type FOO int32 - const ( - FOO_X FOO = 17 - ) - var FOO_name = map[int32]string{ - 17: "X", - } - var FOO_value = map[string]int32{ - "X": 17, - } - - func (x FOO) Enum() *FOO { - p := new(FOO) - *p = x - return p - } - func (x FOO) String() string { - return proto.EnumName(FOO_name, int32(x)) - } - func (x *FOO) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FOO_value, data) - if err != nil { - return err - } - *x = FOO(value) - return nil - } - - type Test struct { - Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` - Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` - Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` - Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` - // Types that are valid to be assigned to Union: - // *Test_Number - // *Test_Name - Union isTest_Union `protobuf_oneof:"union"` - XXX_unrecognized []byte `json:"-"` - } - func (m *Test) Reset() { *m = Test{} } - func (m *Test) String() string { return proto.CompactTextString(m) } - func (*Test) ProtoMessage() {} - - type isTest_Union interface { - isTest_Union() - } - - type Test_Number struct { - Number int32 `protobuf:"varint,6,opt,name=number"` - } - type Test_Name struct { - Name string `protobuf:"bytes,7,opt,name=name"` - } - - func (*Test_Number) isTest_Union() {} - func (*Test_Name) isTest_Union() {} - - func (m *Test) GetUnion() isTest_Union { - if m != nil { - return m.Union - } - return nil - } - const Default_Test_Type int32 = 77 - - func (m *Test) GetLabel() string { - if m != nil && m.Label != nil { - return *m.Label - } - return "" - } - - func (m *Test) GetType() int32 { - if m != nil && m.Type != nil { - return *m.Type - } - return Default_Test_Type - } - - func (m *Test) GetOptionalgroup() *Test_OptionalGroup { - if m != nil { - return m.Optionalgroup - } - return nil - } - - type Test_OptionalGroup struct { - RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` - } - func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } - func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } - - func (m *Test_OptionalGroup) GetRequiredField() string { - if m != nil && m.RequiredField != nil { - return *m.RequiredField - } - return "" - } - - func (m *Test) GetNumber() int32 { - if x, ok := m.GetUnion().(*Test_Number); ok { - return x.Number - } - return 0 - } - - func (m *Test) GetName() string { - if x, ok := m.GetUnion().(*Test_Name); ok { - return x.Name - } - return "" - } - - func init() { - proto.RegisterEnum("example.FOO", FOO_name, FOO_value) - } - -To create and play with a Test object: - - package main - - import ( - "log" - - "github.com/golang/protobuf/proto" - pb "./example.pb" - ) - - func main() { - test := &pb.Test{ - Label: proto.String("hello"), - Type: proto.Int32(17), - Reps: []int64{1, 2, 3}, - Optionalgroup: &pb.Test_OptionalGroup{ - RequiredField: proto.String("good bye"), - }, - Union: &pb.Test_Name{"fred"}, - 
} - data, err := proto.Marshal(test) - if err != nil { - log.Fatal("marshaling error: ", err) - } - newTest := &pb.Test{} - err = proto.Unmarshal(data, newTest) - if err != nil { - log.Fatal("unmarshaling error: ", err) - } - // Now test and newTest contain the same data. - if test.GetLabel() != newTest.GetLabel() { - log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) - } - // Use a type switch to determine which oneof was set. - switch u := test.Union.(type) { - case *pb.Test_Number: // u.Number contains the number. - case *pb.Test_Name: // u.Name contains the string. - } - // etc. - } -*/ -package proto - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "sort" - "strconv" - "sync" -) - -// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. -// Marshal reports this when a required field is not initialized. -// Unmarshal reports this when a required field is missing from the wire data. -type RequiredNotSetError struct{ field string } - -func (e *RequiredNotSetError) Error() string { - if e.field == "" { - return fmt.Sprintf("proto: required field not set") - } - return fmt.Sprintf("proto: required field %q not set", e.field) -} -func (e *RequiredNotSetError) RequiredNotSet() bool { - return true -} - -type invalidUTF8Error struct{ field string } - -func (e *invalidUTF8Error) Error() string { - if e.field == "" { - return "proto: invalid UTF-8 detected" - } - return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) -} -func (e *invalidUTF8Error) InvalidUTF8() bool { - return true -} - -// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. -// This error should not be exposed to the external API as such errors should -// be recreated with the field information. -var errInvalidUTF8 = &invalidUTF8Error{} - -// isNonFatal reports whether the error is either a RequiredNotSet error -// or a InvalidUTF8 error. -func isNonFatal(err error) bool { - if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { - return true - } - if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { - return true - } - return false -} - -type nonFatal struct{ E error } - -// Merge merges err into nf and reports whether it was successful. -// Otherwise it returns false for any fatal non-nil errors. -func (nf *nonFatal) Merge(err error) (ok bool) { - if err == nil { - return true // not an error - } - if !isNonFatal(err) { - return false // fatal error - } - if nf.E == nil { - nf.E = err // store first instance of non-fatal error - } - return true -} - -// Message is implemented by generated protocol buffer messages. -type Message interface { - Reset() - String() string - ProtoMessage() -} - -// A Buffer is a buffer manager for marshaling and unmarshaling -// protocol buffers. It may be reused between invocations to -// reduce memory usage. It is not necessary to use a Buffer; -// the global functions Marshal and Unmarshal create a -// temporary Buffer and are fine for most applications. -type Buffer struct { - buf []byte // encode/decode byte stream - index int // read point - - deterministic bool -} - -// NewBuffer allocates a new Buffer and initializes its internal data to -// the contents of the argument slice. -func NewBuffer(e []byte) *Buffer { - return &Buffer{buf: e} -} - -// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
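The error plumbing above classifies errors by behavior rather than concrete type: anything advertising `RequiredNotSet() bool` or `InvalidUTF8() bool` is non-fatal and merged; everything else aborts. A self-contained sketch of that convention — `requiredNotSetError` here is a local stand-in, not the package's type:

```go
package main

import (
	"errors"
	"fmt"
)

// requiredNotSetError advertises its condition via a method, so callers
// can detect it without importing a concrete type.
type requiredNotSetError struct{ field string }

func (e *requiredNotSetError) Error() string        { return "required field " + e.field + " not set" }
func (e *requiredNotSetError) RequiredNotSet() bool { return true }

// isNonFatal asks "does this error declare itself non-fatal?" exactly
// the way the deleted lib.go does, via an anonymous interface assertion.
func isNonFatal(err error) bool {
	re, ok := err.(interface{ RequiredNotSet() bool })
	return ok && re.RequiredNotSet()
}

func main() {
	var first error
	for _, err := range []error{
		&requiredNotSetError{field: "label"},
		errors.New("fatal: corrupt input"),
	} {
		if err != nil && !isNonFatal(err) {
			fmt.Println("abort:", err)
			return
		}
		if first == nil {
			first = err // remember the first non-fatal error, keep going
		}
	}
	fmt.Println("completed with:", first)
}
```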
-func (p *Buffer) Reset() { - p.buf = p.buf[0:0] // for reading/writing - p.index = 0 // for reading -} - -// SetBuf replaces the internal buffer with the slice, -// ready for unmarshaling the contents of the slice. -func (p *Buffer) SetBuf(s []byte) { - p.buf = s - p.index = 0 -} - -// Bytes returns the contents of the Buffer. -func (p *Buffer) Bytes() []byte { return p.buf } - -// SetDeterministic sets whether to use deterministic serialization. -// -// Deterministic serialization guarantees that for a given binary, equal -// messages will always be serialized to the same bytes. This implies: -// -// - Repeated serialization of a message will return the same bytes. -// - Different processes of the same binary (which may be executing on -// different machines) will serialize equal messages to the same bytes. -// -// Note that the deterministic serialization is NOT canonical across -// languages. It is not guaranteed to remain stable over time. It is unstable -// across different builds with schema changes due to unknown fields. -// Users who need canonical serialization (e.g., persistent storage in a -// canonical form, fingerprinting, etc.) should define their own -// canonicalization specification and implement their own serializer rather -// than relying on this API. -// -// If deterministic serialization is requested, map entries will be sorted -// by keys in lexographical order. This is an implementation detail and -// subject to change. -func (p *Buffer) SetDeterministic(deterministic bool) { - p.deterministic = deterministic -} - -/* - * Helper routines for simplifying the creation of optional fields of basic type. - */ - -// Bool is a helper routine that allocates a new bool value -// to store v and returns a pointer to it. -func Bool(v bool) *bool { - return &v -} - -// Int32 is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it. -func Int32(v int32) *int32 { - return &v -} - -// Int is a helper routine that allocates a new int32 value -// to store v and returns a pointer to it, but unlike Int32 -// its argument value is an int. -func Int(v int) *int32 { - p := new(int32) - *p = int32(v) - return p -} - -// Int64 is a helper routine that allocates a new int64 value -// to store v and returns a pointer to it. -func Int64(v int64) *int64 { - return &v -} - -// Float32 is a helper routine that allocates a new float32 value -// to store v and returns a pointer to it. -func Float32(v float32) *float32 { - return &v -} - -// Float64 is a helper routine that allocates a new float64 value -// to store v and returns a pointer to it. -func Float64(v float64) *float64 { - return &v -} - -// Uint32 is a helper routine that allocates a new uint32 value -// to store v and returns a pointer to it. -func Uint32(v uint32) *uint32 { - return &v -} - -// Uint64 is a helper routine that allocates a new uint64 value -// to store v and returns a pointer to it. -func Uint64(v uint64) *uint64 { - return &v -} - -// String is a helper routine that allocates a new string value -// to store v and returns a pointer to it. -func String(v string) *string { - return &v -} - -// EnumName is a helper function to simplify printing protocol buffer enums -// by name. Given an enum map and a value, it returns a useful string. 
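The Bool/Int32/.../String helpers above exist because Go cannot take the address of a literal, and proto2 optional scalars are pointer-typed. As an aside, a single generic helper covers the whole family in Go 1.18+ — newer than this module's `go 1.14` directive, so purely illustrative:

```go
package main

import "fmt"

// ptr generalizes proto.Bool/proto.Int32/proto.String: Go won't let you
// write &true or &"hello" directly, so pointer-typed optional fields
// need a boxing helper. Requires Go 1.18 generics.
func ptr[T any](v T) *T { return &v }

func main() {
	label := ptr("hello") // what msg.Label = proto.String("hello") does
	count := ptr(int32(17))
	fmt.Println(*label, *count)
}
```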
-func EnumName(m map[int32]string, v int32) string { - s, ok := m[v] - if ok { - return s - } - return strconv.Itoa(int(v)) -} - -// UnmarshalJSONEnum is a helper function to simplify recovering enum int values -// from their JSON-encoded representation. Given a map from the enum's symbolic -// names to its int values, and a byte buffer containing the JSON-encoded -// value, it returns an int32 that can be cast to the enum type by the caller. -// -// The function can deal with both JSON representations, numeric and symbolic. -func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { - if data[0] == '"' { - // New style: enums are strings. - var repr string - if err := json.Unmarshal(data, &repr); err != nil { - return -1, err - } - val, ok := m[repr] - if !ok { - return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) - } - return val, nil - } - // Old style: enums are ints. - var val int32 - if err := json.Unmarshal(data, &val); err != nil { - return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) - } - return val, nil -} - -// DebugPrint dumps the encoded data in b in a debugging format with a header -// including the string s. Used in testing but made available for general debugging. -func (p *Buffer) DebugPrint(s string, b []byte) { - var u uint64 - - obuf := p.buf - index := p.index - p.buf = b - p.index = 0 - depth := 0 - - fmt.Printf("\n--- %s ---\n", s) - -out: - for { - for i := 0; i < depth; i++ { - fmt.Print(" ") - } - - index := p.index - if index == len(p.buf) { - break - } - - op, err := p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: fetching op err %v\n", index, err) - break out - } - tag := op >> 3 - wire := op & 7 - - switch wire { - default: - fmt.Printf("%3d: t=%3d unknown wire=%d\n", - index, tag, wire) - break out - - case WireBytes: - var r []byte - - r, err = p.DecodeRawBytes(false) - if err != nil { - break out - } - fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) - if len(r) <= 6 { - for i := 0; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } else { - for i := 0; i < 3; i++ { - fmt.Printf(" %.2x", r[i]) - } - fmt.Printf(" ..") - for i := len(r) - 3; i < len(r); i++ { - fmt.Printf(" %.2x", r[i]) - } - } - fmt.Printf("\n") - - case WireFixed32: - u, err = p.DecodeFixed32() - if err != nil { - fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) - - case WireFixed64: - u, err = p.DecodeFixed64() - if err != nil { - fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) - - case WireVarint: - u, err = p.DecodeVarint() - if err != nil { - fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) - break out - } - fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) - - case WireStartGroup: - fmt.Printf("%3d: t=%3d start\n", index, tag) - depth++ - - case WireEndGroup: - depth-- - fmt.Printf("%3d: t=%3d end\n", index, tag) - } - } - - if depth != 0 { - fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) - } - fmt.Printf("\n") - - p.buf = obuf - p.index = index -} - -// SetDefaults sets unset protocol buffer fields to their default values. -// It only modifies fields that are both unset and have defined defaults. -// It recursively sets default values in any non-nil sub-messages. -func SetDefaults(pb Message) { - setDefaults(reflect.ValueOf(pb), true, false) -} - -// v is a pointer to a struct. 
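UnmarshalJSONEnum above accepts both JSON representations of an enum — a quoted symbolic name or a bare integer — dispatching on the first byte. A self-contained sketch of the same dual decode; `enumFromJSON` is an illustrative name:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// enumFromJSON handles the two shapes UnmarshalJSONEnum accepts:
// a quoted symbolic name ("X") or a bare integer (17).
func enumFromJSON(names map[string]int32, data []byte) (int32, error) {
	if len(data) > 0 && data[0] == '"' {
		var s string
		if err := json.Unmarshal(data, &s); err != nil {
			return 0, err
		}
		v, ok := names[s]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum value %q", s)
		}
		return v, nil
	}
	var v int32
	err := json.Unmarshal(data, &v)
	return v, err
}

func main() {
	names := map[string]int32{"X": 17}
	for _, in := range []string{`"X"`, `17`} {
		v, err := enumFromJSON(names, []byte(in))
		fmt.Println(in, "->", v, err)
	}
}
```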
-func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() - - defaultMu.RLock() - dm, ok := defaults[v.Type()] - defaultMu.RUnlock() - if !ok { - dm = buildDefaultMessage(v.Type()) - defaultMu.Lock() - defaults[v.Type()] = dm - defaultMu.Unlock() - } - - for _, sf := range dm.scalars { - f := v.Field(sf.index) - if !f.IsNil() { - // field already set - continue - } - dv := sf.value - if dv == nil && !zeros { - // no explicit default, and don't want to set zeros - continue - } - fptr := f.Addr().Interface() // **T - // TODO: Consider batching the allocations we do here. - switch sf.kind { - case reflect.Bool: - b := new(bool) - if dv != nil { - *b = dv.(bool) - } - *(fptr.(**bool)) = b - case reflect.Float32: - f := new(float32) - if dv != nil { - *f = dv.(float32) - } - *(fptr.(**float32)) = f - case reflect.Float64: - f := new(float64) - if dv != nil { - *f = dv.(float64) - } - *(fptr.(**float64)) = f - case reflect.Int32: - // might be an enum - if ft := f.Type(); ft != int32PtrType { - // enum - f.Set(reflect.New(ft.Elem())) - if dv != nil { - f.Elem().SetInt(int64(dv.(int32))) - } - } else { - // int32 field - i := new(int32) - if dv != nil { - *i = dv.(int32) - } - *(fptr.(**int32)) = i - } - case reflect.Int64: - i := new(int64) - if dv != nil { - *i = dv.(int64) - } - *(fptr.(**int64)) = i - case reflect.String: - s := new(string) - if dv != nil { - *s = dv.(string) - } - *(fptr.(**string)) = s - case reflect.Uint8: - // exceptional case: []byte - var b []byte - if dv != nil { - db := dv.([]byte) - b = make([]byte, len(db)) - copy(b, db) - } else { - b = []byte{} - } - *(fptr.(*[]byte)) = b - case reflect.Uint32: - u := new(uint32) - if dv != nil { - *u = dv.(uint32) - } - *(fptr.(**uint32)) = u - case reflect.Uint64: - u := new(uint64) - if dv != nil { - *u = dv.(uint64) - } - *(fptr.(**uint64)) = u - default: - log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) - } - } - - for _, ni := range dm.nested { - f := v.Field(ni) - // f is *T or []*T or map[T]*T - switch f.Kind() { - case reflect.Ptr: - if f.IsNil() { - continue - } - setDefaults(f, recur, zeros) - - case reflect.Slice: - for i := 0; i < f.Len(); i++ { - e := f.Index(i) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - - case reflect.Map: - for _, k := range f.MapKeys() { - e := f.MapIndex(k) - if e.IsNil() { - continue - } - setDefaults(e, recur, zeros) - } - } - } -} - -var ( - // defaults maps a protocol buffer struct type to a slice of the fields, - // with its scalar fields set to their proto-declared non-zero default values. - defaultMu sync.RWMutex - defaults = make(map[reflect.Type]defaultMessage) - - int32PtrType = reflect.TypeOf((*int32)(nil)) -) - -// defaultMessage represents information about the default values of a message. -type defaultMessage struct { - scalars []scalarField - nested []int // struct field index of nested messages -} - -type scalarField struct { - index int // struct field index - kind reflect.Kind // element type (the T in *T or []T) - value interface{} // the proto-declared default value, or nil -} - -// t is a struct type. 
-func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { - sprop := GetProperties(t) - for _, prop := range sprop.Prop { - fi, ok := sprop.decoderTags.get(prop.Tag) - if !ok { - // XXX_unrecognized - continue - } - ft := t.Field(fi).Type - - sf, nested, err := fieldDefault(ft, prop) - switch { - case err != nil: - log.Print(err) - case nested: - dm.nested = append(dm.nested, fi) - case sf != nil: - sf.index = fi - dm.scalars = append(dm.scalars, *sf) - } - } - - return dm -} - -// fieldDefault returns the scalarField for field type ft. -// sf will be nil if the field can not have a default. -// nestedMessage will be true if this is a nested message. -// Note that sf.index is not set on return. -func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { - var canHaveDefault bool - switch ft.Kind() { - case reflect.Ptr: - if ft.Elem().Kind() == reflect.Struct { - nestedMessage = true - } else { - canHaveDefault = true // proto2 scalar field - } - - case reflect.Slice: - switch ft.Elem().Kind() { - case reflect.Ptr: - nestedMessage = true // repeated message - case reflect.Uint8: - canHaveDefault = true // bytes field - } - - case reflect.Map: - if ft.Elem().Kind() == reflect.Ptr { - nestedMessage = true // map with message values - } - } - - if !canHaveDefault { - if nestedMessage { - return nil, true, nil - } - return nil, false, nil - } - - // We now know that ft is a pointer or slice. - sf = &scalarField{kind: ft.Elem().Kind()} - - // scalar fields without defaults - if !prop.HasDefault { - return sf, false, nil - } - - // a scalar field: either *T or []byte - switch ft.Elem().Kind() { - case reflect.Bool: - x, err := strconv.ParseBool(prop.Default) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Float32: - x, err := strconv.ParseFloat(prop.Default, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) - } - sf.value = float32(x) - case reflect.Float64: - x, err := strconv.ParseFloat(prop.Default, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.Int32: - x, err := strconv.ParseInt(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) - } - sf.value = int32(x) - case reflect.Int64: - x, err := strconv.ParseInt(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) - } - sf.value = x - case reflect.String: - sf.value = prop.Default - case reflect.Uint8: - // []byte (not *uint8) - sf.value = []byte(prop.Default) - case reflect.Uint32: - x, err := strconv.ParseUint(prop.Default, 10, 32) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) - } - sf.value = uint32(x) - case reflect.Uint64: - x, err := strconv.ParseUint(prop.Default, 10, 64) - if err != nil { - return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) - } - sf.value = x - default: - return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) - } - - return sf, false, nil -} - -// mapKeys returns a sort.Interface to be used for sorting the map keys. -// Map fields may have key types of non-float scalars, strings and enums. 
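Deterministic serialization needs a stable order for map entries, which is what the mapKeys helper just below provides via sort.Interface with a per-kind comparator. A standalone sketch of the same idea, using sort.Slice for brevity rather than the sort.Interface machinery:

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[string]int{"b": 2, "a": 1, "c": 3}

	// Deterministic output needs a stable key order; like mapKeys,
	// sort the reflect.Values of the keys before walking the map.
	keys := reflect.ValueOf(m).MapKeys()
	sort.Slice(keys, func(i, j int) bool {
		return keys[i].String() < keys[j].String()
	})
	for _, k := range keys {
		fmt.Println(k.String(), m[k.String()])
	}
}
```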
-func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{vs: vs} - - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. - if len(vs) == 0 { - return s - } - switch vs[0].Kind() { - case reflect.Int32, reflect.Int64: - s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } - case reflect.Uint32, reflect.Uint64: - s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } - case reflect.Bool: - s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true - case reflect.String: - s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } - default: - panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) - } - - return s -} - -type mapKeySorter struct { - vs []reflect.Value - less func(a, b reflect.Value) bool -} - -func (s mapKeySorter) Len() int { return len(s.vs) } -func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } -func (s mapKeySorter) Less(i, j int) bool { - return s.less(s.vs[i], s.vs[j]) -} - -// isProto3Zero reports whether v is a zero proto3 value. -func isProto3Zero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint32, reflect.Uint64: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.String: - return v.String() == "" - } - return false -} - -const ( - // ProtoPackageIsVersion3 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true - - // ProtoPackageIsVersion2 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true - - // ProtoPackageIsVersion1 is referenced from generated protocol buffer files - // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true -) - -// InternalMessageInfo is a type used internally by generated .pb.go files. -// This type is not intended to be used by non-generated code. -// This type is not subject to any compatibility guarantee. -type InternalMessageInfo struct { - marshal *marshalInfo - unmarshal *unmarshalInfo - merge *mergeInfo - discard *discardInfo -} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go deleted file mode 100644 index f48a756..0000000 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ /dev/null @@ -1,181 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Support for message sets. - */ - -import ( - "errors" -) - -// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. -// A message type ID is required for storing a protocol buffer in a message set. -var errNoMessageTypeID = errors.New("proto does not have a message type ID") - -// The first two types (_MessageSet_Item and messageSet) -// model what the protocol compiler produces for the following protocol message: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } -// That is the MessageSet wire format. We can't use a proto to generate these -// because that would introduce a circular dependency between it and this package. - -type _MessageSet_Item struct { - TypeId *int32 `protobuf:"varint,2,req,name=type_id"` - Message []byte `protobuf:"bytes,3,req,name=message"` -} - -type messageSet struct { - Item []*_MessageSet_Item `protobuf:"group,1,rep"` - XXX_unrecognized []byte - // TODO: caching? -} - -// Make sure messageSet is a Message. -var _ Message = (*messageSet)(nil) - -// messageTypeIder is an interface satisfied by a protocol buffer type -// that may be stored in a MessageSet. -type messageTypeIder interface { - MessageTypeId() int32 -} - -func (ms *messageSet) find(pb Message) *_MessageSet_Item { - mti, ok := pb.(messageTypeIder) - if !ok { - return nil - } - id := mti.MessageTypeId() - for _, item := range ms.Item { - if *item.TypeId == id { - return item - } - } - return nil -} - -func (ms *messageSet) Has(pb Message) bool { - return ms.find(pb) != nil -} - -func (ms *messageSet) Unmarshal(pb Message) error { - if item := ms.find(pb); item != nil { - return Unmarshal(item.Message, pb) - } - if _, ok := pb.(messageTypeIder); !ok { - return errNoMessageTypeID - } - return nil // TODO: return error instead? -} - -func (ms *messageSet) Marshal(pb Message) error { - msg, err := Marshal(pb) - if err != nil { - return err - } - if item := ms.find(pb); item != nil { - // reuse existing item - item.Message = msg - return nil - } - - mti, ok := pb.(messageTypeIder) - if !ok { - return errNoMessageTypeID - } - - mtid := mti.MessageTypeId() - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: &mtid, - Message: msg, - }) - return nil -} - -func (ms *messageSet) Reset() { *ms = messageSet{} } -func (ms *messageSet) String() string { return CompactTextString(ms) } -func (*messageSet) ProtoMessage() {} - -// Support for the message_set_wire_format message option. 
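[editor note] The mapKeys helper deleted above builds a sort.Interface over reflect.Values so map fields marshal in a deterministic key order. A self-contained sketch of the same idea, using sort.Slice for brevity where the original defines mapKeySorter, and only the string-key case; sortMapKeys is an illustrative name:

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortMapKeys orders a map's reflect.Value keys the way the deleted
// mapKeys does for string keys, giving stable marshal output.
func sortMapKeys(m interface{}) []reflect.Value {
	keys := reflect.ValueOf(m).MapKeys()
	sort.Slice(keys, func(i, j int) bool {
		return keys[i].String() < keys[j].String()
	})
	return keys
}

func main() {
	for _, k := range sortMapKeys(map[string]int{"b": 2, "a": 1, "c": 3}) {
		fmt.Println(k.String()) // a, b, c: one per line
	}
}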
- -func skipVarint(buf []byte) []byte { - i := 0 - for ; buf[i]&0x80 != 0; i++ { - } - return buf[i+1:] -} - -// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. -func unmarshalMessageSet(buf []byte, exts interface{}) error { - var m map[int32]Extension - switch exts := exts.(type) { - case *XXX_InternalExtensions: - m = exts.extensionsWrite() - case map[int32]Extension: - m = exts - default: - return errors.New("proto: not an extension map") - } - - ms := new(messageSet) - if err := Unmarshal(buf, ms); err != nil { - return err - } - for _, item := range ms.Item { - id := *item.TypeId - msg := item.Message - - // Restore wire type and field number varint, plus length varint. - // Be careful to preserve duplicate items. - b := EncodeVarint(uint64(id)<<3 | WireBytes) - if ext, ok := m[id]; ok { - // Existing data; rip off the tag and length varint - // so we join the new data correctly. - // We can assume that ext.enc is set because we are unmarshaling. - o := ext.enc[len(b):] // skip wire type and field number - _, n := DecodeVarint(o) // calculate length of length varint - o = o[n:] // skip length varint - msg = append(o, msg...) // join old data and new data - } - b = append(b, EncodeVarint(uint64(len(msg)))...) - b = append(b, msg...) - - m[id] = Extension{enc: b} - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index 94fa919..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,360 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build purego appengine js - -// This file contains an implementation of proto field accesses using package reflect. 
-// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "reflect" - "sync" -) - -const unsafeAllowed = false - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. -type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// zeroField is a noop when calling pointer.offset. -var zeroField = field([]int{}) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// The pointer type is for the table-driven decoder. -// The implementation here uses a reflect.Value of pointer type to -// create a generic pointer. In pointer_unsafe.go we use unsafe -// instead of reflect to implement the same (but faster) interface. -type pointer struct { - v reflect.Value -} - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - return pointer{v: reflect.ValueOf(*i)} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { - v := reflect.ValueOf(*i) - u := reflect.New(v.Type()) - u.Elem().Set(v) - if deref { - u = u.Elem() - } - return pointer{v: u} -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{v: v} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} -} - -func (p pointer) isNil() bool { - return p.v.IsNil() -} - -// grow updates the slice s in place to make it one element longer. -// s must be addressable. -// Returns the (addressable) new element. -func grow(s reflect.Value) reflect.Value { - n, m := s.Len(), s.Cap() - if n < m { - s.SetLen(n + 1) - } else { - s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) - } - return s.Index(n) -} - -func (p pointer) toInt64() *int64 { - return p.v.Interface().(*int64) -} -func (p pointer) toInt64Ptr() **int64 { - return p.v.Interface().(**int64) -} -func (p pointer) toInt64Slice() *[]int64 { - return p.v.Interface().(*[]int64) -} - -var int32ptr = reflect.TypeOf((*int32)(nil)) - -func (p pointer) toInt32() *int32 { - return p.v.Convert(int32ptr).Interface().(*int32) -} - -// The toInt32Ptr/Slice methods don't work because of enums. -// Instead, we must use set/get methods for the int32ptr/slice case. -/* - func (p pointer) toInt32Ptr() **int32 { - return p.v.Interface().(**int32) -} - func (p pointer) toInt32Slice() *[]int32 { - return p.v.Interface().(*[]int32) -} -*/ -func (p pointer) getInt32Ptr() *int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().(*int32) - } - // an enum - return p.v.Elem().Convert(int32PtrType).Interface().(*int32) -} -func (p pointer) setInt32Ptr(v int32) { - // Allocate value in a *int32. Possibly convert that to a *enum. - // Then assign it to a **int32 or **enum. - // Note: we can convert *int32 to *enum, but we can't convert - // **int32 to **enum! 
- p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) -} - -// getInt32Slice copies []int32 from p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) getInt32Slice() []int32 { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - return p.v.Elem().Interface().([]int32) - } - // an enum - // Allocate a []int32, then assign []enum's values into it. - // Note: we can't convert []enum to []int32. - slice := p.v.Elem() - s := make([]int32, slice.Len()) - for i := 0; i < slice.Len(); i++ { - s[i] = int32(slice.Index(i).Int()) - } - return s -} - -// setInt32Slice copies []int32 into p as a new slice. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setInt32Slice(v []int32) { - if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { - // raw int32 type - p.v.Elem().Set(reflect.ValueOf(v)) - return - } - // an enum - // Allocate a []enum, then assign []int32's values into it. - // Note: we can't convert []enum to []int32. - slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) - for i, x := range v { - slice.Index(i).SetInt(int64(x)) - } - p.v.Elem().Set(slice) -} -func (p pointer) appendInt32Slice(v int32) { - grow(p.v.Elem()).SetInt(int64(v)) -} - -func (p pointer) toUint64() *uint64 { - return p.v.Interface().(*uint64) -} -func (p pointer) toUint64Ptr() **uint64 { - return p.v.Interface().(**uint64) -} -func (p pointer) toUint64Slice() *[]uint64 { - return p.v.Interface().(*[]uint64) -} -func (p pointer) toUint32() *uint32 { - return p.v.Interface().(*uint32) -} -func (p pointer) toUint32Ptr() **uint32 { - return p.v.Interface().(**uint32) -} -func (p pointer) toUint32Slice() *[]uint32 { - return p.v.Interface().(*[]uint32) -} -func (p pointer) toBool() *bool { - return p.v.Interface().(*bool) -} -func (p pointer) toBoolPtr() **bool { - return p.v.Interface().(**bool) -} -func (p pointer) toBoolSlice() *[]bool { - return p.v.Interface().(*[]bool) -} -func (p pointer) toFloat64() *float64 { - return p.v.Interface().(*float64) -} -func (p pointer) toFloat64Ptr() **float64 { - return p.v.Interface().(**float64) -} -func (p pointer) toFloat64Slice() *[]float64 { - return p.v.Interface().(*[]float64) -} -func (p pointer) toFloat32() *float32 { - return p.v.Interface().(*float32) -} -func (p pointer) toFloat32Ptr() **float32 { - return p.v.Interface().(**float32) -} -func (p pointer) toFloat32Slice() *[]float32 { - return p.v.Interface().(*[]float32) -} -func (p pointer) toString() *string { - return p.v.Interface().(*string) -} -func (p pointer) toStringPtr() **string { - return p.v.Interface().(**string) -} -func (p pointer) toStringSlice() *[]string { - return p.v.Interface().(*[]string) -} -func (p pointer) toBytes() *[]byte { - return p.v.Interface().(*[]byte) -} -func (p pointer) toBytesSlice() *[][]byte { - return p.v.Interface().(*[][]byte) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return p.v.Interface().(*XXX_InternalExtensions) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return p.v.Interface().(*map[int32]Extension) -} -func (p pointer) getPointer() pointer { - return pointer{v: p.v.Elem()} -} -func (p pointer) setPointer(q pointer) { - p.v.Elem().Set(q.v) -} -func (p pointer) appendPointer(q pointer) { - grow(p.v.Elem()).Set(q.v) -} - -// getPointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. 
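[editor note] In pointer_reflect.go, being deleted here, a "field" is a reflect index path and every accessor goes through FieldByIndex plus Addr, which is why it is slower than the unsafe variant. A tiny sketch of that access pattern under an assumed message struct (msg and countPath are illustrative):

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Name  string
	Count int32
}

func main() {
	m := &msg{}
	countPath := []int{1} // the index path toField would return for Count
	// offset() in the reflect implementation: FieldByIndex on the
	// dereferenced struct, then Addr() to get a *int32.
	p := reflect.ValueOf(m).Elem().FieldByIndex(countPath).Addr()
	*p.Interface().(*int32) = 7 // the shape pointer.toInt32 exposes
	fmt.Println(m.Count)        // 7
}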
-func (p pointer) getPointerSlice() []pointer { - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// setPointerSlice copies []pointer into p as a new []*T. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) setPointerSlice(v []pointer) { - if v == nil { - p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) - return - } - s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) - for _, p := range v { - s = reflect.Append(s, p.v) - } - p.v.Elem().Set(s) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - if p.v.Elem().IsNil() { - return pointer{v: p.v.Elem()} - } - return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct -} - -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - // TODO: check that p.v.Type().Elem() == t? - return p.v -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - atomicLock.Lock() - defer atomicLock.Unlock() - return *p -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomicLock.Lock() - defer atomicLock.Unlock() - *p = v -} - -var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go deleted file mode 100644 index dbfffe0..0000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ /dev/null @@ -1,313 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build !purego,!appengine,!js - -// This file contains the implementation of the proto field accesses using package unsafe. - -package proto - -import ( - "reflect" - "sync/atomic" - "unsafe" -) - -const unsafeAllowed = true - -// A field identifies a field in a struct, accessible from a pointer. -// In this implementation, a field is identified by its byte offset from the start of the struct. -type field uintptr - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return field(f.Offset) -} - -// invalidField is an invalid field identifier. -const invalidField = ^field(0) - -// zeroField is a noop when calling pointer.offset. -const zeroField = field(0) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { - return f != invalidField -} - -// The pointer type below is for the new table-driven encoder/decoder. -// The implementation here uses unsafe.Pointer to create a generic pointer. -// In pointer_reflect.go we use reflect instead of unsafe to implement -// the same (but slower) interface. -type pointer struct { - p unsafe.Pointer -} - -// size of pointer -var ptrSize = unsafe.Sizeof(uintptr(0)) - -// toPointer converts an interface of pointer type to a pointer -// that points to the same target. -func toPointer(i *Message) pointer { - // Super-tricky - read pointer out of data word of interface value. - // Saves ~25ns over the equivalent: - // return valToPointer(reflect.ValueOf(*i)) - return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} -} - -// toAddrPointer converts an interface to a pointer that points to -// the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { - // Super-tricky - read or get the address of data word of interface value. - if isptr { - // The interface is of pointer type, thus it is a direct interface. - // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} - } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p -} - -// valToPointer converts v to a pointer. v must be of pointer type. -func valToPointer(v reflect.Value) pointer { - return pointer{p: unsafe.Pointer(v.Pointer())} -} - -// offset converts from a pointer to a structure to a pointer to -// one of its fields. -func (p pointer) offset(f field) pointer { - // For safety, we should panic if !f.IsValid, however calling panic causes - // this to no longer be inlineable, which is a serious performance cost. 
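[editor note] pointer_unsafe.go, also deleted here, identifies a field by its byte offset instead of an index path, so pointer.offset is plain pointer arithmetic. A sketch of the same access under an assumed struct; note the conversion is kept in a single expression so the pointer stays valid for the garbage collector:

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type msg struct {
	Name  string
	Count int32
}

func main() {
	m := &msg{}
	f, _ := reflect.TypeOf(msg{}).FieldByName("Count")
	// field == byte offset; this is what pointer.offset(f) computes.
	p := unsafe.Pointer(uintptr(unsafe.Pointer(m)) + f.Offset)
	*(*int32)(p) = 7 // the shape pointer.toInt32 exposes
	fmt.Println(m.Count) // 7
}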
- /* - if !f.IsValid() { - panic("invalid field") - } - */ - return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} -} - -func (p pointer) isNil() bool { - return p.p == nil -} - -func (p pointer) toInt64() *int64 { - return (*int64)(p.p) -} -func (p pointer) toInt64Ptr() **int64 { - return (**int64)(p.p) -} -func (p pointer) toInt64Slice() *[]int64 { - return (*[]int64)(p.p) -} -func (p pointer) toInt32() *int32 { - return (*int32)(p.p) -} - -// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. -/* - func (p pointer) toInt32Ptr() **int32 { - return (**int32)(p.p) - } - func (p pointer) toInt32Slice() *[]int32 { - return (*[]int32)(p.p) - } -*/ -func (p pointer) getInt32Ptr() *int32 { - return *(**int32)(p.p) -} -func (p pointer) setInt32Ptr(v int32) { - *(**int32)(p.p) = &v -} - -// getInt32Slice loads a []int32 from p. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getInt32Slice() []int32 { - return *(*[]int32)(p.p) -} - -// setInt32Slice stores a []int32 to p. -// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setInt32Slice(v []int32) { - *(*[]int32)(p.p) = v -} - -// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? -func (p pointer) appendInt32Slice(v int32) { - s := (*[]int32)(p.p) - *s = append(*s, v) -} - -func (p pointer) toUint64() *uint64 { - return (*uint64)(p.p) -} -func (p pointer) toUint64Ptr() **uint64 { - return (**uint64)(p.p) -} -func (p pointer) toUint64Slice() *[]uint64 { - return (*[]uint64)(p.p) -} -func (p pointer) toUint32() *uint32 { - return (*uint32)(p.p) -} -func (p pointer) toUint32Ptr() **uint32 { - return (**uint32)(p.p) -} -func (p pointer) toUint32Slice() *[]uint32 { - return (*[]uint32)(p.p) -} -func (p pointer) toBool() *bool { - return (*bool)(p.p) -} -func (p pointer) toBoolPtr() **bool { - return (**bool)(p.p) -} -func (p pointer) toBoolSlice() *[]bool { - return (*[]bool)(p.p) -} -func (p pointer) toFloat64() *float64 { - return (*float64)(p.p) -} -func (p pointer) toFloat64Ptr() **float64 { - return (**float64)(p.p) -} -func (p pointer) toFloat64Slice() *[]float64 { - return (*[]float64)(p.p) -} -func (p pointer) toFloat32() *float32 { - return (*float32)(p.p) -} -func (p pointer) toFloat32Ptr() **float32 { - return (**float32)(p.p) -} -func (p pointer) toFloat32Slice() *[]float32 { - return (*[]float32)(p.p) -} -func (p pointer) toString() *string { - return (*string)(p.p) -} -func (p pointer) toStringPtr() **string { - return (**string)(p.p) -} -func (p pointer) toStringSlice() *[]string { - return (*[]string)(p.p) -} -func (p pointer) toBytes() *[]byte { - return (*[]byte)(p.p) -} -func (p pointer) toBytesSlice() *[][]byte { - return (*[][]byte)(p.p) -} -func (p pointer) toExtensions() *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(p.p) -} -func (p pointer) toOldExtensions() *map[int32]Extension { - return (*map[int32]Extension)(p.p) -} - -// getPointerSlice loads []*T from p as a []pointer. -// The value returned is aliased with the original slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) getPointerSlice() []pointer { - // Super-tricky - p should point to a []*T where T is a - // message type. We load it as []pointer. - return *(*[]pointer)(p.p) -} - -// setPointerSlice stores []pointer into p as a []*T. 
-// The value set is aliased with the input slice. -// This behavior differs from the implementation in pointer_reflect.go. -func (p pointer) setPointerSlice(v []pointer) { - // Super-tricky - p should point to a []*T where T is a - // message type. We store it as []pointer. - *(*[]pointer)(p.p) = v -} - -// getPointer loads the pointer at p and returns it. -func (p pointer) getPointer() pointer { - return pointer{p: *(*unsafe.Pointer)(p.p)} -} - -// setPointer stores the pointer q at p. -func (p pointer) setPointer(q pointer) { - *(*unsafe.Pointer)(p.p) = q.p -} - -// append q to the slice pointed to by p. -func (p pointer) appendPointer(q pointer) { - s := (*[]unsafe.Pointer)(p.p) - *s = append(*s, q.p) -} - -// getInterfacePointer returns a pointer that points to the -// interface data of the interface pointed by p. -func (p pointer) getInterfacePointer() pointer { - // Super-tricky - read pointer out of data word of interface value. - return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} -} - -// asPointerTo returns a reflect.Value that is a pointer to an -// object of type t stored at p. -func (p pointer) asPointerTo(t reflect.Type) reflect.Value { - return reflect.NewAt(t, p.p) -} - -func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { - return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { - return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { - return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} -func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { - return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) -} -func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) -} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go deleted file mode 100644 index a4b8c0c..0000000 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ /dev/null @@ -1,544 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - -import ( - "fmt" - "log" - "reflect" - "sort" - "strconv" - "strings" - "sync" -) - -const debug bool = false - -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 -) - -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. -type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order - - // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. - OneofTypes map[string]*OneofProperties -} - -// OneofProperties represents information about a specific field in a oneof. -type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. 
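[editor note] The wire-type constants just deleted combine with a field's tag number to form the key that precedes every value on the wire: key = tag<<3 | wiretype. This is the quantity the marshal code below caches as wiretag. A tiny illustration:

package main

import "fmt"

const (
	WireVarint = 0 // as in the deleted constants
	WireBytes  = 2
)

func main() {
	// Every encoded field starts with a varint key: tag<<3 | wiretype.
	fmt.Println(1<<3 | WireVarint) // 8: field 1, varint
	fmt.Println(3<<3 | WireBytes)  // 26: field 3, length-delimited
}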
-type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string - WireType int - Tag int - Required bool - Optional bool - Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only -} - -// String formats the properties in the protobuf struct field tag style. -func (p *Properties) String() string { - s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) - if p.Required { - s += ",req" - } - if p.Optional { - s += ",opt" - } - if p.Repeated { - s += ",rep" - } - if p.Packed { - s += ",packed" - } - s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { - s += ",json=" + p.JSONName - } - if p.proto3 { - s += ",proto3" - } - if p.oneof { - s += ",oneof" - } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } - if p.HasDefault { - s += ",def=" + p.Default - } - return s -} - -// Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": - p.Optional = true - case f == "rep": - p.Repeated = true - case f == "packed": - p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): - p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. 
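[editor note] Parse, deleted above, splits the generated protobuf struct tag on commas, with the caveat its comment notes: def= must come last because the default value may itself contain commas, so everything after it is rejoined. A sketch of reading such a tag via reflect; the example struct and its tag are invented for illustration:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type example struct {
	Greeting *string `protobuf:"bytes,49,opt,name=greeting,def=hello, world"`
}

func main() {
	tag := reflect.TypeOf(example{}).Field(0).Tag.Get("protobuf")
	fields := strings.Split(tag, ",")
	fmt.Println(fields[0], fields[1]) // bytes 49
	// def= is always last: rejoin the remainder, as Parse does.
	for i, f := range fields {
		if strings.HasPrefix(f, "def=") {
			def := strings.TrimPrefix(strings.Join(fields[i:], ","), "def=")
			fmt.Println(def) // hello, world
			break
		}
	}
}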
-func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) - } - } -} - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - -// Init populates the properties from a protocol buffer struct tag. -func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" - p.Name = name - p.OrigName = name - if tag == "" { - return - } - p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) -} - -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) - -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. -func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop - } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop -} - -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop - } - - prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof - } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") - } - } - - // Re-order prop.order. 
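[editor note] GetProperties above uses the classic read-mostly cache: an RLock for the common hit, a full Lock only to compute a miss, and a re-check under the write lock (plus pre-inserting the entry, which lets recursive message types terminate). A generic sketch of that locking pattern; describe and cache are illustrative names:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = make(map[reflect.Type]string)
)

// describe mirrors GetProperties' locking: RLock for the hot path,
// Lock plus a re-check only when the type has not been seen before.
func describe(t reflect.Type) string {
	mu.RLock()
	s, ok := cache[t]
	mu.RUnlock()
	if ok {
		return s
	}
	mu.Lock()
	defer mu.Unlock()
	if s, ok := cache[t]; ok { // re-check under the write lock
		return s
	}
	s = fmt.Sprintf("%v with %d fields", t, t.NumField())
	cache[t] = s
	return s
}

func main() {
	type point struct{ X, Y int }
	fmt.Println(describe(reflect.TypeOf(point{})))
}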
- sort.Sort(prop) - - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. - prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T - Prop: new(Properties), - } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue - } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break - } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ - } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i - } - prop.reqCount = reqCount - - return prop -} - -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. -func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. 
-func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. -func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go deleted file mode 100644 index 5cb11fa..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ /dev/null @@ -1,2776 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
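[editor note] The registry functions deleted above (RegisterType, RegisterMapType, MessageName, MessageType) keep paired forward and reverse maps between fully qualified proto names and Go types, populated by generated code at init time. A stripped-down sketch of that bookkeeping; the names and the stand-in message type are invented:

package main

import (
	"fmt"
	"reflect"
)

var (
	typesByName = make(map[string]reflect.Type)
	namesByType = make(map[reflect.Type]string)
)

// register mirrors RegisterType's core: name -> type and type -> name.
func register(x interface{}, name string) {
	t := reflect.TypeOf(x)
	typesByName[name] = t
	namesByType[t] = name
}

type FileEvent struct{} // stand-in for a generated message type

func main() {
	register((*FileEvent)(nil), "example.FileEvent") // generated code passes a typed nil
	fmt.Println(typesByName["example.FileEvent"])    // *main.FileEvent
	fmt.Println(namesByType[reflect.TypeOf((*FileEvent)(nil))])
}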
- -package proto - -import ( - "errors" - "fmt" - "math" - "reflect" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// a sizer takes a pointer to a field and the size of its tag, computes the size of -// the encoded data. -type sizer func(pointer, int) int - -// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), -// marshals the field to the end of the slice, returns the slice and error (if any). -type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) - -// marshalInfo is the information used for marshaling a message. -type marshalInfo struct { - typ reflect.Type - fields []*marshalFieldInfo - unrecognized field // offset of XXX_unrecognized - extensions field // offset of XXX_InternalExtensions - v1extensions field // offset of XXX_extensions - sizecache field // offset of XXX_sizecache - initialized int32 // 0 -- only typ is set, 1 -- fully initialized - messageset bool // uses message set wire format - hasmarshaler bool // has custom marshaler - sync.RWMutex // protect extElems map, also for initialization - extElems map[int32]*marshalElemInfo // info of extension elements -} - -// marshalFieldInfo is the information used for marshaling a field of a message. -type marshalFieldInfo struct { - field field - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isPointer bool - required bool // field is required - name string // name of the field, for error reporting - oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements -} - -// marshalElemInfo is the information used for marshaling an extension or oneof element. -type marshalElemInfo struct { - wiretag uint64 // tag in wire format - tagsize int // size of tag in wire format - sizer sizer - marshaler marshaler - isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr -} - -var ( - marshalInfoMap = map[reflect.Type]*marshalInfo{} - marshalInfoLock sync.Mutex -) - -// getMarshalInfo returns the information to marshal a given type of message. -// The info it returns may not necessarily initialized. -// t is the type of the message (NOT the pointer to it). -func getMarshalInfo(t reflect.Type) *marshalInfo { - marshalInfoLock.Lock() - u, ok := marshalInfoMap[t] - if !ok { - u = &marshalInfo{typ: t} - marshalInfoMap[t] = u - } - marshalInfoLock.Unlock() - return u -} - -// Size is the entry point from generated code, -// and should be ONLY called by generated code. -// It computes the size of encoded data of msg. -// a is a pointer to a place to store cached marshal info. -func (a *InternalMessageInfo) Size(msg Message) int { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return 0 - } - return u.size(ptr) -} - -// Marshal is the entry point from generated code, -// and should be ONLY called by generated code. -// It marshals msg to the end of b. -// a is a pointer to a place to store cached marshal info. 
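[editor note] The isNil guard in Size above exists because a typed nil stored in an interface does not compare equal to nil: the interface itself is non-nil even though the pointer inside it is. A short demonstration of why msg == nil would not catch (*SomeMessage)(nil):

package main

import (
	"fmt"
	"reflect"
)

type someMessage struct{}

func main() {
	var m *someMessage    // typed nil pointer
	var i interface{} = m // interface holding a nil pointer
	fmt.Println(i == nil) // false: the interface value is non-nil
	fmt.Println(reflect.ValueOf(i).IsNil()) // true: what ptr.isNil() detects
}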
-func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { - u := getMessageMarshalInfo(msg, a) - ptr := toPointer(&msg) - if ptr.isNil() { - // We get here if msg is a typed nil ((*SomeMessage)(nil)), - // so it satisfies the interface, and msg == nil wouldn't - // catch it. We don't want crash in this case. - return b, ErrNil - } - return u.marshal(b, ptr, deterministic) -} - -func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { - // u := a.marshal, but atomically. - // We use an atomic here to ensure memory consistency. - u := atomicLoadMarshalInfo(&a.marshal) - if u == nil { - // Get marshal information from type of message. - t := reflect.ValueOf(msg).Type() - if t.Kind() != reflect.Ptr { - panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) - } - u = getMarshalInfo(t.Elem()) - // Store it in the cache for later users. - // a.marshal = u, but atomically. - atomicStoreMarshalInfo(&a.marshal, u) - } - return u -} - -// size is the main function to compute the size of the encoded data of a message. -// ptr is the pointer to the message. -func (u *marshalInfo) size(ptr pointer) int { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b, _ := m.Marshal() - return len(b) - } - - n := 0 - for _, f := range u.fields { - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - n += f.sizer(ptr.offset(f.field), f.tagsize) - } - if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - n += u.sizeMessageSet(e) - } else { - n += u.sizeExtensions(e) - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - n += u.sizeV1Extensions(m) - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - n += len(s) - } - // cache the result for use in marshal - if u.sizecache.IsValid() { - atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) - } - return n -} - -// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), -// fall back to compute the size. -func (u *marshalInfo) cachedsize(ptr pointer) int { - if u.sizecache.IsValid() { - return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) - } - return u.size(ptr) -} - -// marshal is the main function to marshal a message. It takes a byte slice and appends -// the encoded data to the end of the slice, returns the slice and error (if any). -// ptr is the pointer to the message. -// If deterministic is true, map is marshaled in deterministic order. -func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeMarshalInfo() - } - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - if u.hasmarshaler { - m := ptr.asPointerTo(u.typ).Interface().(Marshaler) - b1, err := m.Marshal() - b = append(b, b1...) - return b, err - } - - var err, errLater error - // The old marshaler encodes extensions at beginning. 
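[editor note] getMessageMarshalInfo above caches the computed *marshalInfo in the InternalMessageInfo with the atomic load/store helpers, so concurrent marshals see a consistently published pointer without taking a lock on the hot path. A minimal sketch of that lock-free lazy cache under an assumed info type:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type info struct{ fields int }

// load and store mirror atomicLoadMarshalInfo / atomicStoreMarshalInfo.
func load(p **info) *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}

func store(p **info, v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

func main() {
	var cached *info
	if u := load(&cached); u == nil {
		store(&cached, &info{fields: 3}) // build once, publish atomically
	}
	fmt.Println(load(&cached).fields) // 3
}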
- if u.extensions.IsValid() { - e := ptr.offset(u.extensions).toExtensions() - if u.messageset { - b, err = u.appendMessageSet(b, e, deterministic) - } else { - b, err = u.appendExtensions(b, e, deterministic) - } - if err != nil { - return b, err - } - } - if u.v1extensions.IsValid() { - m := *ptr.offset(u.v1extensions).toOldExtensions() - b, err = u.appendV1Extensions(b, m, deterministic) - if err != nil { - return b, err - } - } - for _, f := range u.fields { - if f.required { - if ptr.offset(f.field).getPointer().isNil() { - // Required field is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name} - } - continue - } - } - if f.isPointer && ptr.offset(f.field).getPointer().isNil() { - // nil pointer always marshals to nothing - continue - } - b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) - if err != nil { - if err1, ok := err.(*RequiredNotSetError); ok { - // Required field in submessage is not set. - // We record the error but keep going, to give a complete marshaling. - if errLater == nil { - errLater = &RequiredNotSetError{f.name + "." + err1.field} - } - continue - } - if err == errRepeatedHasNil { - err = errors.New("proto: repeated field " + f.name + " has nil element") - } - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return b, err - } - } - if u.unrecognized.IsValid() { - s := *ptr.offset(u.unrecognized).toBytes() - b = append(b, s...) - } - return b, errLater -} - -// computeMarshalInfo initializes the marshal info. -func (u *marshalInfo) computeMarshalInfo() { - u.Lock() - defer u.Unlock() - if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock - return - } - - t := u.typ - u.unrecognized = invalidField - u.extensions = invalidField - u.v1extensions = invalidField - u.sizecache = invalidField - - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. 
- if reflect.PtrTo(t).Implements(marshalerType) { - u.hasmarshaler = true - atomic.StoreInt32(&u.initialized, 1) - return - } - - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - - n := t.NumField() - - // deal with XXX fields first - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if !strings.HasPrefix(f.Name, "XXX_") { - continue - } - switch f.Name { - case "XXX_sizecache": - u.sizecache = toField(&f) - case "XXX_unrecognized": - u.unrecognized = toField(&f) - case "XXX_InternalExtensions": - u.extensions = toField(&f) - u.messageset = f.Tag.Get("protobuf_messageset") == "1" - case "XXX_extensions": - u.v1extensions = toField(&f) - case "XXX_NoUnkeyedLiteral": - // nothing to do - default: - panic("unknown XXX field: " + f.Name) - } - n-- - } - - // normal fields - fields := make([]marshalFieldInfo, n) // batch allocation - u.fields = make([]*marshalFieldInfo, 0, n) - for i, j := 0, 0; i < t.NumField(); i++ { - f := t.Field(i) - - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - field := &fields[j] - j++ - field.name = f.Name - u.fields = append(u.fields, field) - if f.Tag.Get("protobuf_oneof") != "" { - field.computeOneofFieldInfo(&f, oneofImplementers) - continue - } - if f.Tag.Get("protobuf") == "" { - // field has no tag (not in generated message), ignore it - u.fields = u.fields[:len(u.fields)-1] - j-- - continue - } - field.computeMarshalFieldInfo(&f) - } - - // fields are marshaled in tag order on the wire. - sort.Sort(byTag(u.fields)) - - atomic.StoreInt32(&u.initialized, 1) -} - -// helper for sorting fields by tag -type byTag []*marshalFieldInfo - -func (a byTag) Len() int { return len(a) } -func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } - -// getExtElemInfo returns the information to marshal an extension element. -// The info it returns is initialized. -func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { - // get from cache first - u.RLock() - e, ok := u.extElems[desc.Field] - u.RUnlock() - if ok { - return e - } - - t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct - tags := strings.Split(desc.Tag, ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } - e = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - isptr: t.Kind() == reflect.Ptr, - deref: deref, - } - - // update cache - u.Lock() - if u.extElems == nil { - u.extElems = make(map[int32]*marshalElemInfo) - } - u.extElems[desc.Field] = e - u.Unlock() - return e -} - -// computeMarshalFieldInfo fills up the information to marshal a field. -func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { - // parse protobuf tag of the field. - // tag has format of "bytes,49,opt,name=foo,def=hello!" 
- tags := strings.Split(f.Tag.Get("protobuf"), ",") - if tags[0] == "" { - return - } - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - if tags[2] == "req" { - fi.required = true - } - fi.setTag(f, tag, wt) - fi.setMarshaler(f, tags) -} - -func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { - fi.field = toField(f) - fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. - fi.isPointer = true - fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) - fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) - - ityp := f.Type // interface type - for _, o := range oneofImplementers { - t := reflect.TypeOf(o) - if !t.Implements(ityp) { - continue - } - sf := t.Elem().Field(0) // oneof implementer is a struct with a single field - tags := strings.Split(sf.Tag.Get("protobuf"), ",") - tag, err := strconv.Atoi(tags[1]) - if err != nil { - panic("tag is not an integer") - } - wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value - fi.oneofElems[t.Elem()] = &marshalElemInfo{ - wiretag: uint64(tag)<<3 | wt, - tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, - } - } -} - -// wiretype returns the wire encoding of the type. -func wiretype(encoding string) uint64 { - switch encoding { - case "fixed32": - return WireFixed32 - case "fixed64": - return WireFixed64 - case "varint", "zigzag32", "zigzag64": - return WireVarint - case "bytes": - return WireBytes - case "group": - return WireStartGroup - } - panic("unknown wire type " + encoding) -} - -// setTag fills up the tag (in wire format) and its size in the info of a field. -func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { - fi.field = toField(f) - fi.wiretag = uint64(tag)<<3 | wt - fi.tagsize = SizeVarint(uint64(tag) << 3) -} - -// setMarshaler fills up the sizer and marshaler in the info of a field. -func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { - switch f.Type.Kind() { - case reflect.Map: - // map field - fi.isPointer = true - fi.sizer, fi.marshaler = makeMapMarshaler(f) - return - case reflect.Ptr, reflect.Slice: - fi.isPointer = true - } - fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) -} - -// typeMarshaler returns the sizer and marshaler of a given field. -// t is the type of the field. -// tags is the generated "protobuf" tag of the field. -// If nozero is true, zero value is not marshaled to the wire. -// If oneof is true, it is a oneof field. 
-func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { - encoding := tags[0] - - pointer := false - slice := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - packed := false - proto3 := false - validateUTF8 := true - for i := 2; i < len(tags); i++ { - if tags[i] == "packed" { - packed = true - } - if tags[i] == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - switch t.Kind() { - case reflect.Bool: - if pointer { - return sizeBoolPtr, appendBoolPtr - } - if slice { - if packed { - return sizeBoolPackedSlice, appendBoolPackedSlice - } - return sizeBoolSlice, appendBoolSlice - } - if nozero { - return sizeBoolValueNoZero, appendBoolValueNoZero - } - return sizeBoolValue, appendBoolValue - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixed32Ptr, appendFixed32Ptr - } - if slice { - if packed { - return sizeFixed32PackedSlice, appendFixed32PackedSlice - } - return sizeFixed32Slice, appendFixed32Slice - } - if nozero { - return sizeFixed32ValueNoZero, appendFixed32ValueNoZero - } - return sizeFixed32Value, appendFixed32Value - case "varint": - if pointer { - return sizeVarint32Ptr, appendVarint32Ptr - } - if slice { - if packed { - return sizeVarint32PackedSlice, appendVarint32PackedSlice - } - return sizeVarint32Slice, appendVarint32Slice - } - if nozero { - return sizeVarint32ValueNoZero, appendVarint32ValueNoZero - } - return sizeVarint32Value, appendVarint32Value - } - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return sizeFixedS32Ptr, appendFixedS32Ptr - } - if slice { - if packed { - return sizeFixedS32PackedSlice, appendFixedS32PackedSlice - } - return sizeFixedS32Slice, appendFixedS32Slice - } - if nozero { - return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero - } - return sizeFixedS32Value, appendFixedS32Value - case "varint": - if pointer { - return sizeVarintS32Ptr, appendVarintS32Ptr - } - if slice { - if packed { - return sizeVarintS32PackedSlice, appendVarintS32PackedSlice - } - return sizeVarintS32Slice, appendVarintS32Slice - } - if nozero { - return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero - } - return sizeVarintS32Value, appendVarintS32Value - case "zigzag32": - if pointer { - return sizeZigzag32Ptr, appendZigzag32Ptr - } - if slice { - if packed { - return sizeZigzag32PackedSlice, appendZigzag32PackedSlice - } - return sizeZigzag32Slice, appendZigzag32Slice - } - if nozero { - return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero - } - return sizeZigzag32Value, appendZigzag32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return sizeFixed64Ptr, appendFixed64Ptr - } - if slice { - if packed { - return sizeFixed64PackedSlice, appendFixed64PackedSlice - } - return sizeFixed64Slice, appendFixed64Slice - } - if nozero { - return sizeFixed64ValueNoZero, appendFixed64ValueNoZero - } - return sizeFixed64Value, appendFixed64Value - case "varint": - if pointer { - return sizeVarint64Ptr, appendVarint64Ptr - } - if slice { - if packed { - return sizeVarint64PackedSlice, appendVarint64PackedSlice - } - return sizeVarint64Slice, appendVarint64Slice - } - if nozero { - return sizeVarint64ValueNoZero, appendVarint64ValueNoZero - } - return sizeVarint64Value, appendVarint64Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return 
sizeFixedS64Ptr, appendFixedS64Ptr - } - if slice { - if packed { - return sizeFixedS64PackedSlice, appendFixedS64PackedSlice - } - return sizeFixedS64Slice, appendFixedS64Slice - } - if nozero { - return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero - } - return sizeFixedS64Value, appendFixedS64Value - case "varint": - if pointer { - return sizeVarintS64Ptr, appendVarintS64Ptr - } - if slice { - if packed { - return sizeVarintS64PackedSlice, appendVarintS64PackedSlice - } - return sizeVarintS64Slice, appendVarintS64Slice - } - if nozero { - return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero - } - return sizeVarintS64Value, appendVarintS64Value - case "zigzag64": - if pointer { - return sizeZigzag64Ptr, appendZigzag64Ptr - } - if slice { - if packed { - return sizeZigzag64PackedSlice, appendZigzag64PackedSlice - } - return sizeZigzag64Slice, appendZigzag64Slice - } - if nozero { - return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero - } - return sizeZigzag64Value, appendZigzag64Value - } - case reflect.Float32: - if pointer { - return sizeFloat32Ptr, appendFloat32Ptr - } - if slice { - if packed { - return sizeFloat32PackedSlice, appendFloat32PackedSlice - } - return sizeFloat32Slice, appendFloat32Slice - } - if nozero { - return sizeFloat32ValueNoZero, appendFloat32ValueNoZero - } - return sizeFloat32Value, appendFloat32Value - case reflect.Float64: - if pointer { - return sizeFloat64Ptr, appendFloat64Ptr - } - if slice { - if packed { - return sizeFloat64PackedSlice, appendFloat64PackedSlice - } - return sizeFloat64Slice, appendFloat64Slice - } - if nozero { - return sizeFloat64ValueNoZero, appendFloat64ValueNoZero - } - return sizeFloat64Value, appendFloat64Value - case reflect.String: - if validateUTF8 { - if pointer { - return sizeStringPtr, appendUTF8StringPtr - } - if slice { - return sizeStringSlice, appendUTF8StringSlice - } - if nozero { - return sizeStringValueNoZero, appendUTF8StringValueNoZero - } - return sizeStringValue, appendUTF8StringValue - } - if pointer { - return sizeStringPtr, appendStringPtr - } - if slice { - return sizeStringSlice, appendStringSlice - } - if nozero { - return sizeStringValueNoZero, appendStringValueNoZero - } - return sizeStringValue, appendStringValue - case reflect.Slice: - if slice { - return sizeBytesSlice, appendBytesSlice - } - if oneof { - // Oneof bytes field may also have "proto3" tag. - // We want to marshal it as a oneof field. Do this - // check before the proto3 check. - return sizeBytesOneof, appendBytesOneof - } - if proto3 { - return sizeBytes3, appendBytes3 - } - return sizeBytes, appendBytes - case reflect.Struct: - switch encoding { - case "group": - if slice { - return makeGroupSliceMarshaler(getMarshalInfo(t)) - } - return makeGroupMarshaler(getMarshalInfo(t)) - case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) - } - return makeMessageMarshaler(getMarshalInfo(t)) - } - } - panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) -} - -// Below are functions to size/marshal a specific type of a field. -// They are stored in the field's info, and called by function pointers. -// They have type sizer or marshaler. 
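Editorial note: the switch above is the heart of the table-driven encoder, so a hedged, self-contained sketch of the pattern may help (all names below are invented; the real table also keys on the wire encoding, pointer/slice shape, and the nozero flag). Reflection runs once, when the table is built; after that, marshaling is just calls through cached function values:

```go
// Hypothetical, stripped-down analogue of typeMarshaler. This is a sketch
// of the dispatch pattern, not the deleted code itself.
package main

import (
	"fmt"
	"reflect"
)

type sizer func(v reflect.Value) int
type marshaler func(b []byte, v reflect.Value) []byte

// pickMarshaler selects a (sizer, marshaler) pair by kind, mirroring the
// big switch in typeMarshaler in miniature.
func pickMarshaler(t reflect.Type) (sizer, marshaler) {
	switch t.Kind() {
	case reflect.Bool:
		return func(reflect.Value) int { return 1 },
			func(b []byte, v reflect.Value) []byte {
				if v.Bool() {
					return append(b, 1)
				}
				return append(b, 0)
			}
	case reflect.String:
		return func(v reflect.Value) int { return len(v.String()) },
			func(b []byte, v reflect.Value) []byte { return append(b, v.String()...) }
	default:
		panic("unknown kind: " + t.Kind().String())
	}
}

func main() {
	// The pair is chosen once per field type and cached; marshaling a
	// million messages repeats only the two calls below.
	size, marshal := pickMarshaler(reflect.TypeOf(""))
	v := reflect.ValueOf("hello")
	fmt.Println(size(v), string(marshal(nil, v))) // 5 hello
}
```

Paying for the kind switch once per field type, instead of once per field per message, is what makes this table approach faster than the old fully reflective encoder.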
- -func sizeFixed32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixed32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixedS32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFixedS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - return (4 + tagsize) * len(s) -} -func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFloat32Value(_ pointer, tagsize int) int { - return 4 + tagsize -} -func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toFloat32Ptr() - if p == nil { - return 0 - } - return 4 + tagsize -} -func sizeFloat32Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - return (4 + tagsize) * len(s) -} -func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return 0 - } - return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize -} -func sizeFixed64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixed64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFixedS64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFixedS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - return (8 + tagsize) * len(s) -} -func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeFloat64Value(_ pointer, tagsize int) int { - return 8 + tagsize -} -func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { - p := *ptr.toFloat64Ptr() - if p == nil { - return 0 - } - return 8 + tagsize -} -func sizeFloat64Slice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - return (8 + tagsize) * len(s) -} -func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return 0 - } - return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize -} -func sizeVarint32Value(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarint32Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarint32Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarint64Value(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - return SizeVarint(v) + tagsize -} -func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toUint64() - if v == 0 { - return 0 - } - return SizeVarint(v) + tagsize -} -func sizeVarint64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toUint64Ptr() - if p == nil { - return 0 - } - return SizeVarint(*p) + tagsize -} -func sizeVarint64Slice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(v) + tagsize - } - return n -} -func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeVarintS64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v)) + tagsize -} -func sizeVarintS64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - return SizeVarint(uint64(*p)) + tagsize -} -func sizeVarintS64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) + tagsize - } - return n -} -func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag32Value(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt32() - if v == 0 { - return 0 - } - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Ptr(ptr pointer, tagsize int) int { - p := ptr.getInt32Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize -} -func sizeZigzag32Slice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize - } - return n -} -func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { - s := ptr.getInt32Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeZigzag64Value(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toInt64() - if v == 0 { - return 0 - } - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Ptr(ptr pointer, tagsize int) int { - p := *ptr.toInt64Ptr() - if p == nil { - return 0 - } - v := *p - return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize -} -func sizeZigzag64Slice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize - } - return n -} -func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return 0 - } - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - return n + SizeVarint(uint64(n)) + tagsize -} -func sizeBoolValue(_ pointer, tagsize int) int { - return 1 + tagsize -} -func sizeBoolValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toBool() - if !v { - return 0 - } - return 1 + tagsize -} -func sizeBoolPtr(ptr pointer, tagsize int) int { - p := *ptr.toBoolPtr() - if p == nil { - return 0 - } - return 1 + tagsize -} -func sizeBoolSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - return (1 + tagsize) * len(s) -} -func sizeBoolPackedSlice(ptr pointer, tagsize int) int { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return 0 - } - return len(s) + SizeVarint(uint64(len(s))) + tagsize -} -func sizeStringValue(ptr pointer, tagsize int) int { - v := *ptr.toString() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringValueNoZero(ptr pointer, tagsize int) int { - v := *ptr.toString() - if v == "" { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringPtr(ptr pointer, tagsize int) int { - p := *ptr.toStringPtr() - if p == nil { - return 0 - } - v := *p - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeStringSlice(ptr pointer, tagsize int) int { - s := *ptr.toStringSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} -func 
sizeBytes(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if v == nil { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytes3(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - if len(v) == 0 { - return 0 - } - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesOneof(ptr pointer, tagsize int) int { - v := *ptr.toBytes() - return len(v) + SizeVarint(uint64(len(v))) + tagsize -} -func sizeBytesSlice(ptr pointer, tagsize int) int { - s := *ptr.toBytesSlice() - n := 0 - for _, v := range s { - n += len(v) + SizeVarint(uint64(len(v))) + tagsize - } - return n -} - -// appendFixed32 appends an encoded fixed32 to b. -func appendFixed32(b []byte, v uint32) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24)) - return b -} - -// appendFixed64 appends an encoded fixed64 to b. -func appendFixed64(b []byte, v uint64) []byte { - b = append(b, - byte(v), - byte(v>>8), - byte(v>>16), - byte(v>>24), - byte(v>>32), - byte(v>>40), - byte(v>>48), - byte(v>>56)) - return b -} - -// appendVarint appends an encoded varint to b. -func appendVarint(b []byte, v uint64) []byte { - // TODO: make 1-byte (maybe 2-byte) case inline-able, once we - // have non-leaf inliner. - switch { - case v < 1<<7: - b = append(b, byte(v)) - case v < 1<<14: - b = append(b, - byte(v&0x7f|0x80), - byte(v>>7)) - case v < 1<<21: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte(v>>14)) - case v < 1<<28: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte(v>>21)) - case v < 1<<35: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte(v>>28)) - case v < 1<<42: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte(v>>35)) - case v < 1<<49: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte(v>>42)) - case v < 1<<56: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte(v>>49)) - case v < 1<<63: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte(v>>56)) - default: - b = append(b, - byte(v&0x7f|0x80), - byte((v>>7)&0x7f|0x80), - byte((v>>14)&0x7f|0x80), - byte((v>>21)&0x7f|0x80), - byte((v>>28)&0x7f|0x80), - byte((v>>35)&0x7f|0x80), - byte((v>>42)&0x7f|0x80), - byte((v>>49)&0x7f|0x80), - byte((v>>56)&0x7f|0x80), - 1) - } - return b -} - -func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, *p) - return b, nil -} 
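Editorial note: appendVarint above unrolls one case per output length for speed. A hedged loop-form equivalent (helper names invented) shows the same base-128 encoding round-tripped, plus the zigzag mapping that the sizeZigzag*/appendZigzag* helpers apply to signed values first:

```go
package main

import "fmt"

// putVarint is the loop form of the unrolled appendVarint: little-endian
// base 128, with the high bit of each byte meaning "more bytes follow".
func putVarint(b []byte, v uint64) []byte {
	for v >= 1<<7 {
		b = append(b, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(b, byte(v))
}

// getVarint reverses the encoding; n is the number of bytes consumed,
// with n == 0 signalling truncated input.
func getVarint(b []byte) (v uint64, n int) {
	for shift := uint(0); n < len(b); shift += 7 {
		c := b[n]
		n++
		v |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return v, n
		}
	}
	return 0, 0
}

// zigzag32 is the (v<<1)^(v>>31) mapping used by the zigzag encoders:
// 0, -1, 1, -2, 2 map to 0, 1, 2, 3, 4, so small negatives stay short.
func zigzag32(v int32) uint32 { return uint32(v)<<1 ^ uint32(v>>31) }

func main() {
	b := putVarint(nil, 300)
	fmt.Printf("% x\n", b) // ac 02
	v, _ := getVarint(b)
	fmt.Println(v, zigzag32(-2)) // 300 3
}
```

Values under 128 fit in one byte, which is why the tag words precomputed by setTag (field number shifted left three bits, OR'd with the wire type) almost always cost a single byte on the wire.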
-func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, v) - } - return b, nil -} -func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - return b, nil -} -func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(*p)) - return b, nil -} -func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, uint32(v)) - } - return b, nil -} -func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float32bits(*ptr.toFloat32()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, v) - return b, nil -} -func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(*p)) - return b, nil -} -func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(4*len(s))) - for _, v := range s { - b = appendFixed32(b, math.Float32bits(v)) - } - return b, nil -} -func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, *p) - return b, nil -} -func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, v) - } - return b, nil -} -func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - return b, nil -} -func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(*p)) - return b, nil -} -func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, uint64(v)) - } - return b, nil -} -func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := math.Float64bits(*ptr.toFloat64()) - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, v) - return b, nil -} -func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toFloat64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(*p)) - return b, nil -} -func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toFloat64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(8*len(s))) - for _, v := range s { - b = appendFixed64(b, math.Float64bits(v)) - } - return b, nil -} -func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint32() - if v == 0 { - return b, nil - } - b = 
appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toUint64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - return b, nil -} -func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toUint64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, *p) - return b, nil -} -func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, v) - } - return b, nil -} -func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toUint64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(v) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, v) - } - return b, nil -} -func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - return b, nil -} -func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(*p)) - return b, nil -} -func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v)) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v)) - } - return b, nil -} -func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt32() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := ptr.getInt32Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - return b, nil -} -func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := ptr.getInt32Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) - } - return b, nil -} -func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toInt64() - if v == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toInt64Ptr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - v := *p - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - return b, nil -} -func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toInt64Slice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - // compute size - n := 0 - for _, v := range s { - n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) - } - b = appendVarint(b, uint64(n)) - for _, v := range s { - b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) - } - return b, nil -} -func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBool() - if !v { - return b, nil - } - b = appendVarint(b, wiretag) - b = append(b, 1) - return b, nil -} - -func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toBoolPtr() - if p == nil { - return b, nil - } - b = appendVarint(b, wiretag) - if *p { - b = append(b, 1) - } else { - b = append(b, 0) - } - return b, nil -} -func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBoolSlice() - if len(s) == 0 { - return b, nil - } - b = appendVarint(b, wiretag&^7|WireBytes) - b = appendVarint(b, uint64(len(s))) - for _, v := range s { - if v { - b = append(b, 1) - } else { - b = append(b, 0) - } - } - return b, nil -} -func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toString() - if v == "" { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toStringSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} -func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) 
- if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - v := *ptr.toString() - if v == "" { - return b, nil - } - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - p := *ptr.toStringPtr() - if p == nil { - return b, nil - } - v := *p - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - var invalidUTF8 bool - s := *ptr.toStringSlice() - for _, v := range s { - if !utf8.ValidString(v) { - invalidUTF8 = true - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - if invalidUTF8 { - return b, errInvalidUTF8 - } - return b, nil -} -func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if v == nil { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - if len(v) == 0 { - return b, nil - } - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - v := *ptr.toBytes() - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - return b, nil -} -func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { - s := *ptr.toBytesSlice() - for _, v := range s { - b = appendVarint(b, wiretag) - b = appendVarint(b, uint64(len(v))) - b = append(b, v...) - } - return b, nil -} - -// makeGroupMarshaler returns the sizer and marshaler for a group. -// u is the marshal info of the underlying message. -func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - return u.size(p) + 2*tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - var err error - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, p, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - return b, err - } -} - -// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. -// u is the marshal info of the underlying message. 
-func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - n += u.size(v) + 2*tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) // start group - b, err = u.marshal(b, v, deterministic) - b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMessageMarshaler returns the sizer and marshaler for a message field. -// u is the marshal info of the message. -func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.size(p) - return siz + SizeVarint(uint64(siz)) + tagsize - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - p := ptr.getPointer() - if p.isNil() { - return b, nil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(p) - b = appendVarint(b, uint64(siz)) - return u.marshal(b, p, deterministic) - } -} - -// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. -// u is the marshal info of the message. -func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { - return func(ptr pointer, tagsize int) int { - s := ptr.getPointerSlice() - n := 0 - for _, v := range s { - if v.isNil() { - continue - } - siz := u.size(v) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { - s := ptr.getPointerSlice() - var err error - var nerr nonFatal - for _, v := range s { - if v.isNil() { - return b, errRepeatedHasNil - } - b = appendVarint(b, wiretag) - siz := u.cachedsize(v) - b = appendVarint(b, uint64(siz)) - b, err = u.marshal(b, v, deterministic) - - if !nerr.Merge(err) { - if err == ErrNil { - err = errRepeatedHasNil - } - return b, err - } - } - return b, nerr.E - } -} - -// makeMapMarshaler returns the sizer and marshaler for a map field. -// f is the pointer to the reflect data structure of the field. -func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { - // figure out key and value type - t := f.Type - keyType := t.Key() - valType := t.Elem() - keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") - valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") - keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map - valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map - keyWireTag := 1<<3 | wiretype(keyTags[0]) - valWireTag := 2<<3 | wiretype(valTags[0]) - - // We create an interface to get the addresses of the map key and value. - // If value is pointer-typed, the interface is a direct interface, the - // idata itself is the value. Otherwise, the idata is the pointer to the - // value. - // Key cannot be pointer-typed. - valIsPtr := valType.Kind() == reflect.Ptr - - // If value is a message with nested maps, calling - // valSizer in marshal may be quadratic. We should use - // cached version in marshal (but not in size). 
- // If value is not message type, we don't have size cache, - // but it cannot be nested either. Just use valSizer. - valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { - u := getMarshalInfo(valType.Elem()) - valCachedSizer = func(ptr pointer, tagsize int) int { - // Same as message sizer, but use cache. - p := ptr.getPointer() - if p.isNil() { - return 0 - } - siz := u.cachedsize(p) - return siz + SizeVarint(uint64(siz)) + tagsize - } - } - return func(ptr pointer, tagsize int) int { - m := ptr.asPointerTo(t).Elem() // the map - n := 0 - for _, k := range m.MapKeys() { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - n += siz + SizeVarint(uint64(siz)) + tagsize - } - return n - }, - func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { - m := ptr.asPointerTo(t).Elem() // the map - var err error - keys := m.MapKeys() - if len(keys) > 1 && deterministic { - sort.Sort(mapKeys(keys)) - } - - var nerr nonFatal - for _, k := range keys { - ki := k.Interface() - vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value - b = appendVarint(b, tag) - siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) - b = appendVarint(b, uint64(siz)) - b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) - if !nerr.Merge(err) { - return b, err - } - b, err = valMarshaler(b, vaddr, valWireTag, deterministic) - if err != ErrNil && !nerr.Merge(err) { // allow nil value in map - return b, err - } - } - return b, nerr.E - } -} - -// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. -// fi is the marshal info of the field. -// f is the pointer to the reflect data structure of the field. -func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { - // Oneof field is an interface. We need to get the actual data type on the fly. - t := f.Type - return func(ptr pointer, _ int) int { - p := ptr.getInterfacePointer() - if p.isNil() { - return 0 - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - e := fi.oneofElems[telem] - return e.sizer(p, e.tagsize) - }, - func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { - p := ptr.getInterfacePointer() - if p.isNil() { - return b, nil - } - v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct - telem := v.Type() - if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { - return b, errOneofHasNil - } - e := fi.oneofElems[telem] - return e.marshaler(b, p, e.wiretag, deterministic) - } -} - -// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. -func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. 
- ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - mu.Unlock() - return n -} - -// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. -func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - // Not sure this is required, but the old code does it. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// message set format is: -// message MessageSet { -// repeated group Item = 1 { -// required int32 type_id = 2; -// required string message = 3; -// }; -// } - -// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field -// in message set format (above). -func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { - m, mu := ext.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - - n := 0 - for id, e := range m { - n += 2 // start group, end group. tag = 1 (size=1) - n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - siz := len(msgWithLen) - n += siz + 1 // message, tag = 3 (size=1) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, 1) // message, tag = 3 (size=1) - } - mu.Unlock() - return n -} - -// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) -// to the end of byte slice b. 
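// Editorial aside (hedged, derived from the wire constants used below): on
// the wire each message-set item is
//   0x0b            = 1<<3|WireStartGroup
//   0x10 <id>       = 2<<3|WireVarint  (the type_id)
//   0x1a <len><msg> = 3<<3|WireBytes   (the embedded message)
//   0x0c            = 1<<3|WireEndGroup
// which is exactly the accounting in sizeMessageSet above: 2 bytes of group
// framing, 1+SizeVarint(id) for type_id, and 1+len for the message field.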
-func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { - m, mu := ext.extensionsRead() - if m == nil { - return b, nil - } - mu.Lock() - defer mu.Unlock() - - var err error - var nerr nonFatal - - // Fast-path for common cases: zero or one extensions. - // Don't bother sorting the keys. - if len(m) <= 1 { - for id, e := range m { - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - if !nerr.Merge(err) { - return b, err - } - b = append(b, 1<<3|WireEndGroup) - } - return b, nerr.E - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, id := range keys { - e := m[int32(id)] - b = append(b, 1<<3|WireStartGroup) - b = append(b, 2<<3|WireVarint) - b = appendVarint(b, uint64(id)) - - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint - b = append(b, 3<<3|WireBytes) - b = append(b, msgWithLen...) - b = append(b, 1<<3|WireEndGroup) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) - b = append(b, 1<<3|WireEndGroup) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// sizeV1Extensions computes the size of encoded data for a V1-API extension field. -func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { - if m == nil { - return 0 - } - - n := 0 - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - n += ei.sizer(p, ei.tagsize) - } - return n -} - -// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. -func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { - if m == nil { - return b, nil - } - - // Sort the keys to provide a deterministic encoding. - keys := make([]int, 0, len(m)) - for k := range m { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - var err error - var nerr nonFatal - for _, k := range keys { - e := m[int32(k)] - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - b = append(b, e.enc...) 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - ei := u.getExtElemInfo(e.desc) - v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) - b, err = ei.marshaler(b, p, ei.wiretag, deterministic) - if !nerr.Merge(err) { - return b, err - } - } - return b, nerr.E -} - -// newMarshaler is the interface representing objects that can marshal themselves. -// -// This exists to support protoc-gen-go generated messages. -// The proto package will stop type-asserting to this interface in the future. -// -// DO NOT DEPEND ON THIS. -type newMarshaler interface { - XXX_Size() int - XXX_Marshal(b []byte, deterministic bool) ([]byte, error) -} - -// Size returns the encoded size of a protocol buffer message. -// This is the main entry point. -func Size(pb Message) int { - if m, ok := pb.(newMarshaler); ok { - return m.XXX_Size() - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, _ := m.Marshal() - return len(b) - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return 0 - } - var info InternalMessageInfo - return info.Size(pb) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, returning the data. -// This is the main entry point. -func Marshal(pb Message) ([]byte, error) { - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - b := make([]byte, 0, siz) - return m.XXX_Marshal(b, false) - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - return m.Marshal() - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return nil, ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - b := make([]byte, 0, siz) - return info.Marshal(b, pb, false) -} - -// Marshal takes a protocol buffer message -// and encodes it into the wire format, writing the result to the -// Buffer. -// This is an alternative entry point. It is not necessary to use -// a Buffer for most applications. -func (p *Buffer) Marshal(pb Message) error { - var err error - if m, ok := pb.(newMarshaler); ok { - siz := m.XXX_Size() - p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) - return err - } - if m, ok := pb.(Marshaler); ok { - // If the message can marshal itself, let it do it, for compatibility. - // NOTE: This is not efficient. - b, err := m.Marshal() - p.buf = append(p.buf, b...) - return err - } - // in case somehow we didn't generate the wrapper - if pb == nil { - return ErrNil - } - var info InternalMessageInfo - siz := info.Size(pb) - p.grow(siz) // make sure buf has enough capacity - p.buf, err = info.Marshal(p.buf, pb, p.deterministic) - return err -} - -// grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After grow(n), at least n bytes can be written to the -// buffer without another allocation. -func (p *Buffer) grow(n int) { - need := len(p.buf) + n - if need <= cap(p.buf) { - return - } - newCap := len(p.buf) * 2 - if newCap < need { - newCap = need - } - p.buf = append(make([]byte, 0, newCap), p.buf...) 
-} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go deleted file mode 100644 index 5525def..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ /dev/null @@ -1,654 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" -) - -// Merge merges the src message into dst. -// This assumes that dst and src of the same type and are non-nil. -func (a *InternalMessageInfo) Merge(dst, src Message) { - mi := atomicLoadMergeInfo(&a.merge) - if mi == nil { - mi = getMergeInfo(reflect.TypeOf(dst).Elem()) - atomicStoreMergeInfo(&a.merge, mi) - } - mi.merge(toPointer(&dst), toPointer(&src)) -} - -type mergeInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []mergeFieldInfo - unrecognized field // Offset of XXX_unrecognized -} - -type mergeFieldInfo struct { - field field // Offset of field, guaranteed to be valid - - // isPointer reports whether the value in the field is a pointer. - // This is true for the following situations: - // * Pointer to struct - // * Pointer to basic type (proto2 only) - // * Slice (first value in slice header is a pointer) - // * String (first value in string header is a pointer) - isPointer bool - - // basicWidth reports the width of the field assuming that it is directly - // embedded in the struct (as is the case for basic types in proto3). - // The possible values are: - // 0: invalid - // 1: bool - // 4: int32, uint32, float32 - // 8: int64, uint64, float64 - basicWidth int - - // Where dst and src are pointers to the types being merged. 
- merge func(dst, src pointer) -} - -var ( - mergeInfoMap = map[reflect.Type]*mergeInfo{} - mergeInfoLock sync.Mutex -) - -func getMergeInfo(t reflect.Type) *mergeInfo { - mergeInfoLock.Lock() - defer mergeInfoLock.Unlock() - mi := mergeInfoMap[t] - if mi == nil { - mi = &mergeInfo{typ: t} - mergeInfoMap[t] = mi - } - return mi -} - -// merge merges src into dst assuming they are both of type *mi.typ. -func (mi *mergeInfo) merge(dst, src pointer) { - if dst.isNil() { - panic("proto: nil destination") - } - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&mi.initialized) == 0 { - mi.computeMergeInfo() - } - - for _, fi := range mi.fields { - sfp := src.offset(fi.field) - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string - continue - } - if fi.basicWidth > 0 { - switch { - case fi.basicWidth == 1 && !*sfp.toBool(): - continue - case fi.basicWidth == 4 && *sfp.toUint32() == 0: - continue - case fi.basicWidth == 8 && *sfp.toUint64() == 0: - continue - } - } - } - - dfp := dst.offset(fi.field) - fi.merge(dfp, sfp) - } - - // TODO: Make this faster? - out := dst.asPointerTo(mi.typ).Elem() - in := src.asPointerTo(mi.typ).Elem() - if emIn, err := extendable(in.Addr().Interface()); err == nil { - emOut, _ := extendable(out.Addr().Interface()) - mIn, muIn := emIn.extensionsRead() - if mIn != nil { - mOut := emOut.extensionsWrite() - muIn.Lock() - mergeExtension(mOut, mIn) - muIn.Unlock() - } - } - - if mi.unrecognized.IsValid() { - if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { - *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) - } - } -} - -func (mi *mergeInfo) computeMergeInfo() { - mi.lock.Lock() - defer mi.lock.Unlock() - if mi.initialized != 0 { - return - } - t := mi.typ - n := t.NumField() - - props := GetProperties(t) - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - mfi := mergeFieldInfo{field: toField(&f)} - tf := f.Type - - // As an optimization, we can avoid the merge function call cost - // if we know for sure that the source will have no effect - // by checking if it is the zero value. - if unsafeAllowed { - switch tf.Kind() { - case reflect.Ptr, reflect.Slice, reflect.String: - // As a special case, we assume slices and strings are pointers - // since we know that the first field in the SliceSlice or - // StringHeader is a data pointer. - mfi.isPointer = true - case reflect.Bool: - mfi.basicWidth = 1 - case reflect.Int32, reflect.Uint32, reflect.Float32: - mfi.basicWidth = 4 - case reflect.Int64, reflect.Uint64, reflect.Float64: - mfi.basicWidth = 8 - } - } - - // Unwrap tf to get at its most basic type. - var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + tf.Name()) - } - - switch tf.Kind() { - case reflect.Int32: - switch { - case isSlice: // E.g., []int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Slice is not defined (see pointer_reflect.go). - /* - sfsp := src.toInt32Slice() - if *sfsp != nil { - dfsp := dst.toInt32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []int64{} - } - } - */ - sfs := src.getInt32Slice() - if sfs != nil { - dfs := dst.getInt32Slice() - dfs = append(dfs, sfs...) - if dfs == nil { - dfs = []int32{} - } - dst.setInt32Slice(dfs) - } - } - case isPointer: // E.g., *int32 - mfi.merge = func(dst, src pointer) { - // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). - /* - sfpp := src.toInt32Ptr() - if *sfpp != nil { - dfpp := dst.toInt32Ptr() - if *dfpp == nil { - *dfpp = Int32(**sfpp) - } else { - **dfpp = **sfpp - } - } - */ - sfp := src.getInt32Ptr() - if sfp != nil { - dfp := dst.getInt32Ptr() - if dfp == nil { - dst.setInt32Ptr(*sfp) - } else { - *dfp = *sfp - } - } - } - default: // E.g., int32 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt32(); v != 0 { - *dst.toInt32() = v - } - } - } - case reflect.Int64: - switch { - case isSlice: // E.g., []int64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toInt64Slice() - if *sfsp != nil { - dfsp := dst.toInt64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []int64{} - } - } - } - case isPointer: // E.g., *int64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toInt64Ptr() - if *sfpp != nil { - dfpp := dst.toInt64Ptr() - if *dfpp == nil { - *dfpp = Int64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., int64 - mfi.merge = func(dst, src pointer) { - if v := *src.toInt64(); v != 0 { - *dst.toInt64() = v - } - } - } - case reflect.Uint32: - switch { - case isSlice: // E.g., []uint32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint32Slice() - if *sfsp != nil { - dfsp := dst.toUint32Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint32{} - } - } - } - case isPointer: // E.g., *uint32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint32Ptr() - if *sfpp != nil { - dfpp := dst.toUint32Ptr() - if *dfpp == nil { - *dfpp = Uint32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint32 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint32(); v != 0 { - *dst.toUint32() = v - } - } - } - case reflect.Uint64: - switch { - case isSlice: // E.g., []uint64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toUint64Slice() - if *sfsp != nil { - dfsp := dst.toUint64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []uint64{} - } - } - } - case isPointer: // E.g., *uint64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toUint64Ptr() - if *sfpp != nil { - dfpp := dst.toUint64Ptr() - if *dfpp == nil { - *dfpp = Uint64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., uint64 - mfi.merge = func(dst, src pointer) { - if v := *src.toUint64(); v != 0 { - *dst.toUint64() = v - } - } - } - case reflect.Float32: - switch { - case isSlice: // E.g., []float32 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat32Slice() - if *sfsp != nil { - dfsp := dst.toFloat32Slice() - *dfsp = append(*dfsp, *sfsp...) 
- if *dfsp == nil { - *dfsp = []float32{} - } - } - } - case isPointer: // E.g., *float32 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat32Ptr() - if *sfpp != nil { - dfpp := dst.toFloat32Ptr() - if *dfpp == nil { - *dfpp = Float32(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float32 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat32(); v != 0 { - *dst.toFloat32() = v - } - } - } - case reflect.Float64: - switch { - case isSlice: // E.g., []float64 - mfi.merge = func(dst, src pointer) { - sfsp := src.toFloat64Slice() - if *sfsp != nil { - dfsp := dst.toFloat64Slice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []float64{} - } - } - } - case isPointer: // E.g., *float64 - mfi.merge = func(dst, src pointer) { - sfpp := src.toFloat64Ptr() - if *sfpp != nil { - dfpp := dst.toFloat64Ptr() - if *dfpp == nil { - *dfpp = Float64(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., float64 - mfi.merge = func(dst, src pointer) { - if v := *src.toFloat64(); v != 0 { - *dst.toFloat64() = v - } - } - } - case reflect.Bool: - switch { - case isSlice: // E.g., []bool - mfi.merge = func(dst, src pointer) { - sfsp := src.toBoolSlice() - if *sfsp != nil { - dfsp := dst.toBoolSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []bool{} - } - } - } - case isPointer: // E.g., *bool - mfi.merge = func(dst, src pointer) { - sfpp := src.toBoolPtr() - if *sfpp != nil { - dfpp := dst.toBoolPtr() - if *dfpp == nil { - *dfpp = Bool(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., bool - mfi.merge = func(dst, src pointer) { - if v := *src.toBool(); v { - *dst.toBool() = v - } - } - } - case reflect.String: - switch { - case isSlice: // E.g., []string - mfi.merge = func(dst, src pointer) { - sfsp := src.toStringSlice() - if *sfsp != nil { - dfsp := dst.toStringSlice() - *dfsp = append(*dfsp, *sfsp...) - if *dfsp == nil { - *dfsp = []string{} - } - } - } - case isPointer: // E.g., *string - mfi.merge = func(dst, src pointer) { - sfpp := src.toStringPtr() - if *sfpp != nil { - dfpp := dst.toStringPtr() - if *dfpp == nil { - *dfpp = String(**sfpp) - } else { - **dfpp = **sfpp - } - } - } - default: // E.g., string - mfi.merge = func(dst, src pointer) { - if v := *src.toString(); v != "" { - *dst.toString() = v - } - } - } - case reflect.Slice: - isProto3 := props.Prop[i].proto3 - switch { - case isPointer: - panic("bad pointer in byte slice case in " + tf.Name()) - case tf.Elem().Kind() != reflect.Uint8: - panic("bad element kind in byte slice case in " + tf.Name()) - case isSlice: // E.g., [][]byte - mfi.merge = func(dst, src pointer) { - sbsp := src.toBytesSlice() - if *sbsp != nil { - dbsp := dst.toBytesSlice() - for _, sb := range *sbsp { - if sb == nil { - *dbsp = append(*dbsp, nil) - } else { - *dbsp = append(*dbsp, append([]byte{}, sb...)) - } - } - if *dbsp == nil { - *dbsp = [][]byte{} - } - } - } - default: // E.g., []byte - mfi.merge = func(dst, src pointer) { - sbp := src.toBytes() - if *sbp != nil { - dbp := dst.toBytes() - if !isProto3 || len(*sbp) > 0 { - *dbp = append([]byte{}, *sbp...) 
- } - } - } - } - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) - case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sps := src.getPointerSlice() - if sps != nil { - dps := dst.getPointerSlice() - for _, sp := range sps { - var dp pointer - if !sp.isNil() { - dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) - } - dps = append(dps, dp) - } - if dps == nil { - dps = []pointer{} - } - dst.setPointerSlice(dps) - } - } - default: // E.g., *pb.T - mi := getMergeInfo(tf) - mfi.merge = func(dst, src pointer) { - sp := src.getPointer() - if !sp.isNil() { - dp := dst.getPointer() - if dp.isNil() { - dp = valToPointer(reflect.New(tf)) - dst.setPointer(dp) - } - mi.merge(dp, sp) - } - } - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic("bad pointer or slice in map case in " + tf.Name()) - default: // E.g., map[K]V - mfi.merge = func(dst, src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - dm := dst.asPointerTo(tf).Elem() - if dm.IsNil() { - dm.Set(reflect.MakeMap(tf)) - } - - switch tf.Elem().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(Clone(val.Interface().(Message))) - dm.SetMapIndex(key, val) - } - case reflect.Slice: // E.g. Bytes type (e.g., []byte) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) - dm.SetMapIndex(key, val) - } - default: // Basic type (e.g., string) - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - dm.SetMapIndex(key, val) - } - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic("bad pointer or slice in interface case in " + tf.Name()) - default: // E.g., interface{} - // TODO: Make this faster? - mfi.merge = func(dst, src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - du := dst.asPointerTo(tf).Elem() - typ := su.Elem().Type() - if du.IsNil() || du.Elem().Type() != typ { - du.Set(reflect.New(typ.Elem())) // Initialize interface if empty - } - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - dv := du.Elem().Elem().Field(0) - if dv.Kind() == reflect.Ptr && dv.IsNil() { - dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - Merge(dv.Interface().(Message), sv.Interface().(Message)) - case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) - dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) - default: // Basic type (e.g., string) - dv.Set(sv) - } - } - } - } - default: - panic(fmt.Sprintf("merger not found for type:%s", tf)) - } - mi.fields = append(mi.fields, mfi) - } - - mi.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - mi.unrecognized = toField(&f) - } - - atomic.StoreInt32(&mi.initialized, 1) -} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go deleted file mode 100644 index acee2fc..0000000 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ /dev/null @@ -1,2053 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -import ( - "errors" - "fmt" - "io" - "math" - "reflect" - "strconv" - "strings" - "sync" - "sync/atomic" - "unicode/utf8" -) - -// Unmarshal is the entry point from the generated .pb.go files. -// This function is not intended to be used by non-generated code. -// This function is not subject to any compatibility guarantee. -// msg contains a pointer to a protocol buffer struct. -// b is the data to be unmarshaled into the protocol buffer. -// a is a pointer to a place to store cached unmarshal information. -func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { - // Load the unmarshal information for this message type. - // The atomic load ensures memory consistency. - u := atomicLoadUnmarshalInfo(&a.unmarshal) - if u == nil { - // Slow path: find unmarshal info for msg, update a with it. - u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) - atomicStoreUnmarshalInfo(&a.unmarshal, u) - } - // Then do the unmarshaling. 
- err := u.unmarshal(toPointer(&msg), b) - return err -} - -type unmarshalInfo struct { - typ reflect.Type // type of the protobuf struct - - // 0 = only typ field is initialized - // 1 = completely initialized - initialized int32 - lock sync.Mutex // prevents double initialization - dense []unmarshalFieldInfo // fields indexed by tag # - sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # - reqFields []string // names of required fields - reqMask uint64 // 1<<len(reqFields)-1 when reqFields is fully initialized - unrecognized field // offset of []byte to put unrecognized data (or invalidField if we should throw it away) - extensions field // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist - oldExtensions field // offset of old-form extensions field (of type map[int]Extension) - extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid - isMessageSet bool // if true, implies extensions field is valid -} - -// An unmarshaler takes a stream of bytes and a pointer to a field of a message. -// It decodes the field, stores it at f, and returns the unused bytes. -// w is the wire encoding. -// b is the data after the tag and wire encoding have been read. -type unmarshaler func(b []byte, f pointer, w int) ([]byte, error) - -type unmarshalFieldInfo struct { - // location of the field in the proto message structure. - field field - - // function to unmarshal the data for the field. - unmarshal unmarshaler - - // if a required field, contains a single set bit at this field's index in the required field list. - reqMask uint64 - - name string // name of the field, for error reporting -} - -var ( - unmarshalInfoMap = map[reflect.Type]*unmarshalInfo{} - unmarshalInfoLock sync.Mutex -) - -// getUnmarshalInfo returns the data structure which can be -// subsequently used to unmarshal a message of the given type. -// t is the type of the message (note: not pointer to message). -func getUnmarshalInfo(t reflect.Type) *unmarshalInfo { - // It would be correct to return a new unmarshalInfo - // unconditionally. We would end up allocating one - // per occurrence of that type as a message or submessage. - // We use a cache here just to reduce memory usage. - unmarshalInfoLock.Lock() - defer unmarshalInfoLock.Unlock() - u := unmarshalInfoMap[t] - if u == nil { - u = &unmarshalInfo{typ: t} - // Note: we just set the type here. The rest of the fields - // will be initialized on first use. - unmarshalInfoMap[t] = u - } - return u -} - -// unmarshal does the main work of unmarshaling a message. -// u provides type information used to unmarshal the message. -// m is a pointer to a protocol buffer message. -// b is a byte stream to unmarshal into m. -// This is top routine used when recursively unmarshaling submessages. -func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { - if atomic.LoadInt32(&u.initialized) == 0 { - u.computeUnmarshalInfo() - } - if u.isMessageSet { - return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions()) - } - var reqMask uint64 // bitmask of required fields we've seen. - var errLater error - for len(b) > 0 { - // Read tag and wire type. - // Special case 1 and 2 byte varints. - var x uint64 - if b[0] < 128 { - x = uint64(b[0]) - b = b[1:] - } else if len(b) >= 2 && b[1] < 128 { - x = uint64(b[0]&0x7f) + uint64(b[1])<<7 - b = b[2:] - } else { - var n int - x, n = decodeVarint(b) - if n == 0 { - return io.ErrUnexpectedEOF - } - b = b[n:] - } - tag := x >> 3 - wire := int(x) & 7 - - // Dispatch on the tag to one of the unmarshal* functions below. - var f unmarshalFieldInfo - if tag < uint64(len(u.dense)) { - f = u.dense[tag] - } else { - f = u.sparse[tag] - } - if fn := f.unmarshal; fn != nil { - var err error - b, err = fn(b, m.offset(f.field), wire) - if err == nil { - reqMask |= f.reqMask - continue - } - if r, ok := err.(*RequiredNotSetError); ok { - // Remember this error, but keep parsing. We need to produce - // a full parse even if a required field is missing. - if errLater == nil { - errLater = r - } - reqMask |= f.reqMask - continue - } - if err != errInternalBadWireType { - if err == errInvalidUTF8 { - if errLater == nil { - fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name - errLater = &invalidUTF8Error{fullName} - } - continue - } - return err - } - // Fragments with bad wire type are treated as unknown fields. - } - - // Unknown tag. - if !u.unrecognized.IsValid() { - // Don't keep unrecognized data; just skip it. - var err error - b, err = skipField(b, wire) - if err != nil { - return err - } - continue - } - // Keep unrecognized data around. - // maybe in extensions, maybe in the unrecognized field. - z := m.offset(u.unrecognized).toBytes() - var emap map[int32]Extension - var e Extension - for _, r := range u.extensionRanges { - if uint64(r.Start) <= tag && tag <= uint64(r.End) { - if u.extensions.IsValid() { - mp := m.offset(u.extensions).toExtensions() - emap = mp.extensionsWrite() - e = emap[int32(tag)] - z = &e.enc - break - } - if u.oldExtensions.IsValid() { - p := m.offset(u.oldExtensions).toOldExtensions() - emap = *p - if emap == nil { - emap = map[int32]Extension{} - *p = emap - } - e = emap[int32(tag)] - z = &e.enc - break - } - panic("no extensions field available") - } - } - - // Use wire type to skip data. - var err error - b0 := b - b, err = skipField(b, wire) - if err != nil { - return err - } - *z = encodeVarint(*z, tag<<3|uint64(wire)) - *z = append(*z, b0[:len(b0)-len(b)]...) - - if emap != nil { - emap[int32(tag)] = e - } - } - if reqMask != u.reqMask && errLater == nil { - // A required field of this message is missing. - for _, n := range u.reqFields { - if reqMask&1 == 0 { - errLater = &RequiredNotSetError{n} - } - reqMask >>= 1 - } - } - return errLater -} - -// computeUnmarshalInfo fills in u with information for use -// in unmarshaling protocol buffers of type u.typ. -func (u *unmarshalInfo) computeUnmarshalInfo() { - u.lock.Lock() - defer u.lock.Unlock() - if u.initialized != 0 { - return - } - t := u.typ - n := t.NumField() - - // Set up the "not found" value for the unrecognized byte buffer. - // This is the default for proto3.
- u.unrecognized = invalidField - u.extensions = invalidField - u.oldExtensions = invalidField - - // List of the generated type and offset for each oneof field. - type oneofField struct { - ityp reflect.Type // interface type of oneof field - field field // offset in containing message - } - var oneofFields []oneofField - - for i := 0; i < n; i++ { - f := t.Field(i) - if f.Name == "XXX_unrecognized" { - // The byte slice used to hold unrecognized input is special. - if f.Type != reflect.TypeOf(([]byte)(nil)) { - panic("bad type for XXX_unrecognized field: " + f.Type.Name()) - } - u.unrecognized = toField(&f) - continue - } - if f.Name == "XXX_InternalExtensions" { - // Ditto here. - if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { - panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) - } - u.extensions = toField(&f) - if f.Tag.Get("protobuf_messageset") == "1" { - u.isMessageSet = true - } - continue - } - if f.Name == "XXX_extensions" { - // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) - } - u.oldExtensions = toField(&f) - continue - } - if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { - continue - } - - oneof := f.Tag.Get("protobuf_oneof") - if oneof != "" { - oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) - // The rest of oneof processing happens below. - continue - } - - tags := f.Tag.Get("protobuf") - tagArray := strings.Split(tags, ",") - if len(tagArray) < 2 { - panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) - } - tag, err := strconv.Atoi(tagArray[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tagArray[1]) - } - - name := "" - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - } - - // Extract unmarshaling function from the field (its type and tags). - unmarshal := fieldUnmarshaler(&f) - - // Required field? - var reqMask uint64 - if tagArray[2] == "req" { - bit := len(u.reqFields) - u.reqFields = append(u.reqFields, name) - reqMask = uint64(1) << uint(bit) - // TODO: if we have more than 64 required fields, we end up - // not verifying that all required fields are present. - // Fix this, perhaps using a count of required fields? - } - - // Store the info in the correct slot in the message. - u.setTag(tag, toField(&f), unmarshal, reqMask, name) - } - - // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break - } - } - - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. 
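The tag-parsing steps above split a generated `protobuf:"..."` struct tag into its encoding, field number, cardinality, and name= option. A standalone sketch of that parse, with a hypothetical helper name and a made-up example tag, before the oneof matching loop below:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseProtobufTag splits a protobuf struct tag the same way the deleted
// computeUnmarshalInfo does: encoding, field number, cardinality, then
// options such as name=. (Standalone sketch, not the vendored code.)
func parseProtobufTag(tag string) (encoding string, num int, required bool, name string, err error) {
	parts := strings.Split(tag, ",")
	if len(parts) < 3 {
		return "", 0, false, "", fmt.Errorf("protobuf tag not enough fields: %q", tag)
	}
	encoding = parts[0]
	num, err = strconv.Atoi(parts[1])
	if err != nil {
		return "", 0, false, "", fmt.Errorf("protobuf tag field not an integer: %q", parts[1])
	}
	required = parts[2] == "req"
	for _, opt := range parts[3:] {
		if strings.HasPrefix(opt, "name=") {
			name = strings.TrimPrefix(opt, "name=")
		}
	}
	return encoding, num, required, name, nil
}

func main() {
	enc, num, req, name, _ := parseProtobufTag("varint,3,opt,name=example_field,proto3")
	fmt.Println(enc, num, req, name) // varint 3 false example_field
}
```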
- for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) - } - } - - } - - // Get extension ranges, if any. - fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") - if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { - panic("a message with extensions, but no extensions field in " + t.Name()) - } - u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) - } - - // Explicitly disallow tag 0. This will ensure we flag an error - // when decoding a buffer of all zeros. Without this code, we - // would decode and skip an all-zero buffer of even length. - // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. - u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { - return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) - }, 0, "") - - // Set mask for required field check. - u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 - - atomic.StoreInt32(&u.initialized, 1) -} - -// setTag stores the unmarshal information for the given tag. -// tag = tag # for field -// field/unmarshal = unmarshal info for that field. -// reqMask = if required, bitmask for field position in required field list. 0 otherwise. -// name = short name of the field. -func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) { - i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name} - n := u.typ.NumField() - if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? - for len(u.dense) <= tag { - u.dense = append(u.dense, unmarshalFieldInfo{}) - } - u.dense[tag] = i - return - } - if u.sparse == nil { - u.sparse = map[uint64]unmarshalFieldInfo{} - } - u.sparse[uint64(tag)] = i -} - -// fieldUnmarshaler returns an unmarshaler for the given field. -func fieldUnmarshaler(f *reflect.StructField) unmarshaler { - if f.Type.Kind() == reflect.Map { - return makeUnmarshalMap(f) - } - return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) -} - -// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. -func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { - tagArray := strings.Split(tags, ",") - encoding := tagArray[0] - name := "unknown" - proto3 := false - validateUTF8 := true - for _, tag := range tagArray[3:] { - if strings.HasPrefix(tag, "name=") { - name = tag[5:] - } - if tag == "proto3" { - proto3 = true - } - } - validateUTF8 = validateUTF8 && proto3 - - // Figure out packaging (pointer, slice, or both) - slice := false - pointer := false - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - slice = true - t = t.Elem() - } - if t.Kind() == reflect.Ptr { - pointer = true - t = t.Elem() - } - - // We'll never have both pointer and slice for basic types.
- if pointer && slice && t.Kind() != reflect.Struct { - panic("both pointer and slice for basic type in " + t.Name()) - } - - switch t.Kind() { - case reflect.Bool: - if pointer { - return unmarshalBoolPtr - } - if slice { - return unmarshalBoolSlice - } - return unmarshalBoolValue - case reflect.Int32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixedS32Ptr - } - if slice { - return unmarshalFixedS32Slice - } - return unmarshalFixedS32Value - case "varint": - // this could be int32 or enum - if pointer { - return unmarshalInt32Ptr - } - if slice { - return unmarshalInt32Slice - } - return unmarshalInt32Value - case "zigzag32": - if pointer { - return unmarshalSint32Ptr - } - if slice { - return unmarshalSint32Slice - } - return unmarshalSint32Value - } - case reflect.Int64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixedS64Ptr - } - if slice { - return unmarshalFixedS64Slice - } - return unmarshalFixedS64Value - case "varint": - if pointer { - return unmarshalInt64Ptr - } - if slice { - return unmarshalInt64Slice - } - return unmarshalInt64Value - case "zigzag64": - if pointer { - return unmarshalSint64Ptr - } - if slice { - return unmarshalSint64Slice - } - return unmarshalSint64Value - } - case reflect.Uint32: - switch encoding { - case "fixed32": - if pointer { - return unmarshalFixed32Ptr - } - if slice { - return unmarshalFixed32Slice - } - return unmarshalFixed32Value - case "varint": - if pointer { - return unmarshalUint32Ptr - } - if slice { - return unmarshalUint32Slice - } - return unmarshalUint32Value - } - case reflect.Uint64: - switch encoding { - case "fixed64": - if pointer { - return unmarshalFixed64Ptr - } - if slice { - return unmarshalFixed64Slice - } - return unmarshalFixed64Value - case "varint": - if pointer { - return unmarshalUint64Ptr - } - if slice { - return unmarshalUint64Slice - } - return unmarshalUint64Value - } - case reflect.Float32: - if pointer { - return unmarshalFloat32Ptr - } - if slice { - return unmarshalFloat32Slice - } - return unmarshalFloat32Value - case reflect.Float64: - if pointer { - return unmarshalFloat64Ptr - } - if slice { - return unmarshalFloat64Slice - } - return unmarshalFloat64Value - case reflect.Map: - panic("map type in typeUnmarshaler in " + t.Name()) - case reflect.Slice: - if pointer { - panic("bad pointer in slice case in " + t.Name()) - } - if slice { - return unmarshalBytesSlice - } - return unmarshalBytesValue - case reflect.String: - if validateUTF8 { - if pointer { - return unmarshalUTF8StringPtr - } - if slice { - return unmarshalUTF8StringSlice - } - return unmarshalUTF8StringValue - } - if pointer { - return unmarshalStringPtr - } - if slice { - return unmarshalStringSlice - } - return unmarshalStringValue - case reflect.Struct: - // message or group field - if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) - } - switch encoding { - case "bytes": - if slice { - return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) - case "group": - if slice { - return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) - } - return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) - } - } - panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) -} - -// Below are all the unmarshalers for individual fields of various types. 
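All of the unmarshal*Slice functions that follow share one packed-repeated pattern for WireBytes input: read a byte count, carve off that sub-buffer, decode elements until it is exhausted, and return the untouched remainder. A self-contained sketch of that pattern for packed uint64 values, using hypothetical helper names rather than the vendored ones:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the helper used throughout this file: it returns the
// value and the number of bytes consumed, or (0, 0) on bad input.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	var shift uint
	for i, c := range b {
		x |= uint64(c&0x7f) << shift
		if c < 0x80 {
			return x, i + 1
		}
		shift += 7
		if shift >= 64 {
			return 0, 0 // varint too long
		}
	}
	return 0, 0 // truncated
}

// decodePackedUint64 follows the packed-repeated shape of the slice
// unmarshalers below: length prefix, sub-buffer, loop. (Hypothetical helper.)
func decodePackedUint64(b []byte) ([]uint64, []byte, error) {
	x, n := decodeVarint(b)
	if n == 0 || x > uint64(len(b[n:])) {
		return nil, nil, errors.New("unexpected EOF")
	}
	b = b[n:]
	payload, rest := b[:x], b[x:]
	var out []uint64
	for len(payload) > 0 {
		v, k := decodeVarint(payload)
		if k == 0 {
			return nil, nil, errors.New("unexpected EOF")
		}
		out = append(out, v)
		payload = payload[k:]
	}
	return out, rest, nil
}

func main() {
	// Length 3, then the varints 1 and 150 (0x96 0x01).
	vals, rest, err := decodePackedUint64([]byte{0x03, 0x01, 0x96, 0x01})
	fmt.Println(vals, len(rest), err) // [1 150] 0 <nil>
}
```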
- -func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64() = v - return b, nil -} - -func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x) - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64() = v - return b, nil -} - -func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - *f.toInt64Ptr() = &v - return b, nil -} - -func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int64(x>>1) ^ int64(x)<<63>>63 - s := f.toInt64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64() = v - return b, nil -} - -func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - *f.toUint64Ptr() = &v - return b, nil -} - -func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 
{ - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint64(x) - s := f.toUint64Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - *f.toInt32() = v - return b, nil -} - -func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x) - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - *f.toInt32() = v - return b, nil -} - -func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.setInt32Ptr(v) - return b, nil -} - -func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := int32(x>>1) ^ int32(x)<<31>>31 - f.appendInt32Slice(v) - return b, nil -} - -func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32() = v - return b, nil -} - -func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - *f.toUint32Ptr() = &v - return b, nil -} - -func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - v := uint32(x) - s := f.toUint32Slice() - *s = append(*s, v) - return b, nil -} - -func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64() = v - return b[8:], nil -} - -func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - *f.toUint64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 - s := f.toUint64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64() = v - return b[8:], nil -} - -func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - *f.toInt64Ptr() = &v - return b[8:], nil -} - -func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 - s := f.toInt64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32() = v - return b[4:], nil -} - -func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - *f.toUint32Ptr() = &v - return b[4:], nil -} - -func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 - s := f.toUint32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - *f.toInt32() = v - return b[4:], nil -} - -func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.setInt32Ptr(v) - return b[4:], nil -} - -func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 - f.appendInt32Slice(v) - return b[4:], nil -} - -func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - // Note: any length varint is allowed, even though any sane - // encoder will use one byte. 
- // See https://github.com/golang/protobuf/issues/76 - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - // TODO: check if x>1? Tests seem to indicate no. - v := x != 0 - *f.toBool() = v - return b[n:], nil -} - -func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - *f.toBoolPtr() = &v - return b[n:], nil -} - -func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - x, n = decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - b = b[n:] - } - return res, nil - } - if w != WireVarint { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - v := x != 0 - s := f.toBoolSlice() - *s = append(*s, v) - return b[n:], nil -} - -func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64() = v - return b[8:], nil -} - -func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - *f.toFloat64Ptr() = &v - return b[8:], nil -} - -func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - b = b[8:] - } - return res, nil - } - if w != WireFixed64 { - return b, errInternalBadWireType - } - if len(b) < 8 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) - s := f.toFloat64Slice() - *s = append(*s, v) - return b[8:], nil -} - -func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32() = v - return b[4:], nil -} - -func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - *f.toFloat32Ptr() = &v - return b[4:], nil -} - -func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { - if w == WireBytes { // packed - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - res := b[x:] - b = b[:x] - for len(b) > 0 { - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - b = b[4:] - } - return res, nil - } - if w != WireFixed32 { - return b, errInternalBadWireType - } - if len(b) < 4 { - return nil, io.ErrUnexpectedEOF - } - v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) - s := f.toFloat32Slice() - *s = append(*s, v) - return b[4:], nil -} - -func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - return b[x:], nil -} - -func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - return b[x:], nil -} - -func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - return b[x:], nil -} - -func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toString() = v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - *f.toStringPtr() = &v - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := string(b[:x]) - s := f.toStringSlice() - *s = append(*s, v) - if !utf8.ValidString(v) { - return b[x:], errInvalidUTF8 - } - return b[x:], nil -} - -var emptyBuf [0]byte - -func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 
0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // The use of append here is a trick which avoids the zeroing - // that would be required if we used a make/copy pair. - // We append to emptyBuf instead of nil because we want - // a non-nil result even when the length is 0. - v := append(emptyBuf[:], b[:x]...) - *f.toBytes() = v - return b[x:], nil -} - -func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := append(emptyBuf[:], b[:x]...) - s := f.toBytesSlice() - *s = append(*s, v) - return b[x:], nil -} - -func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - // First read the message field to see if something is there. - // The semantics of multiple submessages are weird. Instead of - // the last one winning (as it is for all other fields), multiple - // submessages are merged. - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[x:], err - } -} - -func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireBytes { - return b, errInternalBadWireType - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[x:], err - } -} - -func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := f.getPointer() - if v.isNil() { - v = valToPointer(reflect.New(sub.typ)) - f.setPointer(v) - } - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." + r.field - } else { - return nil, err - } - } - return b[y:], err - } -} - -func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { - return func(b []byte, f pointer, w int) ([]byte, error) { - if w != WireStartGroup { - return b, errInternalBadWireType - } - x, y := findEndGroup(b) - if x < 0 { - return nil, io.ErrUnexpectedEOF - } - v := valToPointer(reflect.New(sub.typ)) - err := sub.unmarshal(v, b[:x]) - if err != nil { - if r, ok := err.(*RequiredNotSetError); ok { - r.field = name + "." 
+ r.field - } else { - return nil, err - } - } - f.appendPointer(v) - return b[y:], err - } -} - -func makeUnmarshalMap(f *reflect.StructField) unmarshaler { - t := f.Type - kt := t.Key() - vt := t.Elem() - unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) - return func(b []byte, f pointer, w int) ([]byte, error) { - // The map entry is a submessage. Figure out how big it is. - if w != WireBytes { - return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) - } - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - if x > uint64(len(b)) { - return nil, io.ErrUnexpectedEOF - } - r := b[x:] // unused data to return - b = b[:x] // data for map entry - - // Note: we could use #keys * #values ~= 200 functions - // to do map decoding without reflection. Probably not worth it. - // Maps will be somewhat slow. Oh well. - - // Read key and value from data. - var nerr nonFatal - k := reflect.New(kt) - v := reflect.New(vt) - for len(b) > 0 { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - wire := int(x) & 7 - b = b[n:] - - var err error - switch x >> 3 { - case 1: - b, err = unmarshalKey(b, valToPointer(k), wire) - case 2: - b, err = unmarshalVal(b, valToPointer(v), wire) - default: - err = errInternalBadWireType // skip unknown tag - } - - if nerr.Merge(err) { - continue - } - if err != errInternalBadWireType { - return nil, err - } - - // Skip past unknown fields. - b, err = skipField(b, wire) - if err != nil { - return nil, err - } - } - - // Get map, allocate if needed. - m := f.asPointerTo(t).Elem() // an addressable map[K]T - if m.IsNil() { - m.Set(reflect.MakeMap(t)) - } - - // Insert into map. - m.SetMapIndex(k.Elem(), v.Elem()) - - return r, nerr.E - } -} - -// makeUnmarshalOneof makes an unmarshaler for oneof fields. -// for: -// message Msg { -// oneof F { -// int64 X = 1; -// float64 Y = 2; -// } -// } -// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). -// ityp is the interface type of the oneof field (e.g. isMsg_F). -// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). -// Note that this function will be called once for each case in the oneof. -func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { - sf := typ.Field(0) - field0 := toField(&sf) - return func(b []byte, f pointer, w int) ([]byte, error) { - // Allocate holder for value. - v := reflect.New(typ) - - // Unmarshal data into holder. - // We unmarshal into the first field of the holder object. - var err error - var nerr nonFatal - b, err = unmarshal(b, valToPointer(v).offset(field0), w) - if !nerr.Merge(err) { - return nil, err - } - - // Write pointer to holder into target field. - f.asPointerTo(ityp).Elem().Set(v) - - return b, nerr.E - } -} - -// Error used by decode internally. -var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") - -// skipField skips past a field of type wire and returns the remaining bytes. 
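The makeUnmarshalMap helper above leans on the fact that a protobuf map field is encoded exactly like a repeated submessage whose field 1 is the key and field 2 is the value. A minimal standalone sketch of that wire layout — appendVarint is an illustrative stand-in for the deleted encodeVarint, and the Entry message named in the comment is hypothetical:

package main

import "fmt"

// appendVarint mirrors the deleted encodeVarint helper: base-128, low group first.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// A map<string,int32> entry {"id": 7} has the same bytes as the submessage
//   message Entry { string key = 1; int32 value = 2; }
func main() {
	var entry []byte
	entry = appendVarint(entry, 1<<3|2) // field 1, wire type 2 (length-delimited)
	entry = appendVarint(entry, 2)      // key length
	entry = append(entry, "id"...)
	entry = appendVarint(entry, 2<<3|0) // field 2, wire type 0 (varint)
	entry = appendVarint(entry, 7)
	fmt.Printf("%% x\n", entry) // 0a 02 69 64 10 07
}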
-func skipField(b []byte, wire int) ([]byte, error) { - switch wire { - case WireVarint: - _, k := decodeVarint(b) - if k == 0 { - return b, io.ErrUnexpectedEOF - } - b = b[k:] - case WireFixed32: - if len(b) < 4 { - return b, io.ErrUnexpectedEOF - } - b = b[4:] - case WireFixed64: - if len(b) < 8 { - return b, io.ErrUnexpectedEOF - } - b = b[8:] - case WireBytes: - m, k := decodeVarint(b) - if k == 0 || uint64(len(b)-k) < m { - return b, io.ErrUnexpectedEOF - } - b = b[uint64(k)+m:] - case WireStartGroup: - _, i := findEndGroup(b) - if i == -1 { - return b, io.ErrUnexpectedEOF - } - b = b[i:] - default: - return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) - } - return b, nil -} - -// findEndGroup finds the index of the next EndGroup tag. -// Groups may be nested, so the "next" EndGroup tag is the first -// unpaired EndGroup. -// findEndGroup returns the indexes of the start and end of the EndGroup tag. -// Returns (-1,-1) if it can't find one. -func findEndGroup(b []byte) (int, int) { - depth := 1 - i := 0 - for { - x, n := decodeVarint(b[i:]) - if n == 0 { - return -1, -1 - } - j := i - i += n - switch x & 7 { - case WireVarint: - _, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - case WireFixed32: - if len(b)-4 < i { - return -1, -1 - } - i += 4 - case WireFixed64: - if len(b)-8 < i { - return -1, -1 - } - i += 8 - case WireBytes: - m, k := decodeVarint(b[i:]) - if k == 0 { - return -1, -1 - } - i += k - if uint64(len(b)-i) < m { - return -1, -1 - } - i += int(m) - case WireStartGroup: - depth++ - case WireEndGroup: - depth-- - if depth == 0 { - return j, i - } - default: - return -1, -1 - } - } -} - -// encodeVarint appends a varint-encoded integer to b and returns the result. -func encodeVarint(b []byte, x uint64) []byte { - for x >= 1<<7 { - b = append(b, byte(x&0x7f|0x80)) - x >>= 7 - } - return append(b, byte(x)) -} - -// decodeVarint reads a varint-encoded integer from b. -// Returns the decoded integer and the number of bytes read. -// If there is an error, it returns 0,0. 
-func decodeVarint(b []byte) (uint64, int) { - var x, y uint64 - if len(b) == 0 { - goto bad - } - x = uint64(b[0]) - if x < 0x80 { - return x, 1 - } - x -= 0x80 - - if len(b) <= 1 { - goto bad - } - y = uint64(b[1]) - x += y << 7 - if y < 0x80 { - return x, 2 - } - x -= 0x80 << 7 - - if len(b) <= 2 { - goto bad - } - y = uint64(b[2]) - x += y << 14 - if y < 0x80 { - return x, 3 - } - x -= 0x80 << 14 - - if len(b) <= 3 { - goto bad - } - y = uint64(b[3]) - x += y << 21 - if y < 0x80 { - return x, 4 - } - x -= 0x80 << 21 - - if len(b) <= 4 { - goto bad - } - y = uint64(b[4]) - x += y << 28 - if y < 0x80 { - return x, 5 - } - x -= 0x80 << 28 - - if len(b) <= 5 { - goto bad - } - y = uint64(b[5]) - x += y << 35 - if y < 0x80 { - return x, 6 - } - x -= 0x80 << 35 - - if len(b) <= 6 { - goto bad - } - y = uint64(b[6]) - x += y << 42 - if y < 0x80 { - return x, 7 - } - x -= 0x80 << 42 - - if len(b) <= 7 { - goto bad - } - y = uint64(b[7]) - x += y << 49 - if y < 0x80 { - return x, 8 - } - x -= 0x80 << 49 - - if len(b) <= 8 { - goto bad - } - y = uint64(b[8]) - x += y << 56 - if y < 0x80 { - return x, 9 - } - x -= 0x80 << 56 - - if len(b) <= 9 { - goto bad - } - y = uint64(b[9]) - x += y << 63 - if y < 2 { - return x, 10 - } - -bad: - return 0, 0 -} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go deleted file mode 100644 index 1aaee72..0000000 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ /dev/null @@ -1,843 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for writing the text protocol buffer format. 
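The encodeVarint/decodeVarint pair deleted above implements protobuf's base-128 varints: seven payload bits per byte, least-significant group first, high bit marking continuation. A minimal sketch, cross-checked against the standard library's decoder, which uses the same unsigned wire format:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeVarint mirrors the deleted helper above.
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80)) // low 7 bits, continuation bit set
		x >>= 7
	}
	return append(b, byte(x)) // final byte, high bit clear
}

func main() {
	b := encodeVarint(nil, 300)
	fmt.Printf("%% x\n", b) // ac 02
	v, n := binary.Uvarint(b) // stdlib agrees on value and byte count
	fmt.Println(v, n)         // 300 2
}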
- -import ( - "bufio" - "bytes" - "encoding" - "errors" - "fmt" - "io" - "log" - "math" - "reflect" - "sort" - "strings" -) - -var ( - newline = []byte("\n") - spaces = []byte(" ") - endBraceNewline = []byte("}\n") - backslashN = []byte{'\\', 'n'} - backslashR = []byte{'\\', 'r'} - backslashT = []byte{'\\', 't'} - backslashDQ = []byte{'\\', '"'} - backslashBS = []byte{'\\', '\\'} - posInf = []byte("inf") - negInf = []byte("-inf") - nan = []byte("nan") -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -// textWriter is an io.Writer that tracks its indentation level. -type textWriter struct { - ind int - complete bool // if the current position is a complete line - compact bool // whether to write out as a one-liner - w writer -} - -func (w *textWriter) WriteString(s string) (n int, err error) { - if !strings.Contains(s, "\n") { - if !w.compact && w.complete { - w.writeIndent() - } - w.complete = false - return io.WriteString(w.w, s) - } - // WriteString is typically called without newlines, so this - // codepath and its copy are rare. We copy to avoid - // duplicating all of Write's logic here. - return w.Write([]byte(s)) -} - -func (w *textWriter) Write(p []byte) (n int, err error) { - newlines := bytes.Count(p, newline) - if newlines == 0 { - if !w.compact && w.complete { - w.writeIndent() - } - n, err = w.w.Write(p) - w.complete = false - return n, err - } - - frags := bytes.SplitN(p, newline, newlines+1) - if w.compact { - for i, frag := range frags { - if i > 0 { - if err := w.w.WriteByte(' '); err != nil { - return n, err - } - n++ - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - } - return n, nil - } - - for i, frag := range frags { - if w.complete { - w.writeIndent() - } - nn, err := w.w.Write(frag) - n += nn - if err != nil { - return n, err - } - if i+1 < len(frags) { - if err := w.w.WriteByte('\n'); err != nil { - return n, err - } - n++ - } - } - w.complete = len(frags[len(frags)-1]) == 0 - return n, nil -} - -func (w *textWriter) WriteByte(c byte) error { - if w.compact && c == '\n' { - c = ' ' - } - if !w.compact && w.complete { - w.writeIndent() - } - err := w.w.WriteByte(c) - w.complete = c == '\n' - return err -} - -func (w *textWriter) indent() { w.ind++ } - -func (w *textWriter) unindent() { - if w.ind == 0 { - log.Print("proto: textWriter unindented too far") - return - } - w.ind-- -} - -func writeName(w *textWriter, props *Properties) error { - if _, err := w.WriteString(props.OrigName); err != nil { - return err - } - if props.Wire != "group" { - return w.WriteByte(':') - } - return nil -} - -func requiresQuotes(u string) bool { - // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. - for _, ch := range u { - switch { - case ch == '.' || ch == '/' || ch == '_': - continue - case '0' <= ch && ch <= '9': - continue - case 'A' <= ch && ch <= 'Z': - continue - case 'a' <= ch && ch <= 'z': - continue - default: - return true - } - } - return false -} - -// isAny reports whether sv is a google.protobuf.Any message -func isAny(sv reflect.Value) bool { - type wkt interface { - XXX_WellKnownType() string - } - t, ok := sv.Addr().Interface().(wkt) - return ok && t.XXX_WellKnownType() == "Any" -} - -// writeProto3Any writes an expanded google.protobuf.Any message. -// -// It returns (false, nil) if sv value can't be unmarshaled (e.g. because -// required messages are not linked in). -// -// It returns (true, error) when sv was written in expanded format or an error -// was encountered. 
-func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { - turl := sv.FieldByName("TypeUrl") - val := sv.FieldByName("Value") - if !turl.IsValid() || !val.IsValid() { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - b, ok := val.Interface().([]byte) - if !ok { - return true, errors.New("proto: invalid google.protobuf.Any message") - } - - parts := strings.Split(turl.String(), "/") - mt := MessageType(parts[len(parts)-1]) - if mt == nil { - return false, nil - } - m := reflect.New(mt.Elem()) - if err := Unmarshal(b, m.Interface().(Message)); err != nil { - return false, nil - } - w.Write([]byte("[")) - u := turl.String() - if requiresQuotes(u) { - writeString(w, u) - } else { - w.Write([]byte(u)) - } - if w.compact { - w.Write([]byte("]:<")) - } else { - w.Write([]byte("]: <\n")) - w.ind++ - } - if err := tm.writeStruct(w, m.Elem()); err != nil { - return true, err - } - if w.compact { - w.Write([]byte("> ")) - } else { - w.ind-- - w.Write([]byte(">\n")) - } - return true, nil -} - -func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { - if tm.ExpandAny && isAny(sv) { - if canExpand, err := tm.writeProto3Any(w, sv); canExpand { - return err - } - } - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < sv.NumField(); i++ { - fv := sv.Field(i) - props := sprops.Prop[i] - name := st.Field(i).Name - - if name == "XXX_NoUnkeyedLiteral" { - continue - } - - if strings.HasPrefix(name, "XXX_") { - // There are two XXX_ fields: - // XXX_unrecognized []byte - // XXX_extensions map[int32]proto.Extension - // The first is handled here; - // the second is handled at the bottom of this function. - if name == "XXX_unrecognized" && !fv.IsNil() { - if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Field not filled in. This could be an optional field or - // a required field that wasn't filled in. Either way, there - // isn't anything we can show for it. - continue - } - if fv.Kind() == reflect.Slice && fv.IsNil() { - // Repeated field that is empty, or a bytes field that is unused. - continue - } - - if props.Repeated && fv.Kind() == reflect.Slice { - // Repeated field. - for j := 0; j < fv.Len(); j++ { - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - v := fv.Index(j) - if v.Kind() == reflect.Ptr && v.IsNil() { - // A nil message in a repeated field is not valid, - // but we can handle that more gracefully than panicking. - if _, err := w.Write([]byte("\n")); err != nil { - return err - } - continue - } - if err := tm.writeAny(w, v, props); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if fv.Kind() == reflect.Map { - // Map fields are rendered as a repeated struct with key/value fields. 
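writeProto3Any above is what lets TextMarshaler{ExpandAny: true} render a google.protobuf.Any as its underlying message when that type is linked in. A sketch of the observable behavior, assuming the github.com/golang/protobuf/ptypes and ptypes/wrappers packages; the expected output shown in the comment follows the writer code above:

package main

import (
	"os"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	a, _ := ptypes.MarshalAny(&wrappers.Int32Value{Value: 42})
	tm := proto.TextMarshaler{ExpandAny: true}
	// Expected output, with the Any expanded in place:
	// [type.googleapis.com/google.protobuf.Int32Value]: <
	//   value: 42
	// >
	_ = tm.Marshal(os.Stdout, a)
}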
- keys := fv.MapKeys() - sort.Sort(mapKeys(keys)) - for _, key := range keys { - val := fv.MapIndex(key) - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - // open struct - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - // key - if _, err := w.WriteString("key:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - // nil values aren't legal, but we can avoid panicking because of them. - if val.Kind() != reflect.Ptr || !val.IsNil() { - // value - if _, err := w.WriteString("value:"); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, val, props.MapValProp); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - // close struct - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - } - continue - } - if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { - // empty bytes field - continue - } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { - // proto3 non-repeated scalar field; skip if zero value - if isProto3Zero(fv) { - continue - } - } - - if fv.Kind() == reflect.Interface { - // Check if it is a oneof. - if st.Field(i).Tag.Get("protobuf_oneof") != "" { - // fv is nil, or holds a pointer to generated struct. - // That generated struct has exactly one field, - // which has a protobuf struct tag. - if fv.IsNil() { - continue - } - inner := fv.Elem().Elem() // interface -> *T -> T - tag := inner.Type().Field(0).Tag.Get("protobuf") - props = new(Properties) // Overwrite the outer props var, but not its pointee. - props.Parse(tag) - // Write the value in the oneof, not the oneof itself. - fv = inner.Field(0) - - // Special case to cope with malformed messages gracefully: - // If the value in the oneof is a nil pointer, don't panic - // in writeAny. - if fv.Kind() == reflect.Ptr && fv.IsNil() { - // Use errors.New so writeAny won't render quotes. - msg := errors.New("/* nil */") - fv = reflect.ValueOf(&msg).Elem() - } - } - } - - if err := writeName(w, props); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { - return err - } - - if err := w.WriteByte('\n'); err != nil { - return err - } - } - - // Extensions (the XXX_extensions field). - pv := sv.Addr() - if _, err := extendable(pv.Interface()); err == nil { - if err := tm.writeExtensions(w, pv); err != nil { - return err - } - } - - return nil -} - -// writeAny writes an arbitrary field. -func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { - v = reflect.Indirect(v) - - // Floats have special cases. 
- if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { - x := v.Float() - var b []byte - switch { - case math.IsInf(x, 1): - b = posInf - case math.IsInf(x, -1): - b = negInf - case math.IsNaN(x): - b = nan - } - if b != nil { - _, err := w.Write(b) - return err - } - // Other values are handled below. - } - - // We don't attempt to serialise every possible value type; only those - // that can occur in protocol buffers. - switch v.Kind() { - case reflect.Slice: - // Should only be a []byte; repeated fields are handled in writeStruct. - if err := writeString(w, string(v.Bytes())); err != nil { - return err - } - case reflect.String: - if err := writeString(w, v.String()); err != nil { - return err - } - case reflect.Struct: - // Required/optional group/message. - var bra, ket byte = '<', '>' - if props != nil && props.Wire == "group" { - bra, ket = '{', '}' - } - if err := w.WriteByte(bra); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if v.CanAddr() { - // Calling v.Interface on a struct causes the reflect package to - // copy the entire struct. This is racy with the new Marshaler - // since we atomically update the XXX_sizecache. - // - // Thus, we retrieve a pointer to the struct if possible to avoid - // a race since v.Interface on the pointer doesn't copy the struct. - // - // If v is not addressable, then we are not worried about a race - // since it implies that the binary Marshaler cannot possibly be - // mutating this value. - v = v.Addr() - } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = w.Write(text); err != nil { - return err - } - } else { - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - if err := tm.writeStruct(w, v); err != nil { - return err - } - } - w.unindent() - if err := w.WriteByte(ket); err != nil { - return err - } - default: - _, err := fmt.Fprint(w, v.Interface()) - return err - } - return nil -} - -// equivalent to C's isprint. -func isprint(c byte) bool { - return c >= 0x20 && c < 0x7f -} - -// writeString writes a string in the protocol buffer text format. -// It is similar to strconv.Quote except we don't use Go escape sequences, -// we treat the string as a byte sequence, and we use octal escapes. -// These differences are to maintain interoperability with the other -// languages' implementations of the text format. -func writeString(w *textWriter, s string) error { - // use WriteByte here to get any needed indent - if err := w.WriteByte('"'); err != nil { - return err - } - // Loop over the bytes, not the runes. - for i := 0; i < len(s); i++ { - var err error - // Divergence from C++: we don't escape apostrophes. - // There's no need to escape them, and the C++ parser - // copes with a naked apostrophe. 
- switch c := s[i]; c { - case '\n': - _, err = w.w.Write(backslashN) - case '\r': - _, err = w.w.Write(backslashR) - case '\t': - _, err = w.w.Write(backslashT) - case '"': - _, err = w.w.Write(backslashDQ) - case '\\': - _, err = w.w.Write(backslashBS) - default: - if isprint(c) { - err = w.w.WriteByte(c) - } else { - _, err = fmt.Fprintf(w.w, "\\%03o", c) - } - } - if err != nil { - return err - } - } - return w.WriteByte('"') -} - -func writeUnknownStruct(w *textWriter, data []byte) (err error) { - if !w.compact { - if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { - return err - } - } - b := NewBuffer(data) - for b.index < len(b.buf) { - x, err := b.DecodeVarint() - if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err - } - wire, tag := x&7, x>>3 - if wire == WireEndGroup { - w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err - } - continue - } - if _, err := fmt.Fprint(w, tag); err != nil { - return err - } - if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { - return err - } - } - if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { - return err - } - } - switch wire { - case WireBytes: - buf, e := b.DecodeRawBytes(false) - if e == nil { - _, err = fmt.Fprintf(w, "%q", buf) - } else { - _, err = fmt.Fprintf(w, "/* %v */", e) - } - case WireFixed32: - x, err = b.DecodeFixed32() - err = writeUnknownInt(w, x, err) - case WireFixed64: - x, err = b.DecodeFixed64() - err = writeUnknownInt(w, x, err) - case WireStartGroup: - err = w.WriteByte('{') - w.indent() - case WireVarint: - x, err = b.DecodeVarint() - err = writeUnknownInt(w, x, err) - default: - _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) - } - if err != nil { - return err - } - if err = w.WriteByte('\n'); err != nil { - return err - } - } - return nil -} - -func writeUnknownInt(w *textWriter, x uint64, err error) error { - if err == nil { - _, err = fmt.Fprint(w, x) - } else { - _, err = fmt.Fprintf(w, "/* %v */", err) - } - return err -} - -type int32Slice []int32 - -func (s int32Slice) Len() int { return len(s) } -func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } -func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// writeExtensions writes all the extensions in pv. -// pv is assumed to be a pointer to a protocol message struct that is extendable. -func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { - emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) - - // Order the extensions by ID. - // This isn't strictly necessary, but it will give us - // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } - mu.Lock() - ids := make([]int32, 0, len(m)) - for id := range m { - ids = append(ids, id) - } - sort.Sort(int32Slice(ids)) - mu.Unlock() - - for _, extNum := range ids { - ext := m[extNum] - var desc *ExtensionDesc - if emap != nil { - desc = emap[extNum] - } - if desc == nil { - // Unknown extension. - if err := writeUnknownStruct(w, ext.enc); err != nil { - return err - } - continue - } - - pb, err := GetExtension(ep, desc) - if err != nil { - return fmt.Errorf("failed getting extension: %v", err) - } - - // Repeated extensions will appear as a slice. 
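writeString above quotes byte-by-byte with octal escapes rather than Go-style \xNN or \uNNNN, which keeps output interoperable with the C++ text-format parser. A self-contained sketch of the same rule — quoteTextFormat is an illustrative name covering only the escapes shown above:

package main

import (
	"fmt"
	"strings"
)

// quoteTextFormat mirrors the deleted writeString: byte-oriented,
// octal escapes for non-printables, no Go-style escape sequences.
func quoteTextFormat(s string) string {
	var b strings.Builder
	b.WriteByte('"')
	for i := 0; i < len(s); i++ { // bytes, not runes
		switch c := s[i]; c {
		case '\n':
			b.WriteString(`\n`)
		case '\r':
			b.WriteString(`\r`)
		case '\t':
			b.WriteString(`\t`)
		case '"':
			b.WriteString(`\"`)
		case '\\':
			b.WriteString(`\\`)
		default:
			if c >= 0x20 && c < 0x7f { // isprint, as defined above
				b.WriteByte(c)
			} else {
				fmt.Fprintf(&b, "\\%03o", c)
			}
		}
	}
	b.WriteByte('"')
	return b.String()
}

func main() {
	fmt.Println(quoteTextFormat("héllo\n")) // "h\303\251llo\n"
}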
- if !desc.repeated() { - if err := tm.writeExtension(w, desc.Name, pb); err != nil { - return err - } - } else { - v := reflect.ValueOf(pb) - for i := 0; i < v.Len(); i++ { - if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { - return err - } - } - } - } - return nil -} - -func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { - if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte(' '); err != nil { - return err - } - } - if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { - return err - } - if err := w.WriteByte('\n'); err != nil { - return err - } - return nil -} - -func (w *textWriter) writeIndent() { - if !w.complete { - return - } - remain := w.ind * 2 - for remain > 0 { - n := remain - if n > len(spaces) { - n = len(spaces) - } - w.w.Write(spaces[:n]) - remain -= n - } - w.complete = false -} - -// TextMarshaler is a configurable text format marshaler. -type TextMarshaler struct { - Compact bool // use compact text format (one line). - ExpandAny bool // expand google.protobuf.Any messages of known types -} - -// Marshal writes a given protocol buffer in text format. -// The only errors returned are from w. -func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { - val := reflect.ValueOf(pb) - if pb == nil || val.IsNil() { - w.Write([]byte("<nil>")) - return nil - } - var bw *bufio.Writer - ww, ok := w.(writer) - if !ok { - bw = bufio.NewWriter(w) - ww = bw - } - aw := &textWriter{ - w: ww, - complete: true, - compact: tm.Compact, - } - - if etm, ok := pb.(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() - if err != nil { - return err - } - if _, err = aw.Write(text); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil - } - // Dereference the received pointer so we don't have outer < and >. - v := reflect.Indirect(val) - if err := tm.writeStruct(aw, v); err != nil { - return err - } - if bw != nil { - return bw.Flush() - } - return nil -} - -// Text is the same as Marshal, but returns the string directly. -func (tm *TextMarshaler) Text(pb Message) string { - var buf bytes.Buffer - tm.Marshal(&buf, pb) - return buf.String() -} - -var ( - defaultTextMarshaler = TextMarshaler{} - compactTextMarshaler = TextMarshaler{Compact: true} -) - -// TODO: consider removing some of the Marshal functions below. - -// MarshalText writes a given protocol buffer in text format. -// The only errors returned are from w. -func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } - -// MarshalTextString is the same as MarshalText, but returns the string directly. -func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } - -// CompactText writes a given protocol buffer in compact text format (one line). -func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } - -// CompactTextString is the same as CompactText, but returns the string directly. -func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go deleted file mode 100644 index bb55a3a..0000000 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ /dev/null @@ -1,880 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved.
-// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package proto - -// Functions for parsing the Text protocol buffer format. -// TODO: message sets. - -import ( - "encoding" - "errors" - "fmt" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -// Error string emitted when deserializing Any and fields are already set -const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" - -type ParseError struct { - Message string - Line int // 1-based line number - Offset int // 0-based byte offset from start of input -} - -func (p *ParseError) Error() string { - if p.Line == 1 { - // show offset only for first line - return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) - } - return fmt.Sprintf("line %d: %v", p.Line, p.Message) -} - -type token struct { - value string - err *ParseError - line int // line number - offset int // byte number from start of input, not start of line - unquoted string // the unquoted version of value, if it was a quoted string -} - -func (t *token) String() string { - if t.err == nil { - return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) - } - return fmt.Sprintf("parse error: %v", t.err) -} - -type textParser struct { - s string // remaining input - done bool // whether the parsing is finished (success or error) - backed bool // whether back() was called - offset, line int - cur token -} - -func newTextParser(s string) *textParser { - p := new(textParser) - p.s = s - p.line = 1 - p.cur.line = 1 - return p -} - -func (p *textParser) errorf(format string, a ...interface{}) *ParseError { - pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} - p.cur.err = pe - p.done = true - return pe -} - -// Numbers and identifiers are matched by [-+._A-Za-z0-9] -func isIdentOrNumberChar(c byte) bool { - switch { - case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': - return true - case '0' <= c && c <= '9': - return true - } - switch c { - case '-', '+', '.', '_': - return true - } - return false -} - -func isWhitespace(c byte) bool { - 
switch c { - case ' ', '\t', '\n', '\r': - return true - } - return false -} - -func isQuote(c byte) bool { - switch c { - case '"', '\'': - return true - } - return false -} - -func (p *textParser) skipWhitespace() { - i := 0 - for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { - if p.s[i] == '#' { - // comment; skip to end of line or input - for i < len(p.s) && p.s[i] != '\n' { - i++ - } - if i == len(p.s) { - break - } - } - if p.s[i] == '\n' { - p.line++ - } - i++ - } - p.offset += i - p.s = p.s[i:len(p.s)] - if len(p.s) == 0 { - p.done = true - } -} - -func (p *textParser) advance() { - // Skip whitespace - p.skipWhitespace() - if p.done { - return - } - - // Start of non-whitespace - p.cur.err = nil - p.cur.offset, p.cur.line = p.offset, p.line - p.cur.unquoted = "" - switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': - // Single symbol - p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] - case '"', '\'': - // Quoted string - i := 1 - for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { - if p.s[i] == '\\' && i+1 < len(p.s) { - // skip escaped char - i++ - } - i++ - } - if i >= len(p.s) || p.s[i] != p.s[0] { - p.errorf("unmatched quote") - return - } - unq, err := unquoteC(p.s[1:i], rune(p.s[0])) - if err != nil { - p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) - return - } - p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] - p.cur.unquoted = unq - default: - i := 0 - for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { - i++ - } - if i == 0 { - p.errorf("unexpected byte %#x", p.s[0]) - return - } - p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] - } - p.offset += len(p.cur.value) -} - -var ( - errBadUTF8 = errors.New("proto: bad UTF-8") -) - -func unquoteC(s string, quote rune) (string, error) { - // This is based on C++'s tokenizer.cc. - // Despite its name, this is *not* parsing C syntax. - // For instance, "\0" is an invalid quoted string. - - // Avoid allocation in trivial cases. - simple := true - for _, r := range s { - if r == '\\' || r == quote { - simple = false - break - } - } - if simple { - return s, nil - } - - buf := make([]byte, 0, 3*len(s)/2) - for len(s) > 0 { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", errBadUTF8 - } - s = s[n:] - if r != '\\' { - if r < utf8.RuneSelf { - buf = append(buf, byte(r)) - } else { - buf = append(buf, string(r)...) - } - continue - } - - ch, tail, err := unescape(s) - if err != nil { - return "", err - } - buf = append(buf, ch...) 
- s = tail - } - return string(buf), nil -} - -func unescape(s string) (ch string, tail string, err error) { - r, n := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && n == 1 { - return "", "", errBadUTF8 - } - s = s[n:] - switch r { - case 'a': - return "\a", s, nil - case 'b': - return "\b", s, nil - case 'f': - return "\f", s, nil - case 'n': - return "\n", s, nil - case 'r': - return "\r", s, nil - case 't': - return "\t", s, nil - case 'v': - return "\v", s, nil - case '?': - return "?", s, nil // trigraph workaround - case '\'', '"', '\\': - return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7': - if len(s) < 2 { - return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) - } - ss := string(r) + s[:2] - s = s[2:] - i, err := strconv.ParseUint(ss, 8, 8) - if err != nil { - return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) - } - return string([]byte{byte(i)}), s, nil - case 'x', 'X', 'u', 'U': - var n int - switch r { - case 'x', 'X': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) - } - ss := s[:n] - s = s[n:] - i, err := strconv.ParseUint(ss, 16, 64) - if err != nil { - return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) - } - if r == 'x' || r == 'X' { - return string([]byte{byte(i)}), s, nil - } - if i > utf8.MaxRune { - return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) - } - return string(i), s, nil - } - return "", "", fmt.Errorf(`unknown escape \%c`, r) -} - -// Back off the parser by one token. Can only be done between calls to next(). -// It makes the next advance() a no-op. -func (p *textParser) back() { p.backed = true } - -// Advances the parser and returns the new current token. -func (p *textParser) next() *token { - if p.backed || p.done { - p.backed = false - return &p.cur - } - p.advance() - if p.done { - p.cur.value = "" - } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { - // Look for multiple quoted strings separated by whitespace, - // and concatenate them. - cat := p.cur - for { - p.skipWhitespace() - if p.done || !isQuote(p.s[0]) { - break - } - p.advance() - if p.cur.err != nil { - return &p.cur - } - cat.value += " " + p.cur.value - cat.unquoted += p.cur.unquoted - } - p.done = false // parser may have seen EOF, but we want to return cat - p.cur = cat - } - return &p.cur -} - -func (p *textParser) consumeToken(s string) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != s { - p.back() - return p.errorf("expected %q, found %q", s, tok.value) - } - return nil -} - -// Return a RequiredNotSetError indicating which required field was not set. -func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { - st := sv.Type() - sprops := GetProperties(st) - for i := 0; i < st.NumField(); i++ { - if !isNil(sv.Field(i)) { - continue - } - - props := sprops.Prop[i] - if props.Required { - return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} - } - } - return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen -} - -// Returns the index in the struct for the named field, as well as the parsed tag properties. 
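Two behaviors of the tokenizer above are easy to miss: next() concatenates adjacent quoted strings (C-style), and unquoteC/unescape interpret octal escapes byte-wise. A sketch via the public entry point, assuming a message with string and bytes fields such as the vendored any.Any; the x.Example type URL is purely illustrative:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

func main() {
	msg := &any.Any{}
	// Adjacent quoted strings concatenate; \303\251 unescapes to the two bytes of "é".
	err := proto.UnmarshalText(`type_url: "type.googleapis.com/" "x.Example" value: "\303\251"`, msg)
	fmt.Println(err)               // <nil>
	fmt.Println(msg.TypeUrl)       // type.googleapis.com/x.Example
	fmt.Println(string(msg.Value)) // é
}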
-func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { - i, ok := sprops.decoderOrigNames[name] - if ok { - return i, sprops.Prop[i], true - } - return -1, nil, false -} - -// Consume a ':' from the input stream (if the next token is a colon), -// returning an error if a colon is needed but not present. -func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ":" { - // Colon is optional when the field is a group or message. - needColon := true - switch props.Wire { - case "group": - needColon = false - case "bytes": - // A "bytes" field is either a message, a string, or a repeated field; - // those three become *T, *string and []T respectively, so we can check for - // this field being a pointer to a non-string. - if typ.Kind() == reflect.Ptr { - // *T or *string - if typ.Elem().Kind() == reflect.String { - break - } - } else if typ.Kind() == reflect.Slice { - // []T or []*T - if typ.Elem().Kind() != reflect.Ptr { - break - } - } else if typ.Kind() == reflect.String { - // The proto3 exception is for a string field, - // which requires a colon. - break - } - needColon = false - } - if needColon { - return p.errorf("expected ':', found %q", tok.value) - } - p.back() - } - return nil -} - -func (p *textParser) readStruct(sv reflect.Value, terminator string) error { - st := sv.Type() - sprops := GetProperties(st) - reqCount := sprops.reqCount - var reqFieldErr error - fieldSet := make(map[string]bool) - // A struct is a sequence of "name: value", terminated by one of - // '>' or '}', or the end of the input. A name may also be - // "[extension]" or "[type/url]". - // - // The whole struct can also be an expanded Any message, like: - // [type/url] < ... struct contents ... > - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - if tok.value == "[" { - // Looks like an extension or an Any. - // - // TODO: Check whether we need to handle - // namespace rooted names (e.g. ".something.Foo"). - extName, err := p.consumeExtName() - if err != nil { - return err - } - - if s := strings.LastIndex(extName, "/"); s >= 0 { - // If it contains a slash, it's an Any type URL. - messageName := extName[s+1:] - mt := MessageType(messageName) - if mt == nil { - return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) - } - tok = p.next() - if tok.err != nil { - return tok.err - } - // consume an optional colon - if tok.value == ":" { - tok = p.next() - if tok.err != nil { - return tok.err - } - } - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - v := reflect.New(mt.Elem()) - if pe := p.readStruct(v.Elem(), terminator); pe != nil { - return pe - } - b, err := Marshal(v.Interface().(Message)) - if err != nil { - return p.errorf("failed to marshal message of type %q: %v", messageName, err) - } - if fieldSet["type_url"] { - return p.errorf(anyRepeatedlyUnpacked, "type_url") - } - if fieldSet["value"] { - return p.errorf(anyRepeatedlyUnpacked, "value") - } - sv.FieldByName("TypeUrl").SetString(extName) - sv.FieldByName("Value").SetBytes(b) - fieldSet["type_url"] = true - fieldSet["value"] = true - continue - } - - var desc *ExtensionDesc - // This could be faster, but it's functional. - // TODO: Do something smarter than a linear scan. 
- for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == extName { - desc = d - break - } - } - if desc == nil { - return p.errorf("unrecognized extension %q", extName) - } - - props := &Properties{} - props.Parse(desc.Tag) - - typ := reflect.TypeOf(desc.ExtensionType) - if err := p.checkForColon(props, typ); err != nil { - return err - } - - rep := desc.repeated() - - // Read the extension structure, and set it in - // the value we're constructing. - var ext reflect.Value - if !rep { - ext = reflect.New(typ).Elem() - } else { - ext = reflect.New(typ.Elem()).Elem() - } - if err := p.readAny(ext, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - ep := sv.Addr().Interface().(Message) - if !rep { - SetExtension(ep, desc, ext.Interface()) - } else { - old, err := GetExtension(ep, desc) - var sl reflect.Value - if err == nil { - sl = reflect.ValueOf(old) // existing slice - } else { - sl = reflect.MakeSlice(typ, 0, 1) - } - sl = reflect.Append(sl, ext) - SetExtension(ep, desc, sl.Interface()) - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - continue - } - - // This is a normal, non-extension field. - name := tok.value - var dst reflect.Value - fi, props, ok := structFieldByName(sprops, name) - if ok { - dst = sv.Field(fi) - } else if oop, ok := sprops.OneofTypes[name]; ok { - // It is a oneof. - props = oop.Prop - nv := reflect.New(oop.Type.Elem()) - dst = nv.Elem().Field(0) - field := sv.Field(oop.Field) - if !field.IsNil() { - return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) - } - field.Set(nv) - } - if !dst.IsValid() { - return p.errorf("unknown field name %q in %v", name, st) - } - - if dst.Kind() == reflect.Map { - // Consume any colon. - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Construct the map if it doesn't already exist. - if dst.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) - } - key := reflect.New(dst.Type().Key()).Elem() - val := reflect.New(dst.Type().Elem()).Elem() - - // The map entry should be this sequence of tokens: - // < key : KEY value : VALUE > - // However, implementations may omit key or value, and technically - // we should support them in any order. See b/28924776 for a time - // this went wrong. - - tok := p.next() - var terminator string - switch tok.value { - case "<": - terminator = ">" - case "{": - terminator = "}" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - for { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == terminator { - break - } - switch tok.value { - case "key": - if err := p.consumeToken(":"); err != nil { - return err - } - if err := p.readAny(key, props.MapKeyProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - case "value": - if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { - return err - } - if err := p.readAny(val, props.MapValProp); err != nil { - return err - } - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - default: - p.back() - return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) - } - } - - dst.SetMapIndex(key, val) - continue - } - - // Check that it's not already set if it's not a repeated field. 
- if !props.Repeated && fieldSet[name] { - return p.errorf("non-repeated field %q was repeated", name) - } - - if err := p.checkForColon(props, dst.Type()); err != nil { - return err - } - - // Parse into the field. - fieldSet[name] = true - if err := p.readAny(dst, props); err != nil { - if _, ok := err.(*RequiredNotSetError); !ok { - return err - } - reqFieldErr = err - } - if props.Required { - reqCount-- - } - - if err := p.consumeOptionalSeparator(); err != nil { - return err - } - - } - - if reqCount > 0 { - return p.missingRequiredFieldError(sv) - } - return reqFieldErr -} - -// consumeExtName consumes extension name or expanded Any type URL and the -// following ']'. It returns the name or URL consumed. -func (p *textParser) consumeExtName() (string, error) { - tok := p.next() - if tok.err != nil { - return "", tok.err - } - - // If extension name or type url is quoted, it's a single token. - if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { - name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) - if err != nil { - return "", err - } - return name, p.consumeToken("]") - } - - // Consume everything up to "]" - var parts []string - for tok.value != "]" { - parts = append(parts, tok.value) - tok = p.next() - if tok.err != nil { - return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) - } - if p.done && tok.value != "]" { - return "", p.errorf("unclosed type_url or extension name") - } - } - return strings.Join(parts, ""), nil -} - -// consumeOptionalSeparator consumes an optional semicolon or comma. -// It is used in readStruct to provide backward compatibility. -func (p *textParser) consumeOptionalSeparator() error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value != ";" && tok.value != "," { - p.back() - } - return nil -} - -func (p *textParser) readAny(v reflect.Value, props *Properties) error { - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "" { - return p.errorf("unexpected EOF") - } - - switch fv := v; fv.Kind() { - case reflect.Slice: - at := v.Type() - if at.Elem().Kind() == reflect.Uint8 { - // Special case for []byte - if tok.value[0] != '"' && tok.value[0] != '\'' { - // Deliberately written out here, as the error after - // this switch statement would write "invalid []byte: ...", - // which is not as user-friendly. - return p.errorf("invalid string: %v", tok.value) - } - bytes := []byte(tok.unquoted) - fv.Set(reflect.ValueOf(bytes)) - return nil - } - // Repeated field. - if tok.value == "[" { - // Repeated field with list notation, like [1,2,3]. - for { - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - err := p.readAny(fv.Index(fv.Len()-1), props) - if err != nil { - return err - } - tok := p.next() - if tok.err != nil { - return tok.err - } - if tok.value == "]" { - break - } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) - } - } - return nil - } - // One value of the repeated field. - p.back() - fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) - return p.readAny(fv.Index(fv.Len()-1), props) - case reflect.Bool: - // true/1/t/True or false/f/0/False. 
- switch tok.value { - case "true", "1", "t", "True": - fv.SetBool(true) - return nil - case "false", "0", "f", "False": - fv.SetBool(false) - return nil - } - case reflect.Float32, reflect.Float64: - v := tok.value - // Ignore 'f' for compatibility with output generated by C++, but don't - // remove 'f' when the value is "-inf" or "inf". - if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { - v = v[:len(v)-1] - } - if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { - fv.SetFloat(f) - return nil - } - case reflect.Int32: - if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { - fv.SetInt(x) - return nil - } - - if len(props.Enum) == 0 { - break - } - m, ok := enumValueMaps[props.Enum] - if !ok { - break - } - x, ok := m[tok.value] - if !ok { - break - } - fv.SetInt(int64(x)) - return nil - case reflect.Int64: - if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { - fv.SetInt(x) - return nil - } - - case reflect.Ptr: - // A basic field (indirected through pointer), or a repeated message/group - p.back() - fv.Set(reflect.New(fv.Type().Elem())) - return p.readAny(fv.Elem(), props) - case reflect.String: - if tok.value[0] == '"' || tok.value[0] == '\'' { - fv.SetString(tok.unquoted) - return nil - } - case reflect.Struct: - var terminator string - switch tok.value { - case "{": - terminator = "}" - case "<": - terminator = ">" - default: - return p.errorf("expected '{' or '<', found %q", tok.value) - } - // TODO: Handle nested messages which implement encoding.TextUnmarshaler. - return p.readStruct(fv, terminator) - case reflect.Uint32: - if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) - return nil - } - case reflect.Uint64: - if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { - fv.SetUint(x) - return nil - } - } - return p.errorf("invalid %v: %v", v.Type(), tok.value) -} - -// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb -// before starting to unmarshal, so any existing data in pb is always removed. -// If a required field is not set and no other error occurs, -// UnmarshalText returns *RequiredNotSetError. -func UnmarshalText(s string, pb Message) error { - if um, ok := pb.(encoding.TextUnmarshaler); ok { - return um.UnmarshalText([]byte(s)) - } - pb.Reset() - v := reflect.ValueOf(pb) - return newTextParser(s).readStruct(v.Elem(), "") -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index 70276e8..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,141 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
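As the doc comment above notes, UnmarshalText resets pb before parsing, so stale field values never survive a round trip. A small sketch, again assuming the vendored any.Any message and an illustrative type URL:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/any"
)

func main() {
	in := &any.Any{TypeUrl: "type.googleapis.com/x.Example", Value: []byte("hi")}
	text := proto.MarshalTextString(in) // e.g. type_url: "..." value: "hi"

	out := &any.Any{Value: []byte("stale")} // reset by UnmarshalText before parsing
	if err := proto.UnmarshalText(text, out); err != nil {
		panic(err)
	}
	fmt.Println(out.TypeUrl, string(out.Value)) // type.googleapis.com/x.Example hi
}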
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements functions to marshal proto.Message to/from -// google.protobuf.Any message. - -import ( - "fmt" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" -) - -const googleApis = "type.googleapis.com/" - -// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. -// -// Note that regular type assertions should be done using the Is -// function. AnyMessageName is provided for less common use cases like filtering a -// sequence of Any messages based on a set of allowed message type names. -func AnyMessageName(any *any.Any) (string, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - slash := strings.LastIndex(any.TypeUrl, "/") - if slash < 0 { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return any.TypeUrl[slash+1:], nil -} - -// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. -func MarshalAny(pb proto.Message) (*any.Any, error) { - value, err := proto.Marshal(pb) - if err != nil { - return nil, err - } - return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in a google.protobuf.Any -// message. The allocated message is stored in the embedded proto.Message. -// -// Example: -// -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct { - proto.Message -} - -// Empty returns a new proto.Message of the type specified in a -// google.protobuf.Any message. It returns an error if corresponding message -// type isn't linked in. -func Empty(any *any.Any) (proto.Message, error) { - aname, err := AnyMessageName(any) - if err != nil { - return nil, err - } - - t := proto.MessageType(aname) - if t == nil { - return nil, fmt.Errorf("any: message type %q isn't linked in", aname) - } - return reflect.New(t.Elem()).Interface().(proto.Message), nil -} - -// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any -// message and places the decoded result in pb. It returns an error if type of -// contents of Any message does not match type of pb message. -// -// pb can be a proto.Message, or a *DynamicAny. 
-func UnmarshalAny(any *any.Any, pb proto.Message) error { - if d, ok := pb.(*DynamicAny); ok { - if d.Message == nil { - var err error - d.Message, err = Empty(any) - if err != nil { - return err - } - } - return UnmarshalAny(any, d.Message) - } - - aname, err := AnyMessageName(any) - if err != nil { - return err - } - - mname := proto.MessageName(pb) - if aname != mname { - return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) - } - return proto.Unmarshal(any.Value, pb) -} - -// Is returns true if any value contains a given message type. -func Is(any *any.Any, pb proto.Message) bool { - // The following is equivalent to AnyMessageName(any) == proto.MessageName(pb), - // but it avoids scanning TypeUrl for the slash. - if any == nil { - return false - } - name := proto.MessageName(pb) - prefix := len(any.TypeUrl) - len(name) - return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 78ee523..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,200 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -package any - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
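The helpers deleted above — MarshalAny, UnmarshalAny, and Is — wrap and unwrap arbitrary messages behind a type URL. A sketch using the well-known wrappers types, which provide ready-made proto.Message implementations without any generated code of our own:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	a, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Println(a.TypeUrl)                             // type.googleapis.com/google.protobuf.StringValue
	fmt.Println(ptypes.Is(a, &wrappers.StringValue{})) // true

	var s wrappers.StringValue
	if err := ptypes.UnmarshalAny(a, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.Value) // hello
}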
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": <string>, -// "lastName": <string> -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { - return fileDescriptor_b53526c13ae22eb4, []int{0} -} - -func (*Any) XXX_WellKnownType() string { return "Any" } - -func (m *Any) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Any.Unmarshal(m, b) -} -func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Any.Marshal(b, m, deterministic) -} -func (m *Any) XXX_Merge(src proto.Message) { - xxx_messageInfo_Any.Merge(m, src) -} -func (m *Any) XXX_Size() int { - return xxx_messageInfo_Any.Size(m) -} -func (m *Any) XXX_DiscardUnknown() { - xxx_messageInfo_Any.DiscardUnknown(m) -} - -var xxx_messageInfo_Any proto.InternalMessageInfo - -func (m *Any) GetTypeUrl() string { - if m != nil { - return m.TypeUrl - } - return "" -} - -func (m *Any) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*Any)(nil), "google.protobuf.Any") -} - -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } - -var fileDescriptor_b53526c13ae22eb4 = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, - 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, - 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, - 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, - 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce, - 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, - 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, - 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, - 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, - 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, - 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto deleted file mode 100644 index 4932942..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ /dev/null @@ -1,154 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "github.com/golang/protobuf/ptypes/any"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. 
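The JSON mapping described here is what the sibling jsonpb package (github.com/golang/protobuf/jsonpb, not part of this diff) produces when it meets an Any. A hedged sketch, assuming an Any packed from the vendored Duration type:

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
	if err != nil {
		log.Fatal(err)
	}

	// Well-known types such as Duration get the special "@type" + "value"
	// form described in the comment above.
	s, err := (&jsonpb.Marshaler{}).MarshalToString(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s) // {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}
}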
Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": <string>, -// "lastName": <string> -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index c0d595d..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,35 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package ptypes contains code for interacting with well-known types. -*/ -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 26d1ca2..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,102 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements conversions between google.protobuf.Duration -// and time.Duration. - -import ( - "errors" - "fmt" - "time" - - durpb "github.com/golang/protobuf/ptypes/duration" -) - -const ( - // Range of a durpb.Duration in seconds, as specified in - // google/protobuf/duration.proto. This is about 10,000 years in seconds. - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// validateDuration determines whether the durpb.Duration is valid according to the -// definition in google/protobuf/duration.proto. A valid durpb.Duration -// may still be too large to fit into a time.Duration (the range of durpb.Duration -// is about 10,000 years, and the range of time.Duration is about 290). 
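The parenthetical above is the practical gotcha: a durpb.Duration spans roughly 10,000 years, while time.Duration (an int64 count of nanoseconds) tops out near 292 years, so a proto that passes validateDuration can still be rejected by the conversion. A small sketch against the same API:

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// In range for both representations.
	d, err := ptypes.Duration(&durpb.Duration{Seconds: 90, Nanos: 500000000})
	fmt.Println(d, err) // 1m30.5s <nil>

	// Valid per duration.proto (~9,000 years) but beyond time.Duration.
	_, err = ptypes.Duration(&durpb.Duration{Seconds: 9000 * 365 * 24 * 60 * 60})
	fmt.Println(err) // duration: ... is out of range for time.Duration

	// The reverse conversion cannot fail.
	fmt.Println(ptypes.DurationProto(3 * time.Second)) // seconds:3
}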
-func validateDuration(d *durpb.Duration) error { - if d == nil { - return errors.New("duration: nil Duration") - } - if d.Seconds < minSeconds || d.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", d) - } - if d.Nanos <= -1e9 || d.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", d) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", d) - } - return nil -} - -// Duration converts a durpb.Duration to a time.Duration. Duration -// returns an error if the durpb.Duration is invalid or is too large to be -// represented in a time.Duration. -func Duration(p *durpb.Duration) (time.Duration, error) { - if err := validateDuration(p); err != nil { - return 0, err - } - d := time.Duration(p.Seconds) * time.Second - if int64(d/time.Second) != p.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - if p.Nanos != 0 { - d += time.Duration(p.Nanos) * time.Nanosecond - if (d < 0) != (p.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durpb.Duration. -func DurationProto(d time.Duration) *durpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durpb.Duration{ - Seconds: secs, - Nanos: int32(nanos), - } -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index 0d681ee..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -package duration - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. 
-// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_23597b2ebd7ac6c5, []int{0} -} - -func (*Duration) XXX_WellKnownType() string { return "Duration" } - -func (m *Duration) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Duration.Unmarshal(m, b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Duration.Marshal(b, m, deterministic) -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return xxx_messageInfo_Duration.Size(m) -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} - -var xxx_messageInfo_Duration proto.InternalMessageInfo - -func (m *Duration) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Duration) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") -} - -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } - -var fileDescriptor_23597b2ebd7ac6c5 = []byte{ - // 190 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, - 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, - 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, - 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, - 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, - 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, - 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, - 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, - 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, - 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto deleted file mode 100644 index 975fce4..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ /dev/null @@ -1,117 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/duration"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. 
For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 8da0df0..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,132 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package ptypes - -// This file implements operations on google.protobuf.Timestamp. - -import ( - "errors" - "fmt" - "time" - - tspb "github.com/golang/protobuf/ptypes/timestamp" -) - -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. - // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// validateTimestamp determines whether a Timestamp is valid. 
-// A valid timestamp represents a time in the range -// [0001-01-01, 10000-01-01) and has a Nanos field -// in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes -// the problem. -// -// Every valid Timestamp can be represented by a time.Time, but the converse is not true. -func validateTimestamp(ts *tspb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} - -// Timestamp converts a google.protobuf.Timestamp proto to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return value -// is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *tspb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *tspb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*tspb.Timestamp, error) { - ts := &tspb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid -// Timestamps, it returns an error message in parentheses. -func TimestampString(ts *tspb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index 31cd846..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,179 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -package timestamp - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
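The timestamp helpers just removed follow the same conversion pattern (their present-day equivalents live on timestamppb). A minimal sketch of the old API:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> proto; errors outside the validated range [0001, 10000).
	ts, err := ptypes.TimestampProto(time.Date(2020, 5, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		log.Fatal(err)
	}

	// proto -> time.Time; round-trips at nanosecond resolution, always UTC.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(t)                          // 2020-05-01 12:00:00 +0000 UTC
	fmt.Println(ptypes.TimestampString(ts)) // 2020-05-01T12:00:00Z
}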
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. 
In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_292007bbfe81227e, []int{0} -} - -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } - -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Timestamp.Unmarshal(m, b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return xxx_messageInfo_Timestamp.Size(m) -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *Timestamp) GetSeconds() int64 { - if m != nil { - return m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil { - return m.Nanos - } - return 0 -} - -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") -} - -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } - -var fileDescriptor_292007bbfe81227e = []byte{ - // 191 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, - 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, - 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, - 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, - 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, - 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, - 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, - 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, - 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 
0x50, 0xb5, 0x7a, - 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto deleted file mode 100644 index eafb3fa..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ /dev/null @@ -1,135 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "github.com/golang/protobuf/ptypes/timestamp"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone -// or calendar, represented as seconds and fractions of seconds at -// nanosecond resolution in UTC Epoch time. It is encoded using the -// Proleptic Gregorian Calendar which extends the Gregorian calendar -// backwards to year one. It is encoded assuming all minutes are 60 -// seconds long, i.e. leap seconds are "smeared" so that no leap second -// table is needed for interpretation. Range is from -// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. -// By restricting to that range, we ensure that we can convert to -// and from RFC 3339 date strings. -// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. 
-// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) -// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one -// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore deleted file mode 100644 index 042091d..0000000 --- a/vendor/github.com/golang/snappy/.gitignore +++ /dev/null @@ -1,16 +0,0 @@ -cmd/snappytool/snappytool -testdata/bench - -# These explicitly listed benchmark data files are for an obsolete version of -# snappy_test.go. 
-testdata/alice29.txt -testdata/asyoulik.txt -testdata/fireworks.jpeg -testdata/geo.protodata -testdata/html -testdata/html_x_4 -testdata/kppkn.gtb -testdata/lcet10.txt -testdata/paper-100k.pdf -testdata/plrabn12.txt -testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS deleted file mode 100644 index bcfa195..0000000 --- a/vendor/github.com/golang/snappy/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of Snappy-Go authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as -# Name or Organization -# The email address is not required for organizations. - -# Please keep the list sorted. - -Damian Gryski -Google Inc. -Jan Mercl <0xjnml@gmail.com> -Rodolfo Carvalho -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS deleted file mode 100644 index 931ae31..0000000 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ /dev/null @@ -1,37 +0,0 @@ -# This is the official list of people who can contribute -# (and typically have contributed) code to the Snappy-Go repository. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. -# -# The submission process automatically checks to make sure -# that people submitting code are listed in this file (by email address). -# -# Names should be added to this file only after verifying that -# the individual or the individual's organization has agreed to -# the appropriate Contributor License Agreement, found here: -# -# http://code.google.com/legal/individual-cla-v1.0.html -# http://code.google.com/legal/corporate-cla-v1.0.html -# -# The agreement for individuals can be filled out on the web. -# -# When adding J Random Contributor's name to this file, -# either J's name or J's organization's name should be -# added to the AUTHORS file, depending on whether the -# individual or corporate CLA was used. - -# Names should be added to this file like so: -# Name - -# Please keep the list sorted. - -Damian Gryski -Jan Mercl <0xjnml@gmail.com> -Kai Backman -Marc-Antoine Ruel -Nigel Tao -Rob Pike -Rodolfo Carvalho -Russ Cox -Sebastien Binet diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE deleted file mode 100644 index 6050c10..0000000 --- a/vendor/github.com/golang/snappy/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README deleted file mode 100644 index cea1287..0000000 --- a/vendor/github.com/golang/snappy/README +++ /dev/null @@ -1,107 +0,0 @@ -The Snappy compression format in the Go programming language. - -To download and install from source: -$ go get github.com/golang/snappy - -Unless otherwise noted, the Snappy-Go source files are distributed -under the BSD-style license found in the LICENSE file. - - - -Benchmarks. - -The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten -or so files, the same set used by the C++ Snappy code (github.com/google/snappy -and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ -3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: - -"go test -test.bench=." - -_UFlat0-8 2.19GB/s ± 0% html -_UFlat1-8 1.41GB/s ± 0% urls -_UFlat2-8 23.5GB/s ± 2% jpg -_UFlat3-8 1.91GB/s ± 0% jpg_200 -_UFlat4-8 14.0GB/s ± 1% pdf -_UFlat5-8 1.97GB/s ± 0% html4 -_UFlat6-8 814MB/s ± 0% txt1 -_UFlat7-8 785MB/s ± 0% txt2 -_UFlat8-8 857MB/s ± 0% txt3 -_UFlat9-8 719MB/s ± 1% txt4 -_UFlat10-8 2.84GB/s ± 0% pb -_UFlat11-8 1.05GB/s ± 0% gaviota - -_ZFlat0-8 1.04GB/s ± 0% html -_ZFlat1-8 534MB/s ± 0% urls -_ZFlat2-8 15.7GB/s ± 1% jpg -_ZFlat3-8 740MB/s ± 3% jpg_200 -_ZFlat4-8 9.20GB/s ± 1% pdf -_ZFlat5-8 991MB/s ± 0% html4 -_ZFlat6-8 379MB/s ± 0% txt1 -_ZFlat7-8 352MB/s ± 0% txt2 -_ZFlat8-8 396MB/s ± 1% txt3 -_ZFlat9-8 327MB/s ± 1% txt4 -_ZFlat10-8 1.33GB/s ± 1% pb -_ZFlat11-8 605MB/s ± 1% gaviota - - - -"go test -test.bench=. 
-tags=noasm" - -_UFlat0-8 621MB/s ± 2% html -_UFlat1-8 494MB/s ± 1% urls -_UFlat2-8 23.2GB/s ± 1% jpg -_UFlat3-8 1.12GB/s ± 1% jpg_200 -_UFlat4-8 4.35GB/s ± 1% pdf -_UFlat5-8 609MB/s ± 0% html4 -_UFlat6-8 296MB/s ± 0% txt1 -_UFlat7-8 288MB/s ± 0% txt2 -_UFlat8-8 309MB/s ± 1% txt3 -_UFlat9-8 280MB/s ± 1% txt4 -_UFlat10-8 753MB/s ± 0% pb -_UFlat11-8 400MB/s ± 0% gaviota - -_ZFlat0-8 409MB/s ± 1% html -_ZFlat1-8 250MB/s ± 1% urls -_ZFlat2-8 12.3GB/s ± 1% jpg -_ZFlat3-8 132MB/s ± 0% jpg_200 -_ZFlat4-8 2.92GB/s ± 0% pdf -_ZFlat5-8 405MB/s ± 1% html4 -_ZFlat6-8 179MB/s ± 1% txt1 -_ZFlat7-8 170MB/s ± 1% txt2 -_ZFlat8-8 189MB/s ± 1% txt3 -_ZFlat9-8 164MB/s ± 1% txt4 -_ZFlat10-8 479MB/s ± 1% pb -_ZFlat11-8 270MB/s ± 1% gaviota - - - -For comparison (Go's encoded output is byte-for-byte identical to C++'s), here -are the numbers from C++ Snappy's - -make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log - -BM_UFlat/0 2.4GB/s html -BM_UFlat/1 1.4GB/s urls -BM_UFlat/2 21.8GB/s jpg -BM_UFlat/3 1.5GB/s jpg_200 -BM_UFlat/4 13.3GB/s pdf -BM_UFlat/5 2.1GB/s html4 -BM_UFlat/6 1.0GB/s txt1 -BM_UFlat/7 959.4MB/s txt2 -BM_UFlat/8 1.0GB/s txt3 -BM_UFlat/9 864.5MB/s txt4 -BM_UFlat/10 2.9GB/s pb -BM_UFlat/11 1.2GB/s gaviota - -BM_ZFlat/0 944.3MB/s html (22.31 %) -BM_ZFlat/1 501.6MB/s urls (47.78 %) -BM_ZFlat/2 14.3GB/s jpg (99.95 %) -BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) -BM_ZFlat/4 8.3GB/s pdf (83.30 %) -BM_ZFlat/5 903.5MB/s html4 (22.52 %) -BM_ZFlat/6 336.0MB/s txt1 (57.88 %) -BM_ZFlat/7 312.3MB/s txt2 (61.91 %) -BM_ZFlat/8 353.1MB/s txt3 (54.99 %) -BM_ZFlat/9 289.9MB/s txt4 (66.26 %) -BM_ZFlat/10 1.2GB/s pb (19.68 %) -BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go deleted file mode 100644 index 72efb03..0000000 --- a/vendor/github.com/golang/snappy/decode.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. - ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. 
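Decode's contract above (a nil dst is allowed, and dst is reused when it is big enough) gives the package its allocation-friendly call shape. A hedged round-trip sketch, assuming the matching Encode in this package's encode.go, which is not shown in this diff:

package main

import (
	"fmt"
	"log"

	"github.com/golang/snappy"
)

func main() {
	src := []byte("hello, hello, hello, snappy")

	// Passing nil lets the package size the buffers; passing a previously
	// returned slice back in reuses it when capacity permits.
	compressed := snappy.Encode(nil, src)

	decoded, err := snappy.Decode(nil, compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d -> %d bytes, round-tripped: %s\n", len(src), len(compressed), decoded)
}

Encode and Decode work on whole in-memory blocks; the Reader defined below handles the framed streaming format instead.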
It is valid to pass a nil dst. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. -func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - for { - if r.i < r.j { - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil - } - if !r.readFull(r.buf[:4], true) { - return 0, r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return 0, r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return 0, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return 0, r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return 0, r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return 0, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. 
- n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.decoded[:n], false) { - return 0, r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return 0, r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return 0, r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return 0, r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return 0, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return 0, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return 0, r.err - } - } -} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go deleted file mode 100644 index fcd192b..0000000 --- a/vendor/github.com/golang/snappy/decode_amd64.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// decode has the same semantics as in decode_other.go. -// -//go:noescape -func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s deleted file mode 100644 index e6179f6..0000000 --- a/vendor/github.com/golang/snappy/decode_amd64.s +++ /dev/null @@ -1,490 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The asm code generally follows the pure Go code in decode_other.go, except -// where marked with a "!!!". - -// func decode(dst, src []byte) int -// -// All local variables fit into registers. The non-zero stack size is only to -// spill registers and push args when issuing a CALL. The register allocation: -// - AX scratch -// - BX scratch -// - CX length or x -// - DX offset -// - SI &src[s] -// - DI &dst[d] -// + R8 dst_base -// + R9 dst_len -// + R10 dst_base + dst_len -// + R11 src_base -// + R12 src_len -// + R13 src_base + src_len -// - R14 used by doCopy -// - R15 used by doCopy -// -// The registers R8-R13 (marked with a "+") are set at the start of the -// function, and after a CALL returns, and are not otherwise modified. -// -// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. -// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. -TEXT ·decode(SB), NOSPLIT, $48-56 - // Initialize SI, DI and R8-R13. - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, DI - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, SI - MOVQ R11, R13 - ADDQ R12, R13 - -loop: - // for s < len(src) - CMPQ SI, R13 - JEQ end - - // CX = uint32(src[s]) - // - // switch src[s] & 0x03 - MOVBLZX (SI), CX - MOVL CX, BX - ANDL $3, BX - CMPL BX, $1 - JAE tagCopy - - // ---------------------------------------- - // The code below handles literal tags. 
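The decodedLen helper in the decode.go deletion above fixes the block-format header contract: every block begins with the uvarint-encoded length of its decompressed bytes, and lengths that do not fit in 32 bits are rejected. A minimal standalone sketch of that contract (illustration only, not part of this diff):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// blockDecodedLen mirrors the deleted decodedLen: read the uvarint length
// header and reject lengths that overflow 32 bits.
func blockDecodedLen(src []byte) (blockLen, headerLen int, err error) {
	v, n := binary.Uvarint(src)
	if n <= 0 || v > 0xffffffff {
		return 0, 0, errors.New("snappy: corrupt or oversized length header")
	}
	return int(v), n, nil
}

func main() {
	var hdr [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(hdr[:], 65536) // a block claiming 64 KiB of decoded data
	fmt.Println(blockDecodedLen(hdr[:n])) // 65536 3 <nil>
}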
- - // case tagLiteral: - // x := uint32(src[s] >> 2) - // switch - SHRL $2, CX - CMPL CX, $60 - JAE tagLit60Plus - - // case x < 60: - // s++ - INCQ SI - -doLit: - // This is the end of the inner "switch", when we have a literal tag. - // - // We assume that CX == x and x fits in a uint32, where x is the variable - // used in the pure Go decode_other.go code. - - // length = int(x) + 1 - // - // Unlike the pure Go code, we don't need to check if length <= 0 because - // CX can hold 64 bits, so the increment cannot overflow. - INCQ CX - - // Prepare to check if copying length bytes will run past the end of dst or - // src. - // - // AX = len(dst) - d - // BX = len(src) - s - MOVQ R10, AX - SUBQ DI, AX - MOVQ R13, BX - SUBQ SI, BX - - // !!! Try a faster technique for short (16 or fewer bytes) copies. - // - // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { - // goto callMemmove // Fall back on calling runtime·memmove. - // } - // - // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s - // against 21 instead of 16, because it cannot assume that all of its input - // is contiguous in memory and so it needs to leave enough source bytes to - // read the next tag without refilling buffers, but Go's Decode assumes - // contiguousness (the src argument is a []byte). - CMPQ CX, $16 - JGT callMemmove - CMPQ AX, $16 - JLT callMemmove - CMPQ BX, $16 - JLT callMemmove - - // !!! Implement the copy from src to dst as a 16-byte load and store. - // (Decode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only length bytes, but that's - // OK. If the input is a valid Snappy encoding then subsequent iterations - // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a - // non-nil error), so the overrun will be ignored. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(SI), X0 - MOVOU X0, 0(DI) - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -callMemmove: - // if length > len(dst)-d || length > len(src)-s { etc } - CMPQ CX, AX - JGT errCorrupt - CMPQ CX, BX - JGT errCorrupt - - // copy(dst[d:], src[s:s+length]) - // - // This means calling runtime·memmove(&dst[d], &src[s], length), so we push - // DI, SI and CX as arguments. Coincidentally, we also need to spill those - // three registers to the stack, to save local variables across the CALL. - MOVQ DI, 0(SP) - MOVQ SI, 8(SP) - MOVQ CX, 16(SP) - MOVQ DI, 24(SP) - MOVQ SI, 32(SP) - MOVQ CX, 40(SP) - CALL runtime·memmove(SB) - - // Restore local variables: unspill registers from the stack and - // re-calculate R8-R13. - MOVQ 24(SP), DI - MOVQ 32(SP), SI - MOVQ 40(SP), CX - MOVQ dst_base+0(FP), R8 - MOVQ dst_len+8(FP), R9 - MOVQ R8, R10 - ADDQ R9, R10 - MOVQ src_base+24(FP), R11 - MOVQ src_len+32(FP), R12 - MOVQ R11, R13 - ADDQ R12, R13 - - // d += length - // s += length - ADDQ CX, DI - ADDQ CX, SI - JMP loop - -tagLit60Plus: - // !!! This fragment does the - // - // s += x - 58; if uint(s) > uint(len(src)) { etc } - // - // checks. In the asm version, we code it once instead of once per switch case. 
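The tagLit60Plus fragment continued below is the assembly form of the long-literal cases in decode_other.go: for a tag whose upper six bits x fall in [60, 64), the literal length, minus one, is stored little-endian in the next x-59 bytes. A hedged pure-Go sketch of just that step (the helper name is illustrative; bounds checks omitted for brevity):

// literalLen decodes the literal length for x in [60, 64): read x-59
// trailing bytes little-endian and add one.
func literalLen(x uint32, src []byte) (length, extraBytes int) {
	extraBytes = int(x) - 59 // 1 to 4 trailing length bytes
	var n uint32
	for i := 0; i < extraBytes; i++ {
		n |= uint32(src[i]) << (8 * uint(i))
	}
	return int(n) + 1, extraBytes
}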
- ADDQ CX, SI - SUBQ $58, SI - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // case x == 60: - CMPL CX, $61 - JEQ tagLit61 - JA tagLit62Plus - - // x = uint32(src[s-1]) - MOVBLZX -1(SI), CX - JMP doLit - -tagLit61: - // case x == 61: - // x = uint32(src[s-2]) | uint32(src[s-1])<<8 - MOVWLZX -2(SI), CX - JMP doLit - -tagLit62Plus: - CMPL CX, $62 - JA tagLit63 - - // case x == 62: - // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - MOVWLZX -3(SI), CX - MOVBLZX -1(SI), BX - SHLL $16, BX - ORL BX, CX - JMP doLit - -tagLit63: - // case x == 63: - // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - MOVL -4(SI), CX - JMP doLit - -// The code above handles literal tags. -// ---------------------------------------- -// The code below handles copy tags. - -tagCopy4: - // case tagCopy4: - // s += 5 - ADDQ $5, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-5])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - MOVLQZX -4(SI), DX - JMP doCopy - -tagCopy2: - // case tagCopy2: - // s += 3 - ADDQ $3, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // length = 1 + int(src[s-3])>>2 - SHRQ $2, CX - INCQ CX - - // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - MOVWQZX -2(SI), DX - JMP doCopy - -tagCopy: - // We have a copy tag. We assume that: - // - BX == src[s] & 0x03 - // - CX == src[s] - CMPQ BX, $2 - JEQ tagCopy2 - JA tagCopy4 - - // case tagCopy1: - // s += 2 - ADDQ $2, SI - - // if uint(s) > uint(len(src)) { etc } - MOVQ SI, BX - SUBQ R11, BX - CMPQ BX, R12 - JA errCorrupt - - // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - MOVQ CX, DX - ANDQ $0xe0, DX - SHLQ $3, DX - MOVBQZX -1(SI), BX - ORQ BX, DX - - // length = 4 + int(src[s-2])>>2&0x7 - SHRQ $2, CX - ANDQ $7, CX - ADDQ $4, CX - -doCopy: - // This is the end of the outer "switch", when we have a copy tag. - // - // We assume that: - // - CX == length && CX > 0 - // - DX == offset - - // if offset <= 0 { etc } - CMPQ DX, $0 - JLE errCorrupt - - // if d < offset { etc } - MOVQ DI, BX - SUBQ R8, BX - CMPQ BX, DX - JLT errCorrupt - - // if length > len(dst)-d { etc } - MOVQ R10, BX - SUBQ DI, BX - CMPQ CX, BX - JGT errCorrupt - - // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length - // - // Set: - // - R14 = len(dst)-d - // - R15 = &dst[d-offset] - MOVQ R10, R14 - SUBQ DI, R14 - MOVQ DI, R15 - SUBQ DX, R15 - - // !!! Try a faster technique for short (16 or fewer bytes) forward copies. - // - // First, try using two 8-byte load/stores, similar to the doLit technique - // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is - // still OK if offset >= 8. Note that this has to be two 8-byte load/stores - // and not one 16-byte load/store, and the first store has to be before the - // second load, due to the overlap if offset is in the range [8, 16). - // - // if length > 16 || offset < 8 || len(dst)-d < 16 { - // goto slowForwardCopy - // } - // copy 16 bytes - // d += length - CMPQ CX, $16 - JGT slowForwardCopy - CMPQ DX, $8 - JLT slowForwardCopy - CMPQ R14, $16 - JLT slowForwardCopy - MOVQ 0(R15), AX - MOVQ AX, 0(DI) - MOVQ 8(R15), BX - MOVQ BX, 8(DI) - ADDQ CX, DI - JMP loop - -slowForwardCopy: - // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we - // can still try 8-byte load stores, provided we can overrun up to 10 extra - // bytes. As above, the overrun will be fixed up by subsequent iterations - // of the outermost loop. - // - // The C++ snappy code calls this technique IncrementalCopyFastPath. Its - // commentary says: - // - // ---- - // - // The main part of this loop is a simple copy of eight bytes at a time - // until we've copied (at least) the requested amount of bytes. However, - // if d and d-offset are less than eight bytes apart (indicating a - // repeating pattern of length < 8), we first need to expand the pattern in - // order to get the correct results. For instance, if the buffer looks like - // this, with the eight-byte d-offset and d patterns marked as - // intervals: - // - // abxxxxxxxxxxxx - // [------] d-offset - // [------] d - // - // a single eight-byte copy from d-offset to d will repeat the pattern - // once, after which we can move d two bytes without moving d-offset: - // - // ababxxxxxxxxxx - // [------] d-offset - // [------] d - // - // and repeat the exercise until the two no longer overlap. - // - // This allows us to do very well in the special case of one single byte - // repeated many times, without taking a big hit for more general cases. - // - // The worst case of extra writing past the end of the match occurs when - // offset == 1 and length == 1; the last copy will read from byte positions - // [0..7] and write to [4..11], whereas it was only supposed to write to - // position 1. Thus, ten excess bytes. - // - // ---- - // - // That "10 byte overrun" worst case is confirmed by Go's - // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy - // and finishSlowForwardCopy algorithm. - // - // if length > len(dst)-d-10 { - // goto verySlowForwardCopy - // } - SUBQ $10, R14 - CMPQ CX, R14 - JGT verySlowForwardCopy - -makeOffsetAtLeast8: - // !!! As above, expand the pattern so that offset >= 8 and we can use - // 8-byte load/stores. - // - // for offset < 8 { - // copy 8 bytes from dst[d-offset:] to dst[d:] - // length -= offset - // d += offset - // offset += offset - // // The two previous lines together means that d-offset, and therefore - // // R15, is unchanged. - // } - CMPQ DX, $8 - JGE fixUpSlowForwardCopy - MOVQ (R15), BX - MOVQ BX, (DI) - SUBQ DX, CX - ADDQ DX, DI - ADDQ DX, DX - JMP makeOffsetAtLeast8 - -fixUpSlowForwardCopy: - // !!! Add length (which might be negative now) to d (implied by DI being - // &dst[d]) so that d ends up at the right place when we jump back to the - // top of the loop. Before we do that, though, we save DI to AX so that, if - // length is positive, copying the remaining length bytes will write to the - // right place. - MOVQ DI, AX - ADDQ CX, DI - -finishSlowForwardCopy: - // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative - // length means that we overrun, but as above, that will be fixed up by - // subsequent iterations of the outermost loop. - CMPQ CX, $0 - JLE loop - MOVQ (R15), BX - MOVQ BX, (AX) - ADDQ $8, R15 - ADDQ $8, AX - SUBQ $8, CX - JMP finishSlowForwardCopy - -verySlowForwardCopy: - // verySlowForwardCopy is a simple implementation of forward copy. In C - // parlance, this is a do/while loop instead of a while loop, since we know - // that length > 0.
In Go syntax: - // - // for { - // dst[d] = dst[d - offset] - // d++ - // length-- - // if length == 0 { - // break - // } - // } - MOVB (R15), BX - MOVB BX, (DI) - INCQ R15 - INCQ DI - DECQ CX - JNZ verySlowForwardCopy - JMP loop - -// The code above handles copy tags. -// ---------------------------------------- - -end: - // This is the end of the "for s < len(src)". - // - // if d != len(dst) { etc } - CMPQ DI, R10 - JNE errCorrupt - - // return 0 - MOVQ $0, ret+48(FP) - RET - -errCorrupt: - // return decodeErrCodeCorrupt - MOVQ $1, ret+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go deleted file mode 100644 index 8c9f204..0000000 --- a/vendor/github.com/golang/snappy/decode_other.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. 
Unlike - // the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - for end := d + length; d != end; d++ { - dst[d] = dst[d-offset] - } - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go deleted file mode 100644 index 8d393e9..0000000 --- a/vendor/github.com/golang/snappy/encode.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snappy - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. 
-// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. -func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. 
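The blowup bound derived in MaxEncodedLen above is easy to check by hand; for the 65536-byte maximum block size it reproduces the constant hard-coded later in snappy.go (a quick sketch, illustration only):

// Worst case per MaxEncodedLen: 6 input bytes can become 7 output bytes,
// so the bound is 32 + n + n/6.
const srcLen = 65536                 // maxBlockSize
const bound = 32 + srcLen + srcLen/6 // 76490 == maxEncodedLenOfMaxBlockSize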
This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. -func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go deleted file mode 100644 index 150d91b..0000000 --- a/vendor/github.com/golang/snappy/encode_amd64.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -package snappy - -// emitLiteral has the same semantics as in encode_other.go. -// -//go:noescape -func emitLiteral(dst, lit []byte) int - -// emitCopy has the same semantics as in encode_other.go. 
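The write method in the encode.go deletion above assembles the framing-format per-chunk header by hand: one chunk-type byte, a 3-byte little-endian length that counts the 4 checksum bytes plus the body, then the 4-byte little-endian checksum. A standalone sketch of that layout (the helper name is illustrative, not part of this diff):

// chunkHeader lays out the 8-byte framing-format chunk header exactly as
// the deleted write method does.
func chunkHeader(chunkType uint8, checksum uint32, bodyLen int) [8]byte {
	chunkLen := 4 + bodyLen // the checksum counts toward the chunk length
	return [8]byte{
		chunkType,
		uint8(chunkLen >> 0), uint8(chunkLen >> 8), uint8(chunkLen >> 16),
		uint8(checksum >> 0), uint8(checksum >> 8), uint8(checksum >> 16), uint8(checksum >> 24),
	}
}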
-// -//go:noescape -func emitCopy(dst []byte, offset, length int) int - -// extendMatch has the same semantics as in encode_other.go. -// -//go:noescape -func extendMatch(src []byte, i, j int) int - -// encodeBlock has the same semantics as in encode_other.go. -// -//go:noescape -func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s deleted file mode 100644 index adfd979..0000000 --- a/vendor/github.com/golang/snappy/encode_amd64.s +++ /dev/null @@ -1,730 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" - -// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a -// Go toolchain regression. See https://github.com/golang/go/issues/15426 and -// https://github.com/golang/snappy/issues/29 -// -// As a workaround, the package was built with a known good assembler, and -// those instructions were disassembled by "objdump -d" to yield the -// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 -// style comments, in AT&T asm syntax. Note that rsp here is a physical -// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). -// The instructions were then encoded as "BYTE $0x.." sequences, which assemble -// fine on Go 1.6. - -// The asm code generally follows the pure Go code in encode_other.go, except -// where marked with a "!!!". - -// ---------------------------------------------------------------------------- - -// func emitLiteral(dst, lit []byte) int -// -// All local variables fit into registers. The register allocation: -// - AX len(lit) -// - BX n -// - DX return value -// - DI &dst[i] -// - R10 &lit[0] -// -// The 24 bytes of stack space is to call runtime·memmove. -// -// The unusual register allocation of local variables, such as R10 for the -// source pointer, matches the allocation used at the call site in encodeBlock, -// which makes it easier to manually inline this function. -TEXT ·emitLiteral(SB), NOSPLIT, $24-56 - MOVQ dst_base+0(FP), DI - MOVQ lit_base+24(FP), R10 - MOVQ lit_len+32(FP), AX - MOVQ AX, DX - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT oneByte - CMPL BX, $256 - JLT twoBytes - -threeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - ADDQ $3, DX - JMP memmove - -twoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - ADDQ $2, DX - JMP memmove - -oneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - ADDQ $1, DX - -memmove: - MOVQ DX, ret+48(FP) - - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - CALL runtime·memmove(SB) - RET - -// ---------------------------------------------------------------------------- - -// func emitCopy(dst []byte, offset, length int) int -// -// All local variables fit into registers. The register allocation: -// - AX length -// - SI &dst[0] -// - DI &dst[i] -// - R11 offset -// -// The unusual register allocation of local variables, such as R11 for the -// offset, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. 
-TEXT ·emitCopy(SB), NOSPLIT, $0-48 - MOVQ dst_base+0(FP), DI - MOVQ DI, SI - MOVQ offset+24(FP), R11 - MOVQ length+32(FP), AX - -loop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT step1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP loop0 - -step1: - // if length > 64 { etc } - CMPL AX, $64 - JLE step2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -step2: - // if length >= 12 || offset >= 2048 { goto step3 } - CMPL AX, $12 - JGE step3 - CMPL R11, $2048 - JGE step3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -step3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - - // Return the number of bytes written. - SUBQ SI, DI - MOVQ DI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func extendMatch(src []byte, i, j int) int -// -// All local variables fit into registers. The register allocation: -// - DX &src[0] -// - SI &src[j] -// - R13 &src[len(src) - 8] -// - R14 &src[len(src)] -// - R15 &src[i] -// -// The unusual register allocation of local variables, such as R15 for a source -// pointer, matches the allocation used at the call site in encodeBlock, which -// makes it easier to manually inline this function. -TEXT ·extendMatch(SB), NOSPLIT, $0-48 - MOVQ src_base+0(FP), DX - MOVQ src_len+8(FP), R14 - MOVQ i+24(FP), R15 - MOVQ j+32(FP), SI - ADDQ DX, R14 - ADDQ DX, R15 - ADDQ DX, SI - MOVQ R14, R13 - SUBQ $8, R13 - -cmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA cmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE bsf - ADDQ $8, R15 - ADDQ $8, SI - JMP cmp8 - -bsf: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -cmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE extendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE extendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP cmp1 - -extendMatchEnd: - // Convert from &src[ret] to ret. - SUBQ DX, SI - MOVQ SI, ret+40(FP) - RET - -// ---------------------------------------------------------------------------- - -// func encodeBlock(dst, src []byte) (d int) -// -// All local variables fit into registers, other than "var table". The register -// allocation: -// - AX . . -// - BX . . -// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). -// - DX 64 &src[0], tableSize -// - SI 72 &src[s] -// - DI 80 &dst[d] -// - R9 88 sLimit -// - R10 . &src[nextEmit] -// - R11 96 prevHash, currHash, nextHash, offset -// - R12 104 &src[base], skip -// - R13 . &src[nextS], &src[len(src) - 8] -// - R14 . 
len(src), bytesBetweenHashLookups, &src[len(src)], x -// - R15 112 candidate -// -// The second column (56, 64, etc) is the stack offset to spill the registers -// when calling other functions. We could pack this slightly tighter, but it's -// simpler to have a dedicated spill map independent of the function called. -// -// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An -// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill -// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. -TEXT ·encodeBlock(SB), 0, $32888-56 - MOVQ dst_base+0(FP), DI - MOVQ src_base+24(FP), SI - MOVQ src_len+32(FP), R14 - - // shift, tableSize := uint32(32-8), 1<<8 - MOVQ $24, CX - MOVQ $256, DX - -calcShift: - // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - // shift-- - // } - CMPQ DX, $16384 - JGE varTable - CMPQ DX, R14 - JGE varTable - SUBQ $1, CX - SHLQ $1, DX - JMP calcShift - -varTable: - // var table [maxTableSize]uint16 - // - // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU - // writes 16 bytes, so we can do only tableSize/8 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - SHRQ $3, DX - LEAQ table-32768(SP), BX - PXOR X0, X0 - -memclr: - MOVOU X0, 0(BX) - ADDQ $16, BX - SUBQ $1, DX - JNZ memclr - - // !!! DX = &src[0] - MOVQ SI, DX - - // sLimit := len(src) - inputMargin - MOVQ R14, R9 - SUBQ $15, R9 - - // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't - // change for the rest of the function. - MOVQ CX, 56(SP) - MOVQ DX, 64(SP) - MOVQ R9, 88(SP) - - // nextEmit := 0 - MOVQ DX, R10 - - // s := 1 - ADDQ $1, SI - - // nextHash := hash(load32(src, s), shift) - MOVL 0(SI), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - -outer: - // for { etc } - - // skip := 32 - MOVQ $32, R12 - - // nextS := s - MOVQ SI, R13 - - // candidate := 0 - MOVQ $0, R15 - -inner0: - // for { etc } - - // s := nextS - MOVQ R13, SI - - // bytesBetweenHashLookups := skip >> 5 - MOVQ R12, R14 - SHRQ $5, R14 - - // nextS = s + bytesBetweenHashLookups - ADDQ R14, R13 - - // skip += bytesBetweenHashLookups - ADDQ R14, R12 - - // if nextS > sLimit { goto emitRemainder } - MOVQ R13, AX - SUBQ DX, AX - CMPQ AX, R9 - JA emitRemainder - - // candidate = int(table[nextHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[nextHash] = uint16(s) - MOVQ SI, AX - SUBQ DX, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // nextHash = hash(load32(src, nextS), shift) - MOVL 0(R13), R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // if load32(src, s) != load32(src, candidate) { continue } break - MOVL 0(SI), AX - MOVL (DX)(R15*1), BX - CMPL AX, BX - JNE inner0 - -fourByteMatch: - // As per the encode_other.go code: - // - // A 4-byte match has been found. We'll later see etc. - - // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment - // on inputMargin in encode.go. - MOVQ SI, AX - SUBQ R10, AX - CMPQ AX, $16 - JLE emitLiteralFastPath - - // ---------------------------------------- - // Begin inline of the emitLiteral call. 
- // - // d += emitLiteral(dst[d:], src[nextEmit:s]) - - MOVL AX, BX - SUBL $1, BX - - CMPL BX, $60 - JLT inlineEmitLiteralOneByte - CMPL BX, $256 - JLT inlineEmitLiteralTwoBytes - -inlineEmitLiteralThreeBytes: - MOVB $0xf4, 0(DI) - MOVW BX, 1(DI) - ADDQ $3, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralTwoBytes: - MOVB $0xf0, 0(DI) - MOVB BX, 1(DI) - ADDQ $2, DI - JMP inlineEmitLiteralMemmove - -inlineEmitLiteralOneByte: - SHLB $2, BX - MOVB BX, 0(DI) - ADDQ $1, DI - -inlineEmitLiteralMemmove: - // Spill local variables (registers) onto the stack; call; unspill. - // - // copy(dst[i:], lit) - // - // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push - // DI, R10 and AX as arguments. - MOVQ DI, 0(SP) - MOVQ R10, 8(SP) - MOVQ AX, 16(SP) - ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". - MOVQ SI, 72(SP) - MOVQ DI, 80(SP) - MOVQ R15, 112(SP) - CALL runtime·memmove(SB) - MOVQ 56(SP), CX - MOVQ 64(SP), DX - MOVQ 72(SP), SI - MOVQ 80(SP), DI - MOVQ 88(SP), R9 - MOVQ 112(SP), R15 - JMP inner1 - -inlineEmitLiteralEnd: - // End inline of the emitLiteral call. - // ---------------------------------------- - -emitLiteralFastPath: - // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". - MOVB AX, BX - SUBB $1, BX - SHLB $2, BX - MOVB BX, (DI) - ADDQ $1, DI - - // !!! Implement the copy from lit to dst as a 16-byte load and store. - // (Encode's documentation says that dst and src must not overlap.) - // - // This always copies 16 bytes, instead of only len(lit) bytes, but that's - // OK. Subsequent iterations will fix up the overrun. - // - // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or - // 16-byte loads and stores. This technique probably wouldn't be as - // effective on architectures that are fussier about alignment. - MOVOU 0(R10), X0 - MOVOU X0, 0(DI) - ADDQ AX, DI - -inner1: - // for { etc } - - // base := s - MOVQ SI, R12 - - // !!! offset := base - candidate - MOVQ R12, R11 - SUBQ R15, R11 - SUBQ DX, R11 - - // ---------------------------------------- - // Begin inline of the extendMatch call. - // - // s = extendMatch(src, candidate+4, s+4) - - // !!! R14 = &src[len(src)] - MOVQ src_len+32(FP), R14 - ADDQ DX, R14 - - // !!! R13 = &src[len(src) - 8] - MOVQ R14, R13 - SUBQ $8, R13 - - // !!! R15 = &src[candidate + 4] - ADDQ $4, R15 - ADDQ DX, R15 - - // !!! s += 4 - ADDQ $4, SI - -inlineExtendMatchCmp8: - // As long as we are 8 or more bytes before the end of src, we can load and - // compare 8 bytes at a time. If those 8 bytes are equal, repeat. - CMPQ SI, R13 - JA inlineExtendMatchCmp1 - MOVQ (R15), AX - MOVQ (SI), BX - CMPQ AX, BX - JNE inlineExtendMatchBSF - ADDQ $8, R15 - ADDQ $8, SI - JMP inlineExtendMatchCmp8 - -inlineExtendMatchBSF: - // If those 8 bytes were not equal, XOR the two 8 byte values, and return - // the index of the first byte that differs. The BSF instruction finds the - // least significant 1 bit, the amd64 architecture is little-endian, and - // the shift by 3 converts a bit index to a byte index. - XORQ AX, BX - BSFQ BX, BX - SHRQ $3, BX - ADDQ BX, SI - JMP inlineExtendMatchEnd - -inlineExtendMatchCmp1: - // In src's tail, compare 1 byte at a time. - CMPQ SI, R14 - JAE inlineExtendMatchEnd - MOVB (R15), AX - MOVB (SI), BX - CMPB AX, BX - JNE inlineExtendMatchEnd - ADDQ $1, R15 - ADDQ $1, SI - JMP inlineExtendMatchCmp1 - -inlineExtendMatchEnd: - // End inline of the extendMatch call. 
- // ---------------------------------------- - - // ---------------------------------------- - // Begin inline of the emitCopy call. - // - // d += emitCopy(dst[d:], base-candidate, s-base) - - // !!! length := s - base - MOVQ SI, AX - SUBQ R12, AX - -inlineEmitCopyLoop0: - // for length >= 68 { etc } - CMPL AX, $68 - JLT inlineEmitCopyStep1 - - // Emit a length 64 copy, encoded as 3 bytes. - MOVB $0xfe, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $64, AX - JMP inlineEmitCopyLoop0 - -inlineEmitCopyStep1: - // if length > 64 { etc } - CMPL AX, $64 - JLE inlineEmitCopyStep2 - - // Emit a length 60 copy, encoded as 3 bytes. - MOVB $0xee, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - SUBL $60, AX - -inlineEmitCopyStep2: - // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } - CMPL AX, $12 - JGE inlineEmitCopyStep3 - CMPL R11, $2048 - JGE inlineEmitCopyStep3 - - // Emit the remaining copy, encoded as 2 bytes. - MOVB R11, 1(DI) - SHRL $8, R11 - SHLB $5, R11 - SUBB $4, AX - SHLB $2, AX - ORB AX, R11 - ORB $1, R11 - MOVB R11, 0(DI) - ADDQ $2, DI - JMP inlineEmitCopyEnd - -inlineEmitCopyStep3: - // Emit the remaining copy, encoded as 3 bytes. - SUBL $1, AX - SHLB $2, AX - ORB $2, AX - MOVB AX, 0(DI) - MOVW R11, 1(DI) - ADDQ $3, DI - -inlineEmitCopyEnd: - // End inline of the emitCopy call. - // ---------------------------------------- - - // nextEmit = s - MOVQ SI, R10 - - // if s >= sLimit { goto emitRemainder } - MOVQ SI, AX - SUBQ DX, AX - CMPQ AX, R9 - JAE emitRemainder - - // As per the encode_other.go code: - // - // We could immediately etc. - - // x := load64(src, s-1) - MOVQ -1(SI), R14 - - // prevHash := hash(uint32(x>>0), shift) - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // table[prevHash] = uint16(s-1) - MOVQ SI, AX - SUBQ DX, AX - SUBQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // currHash := hash(uint32(x>>8), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // candidate = int(table[currHash]) - // XXX: MOVWQZX table-32768(SP)(R11*2), R15 - // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 - BYTE $0x4e - BYTE $0x0f - BYTE $0xb7 - BYTE $0x7c - BYTE $0x5c - BYTE $0x78 - - // table[currHash] = uint16(s) - ADDQ $1, AX - - // XXX: MOVW AX, table-32768(SP)(R11*2) - // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) - BYTE $0x66 - BYTE $0x42 - BYTE $0x89 - BYTE $0x44 - BYTE $0x5c - BYTE $0x78 - - // if uint32(x>>8) == load32(src, candidate) { continue } - MOVL (DX)(R15*1), BX - CMPL R14, BX - JEQ inner1 - - // nextHash = hash(uint32(x>>16), shift) - SHRQ $8, R14 - MOVL R14, R11 - IMULL $0x1e35a7bd, R11 - SHRL CX, R11 - - // s++ - ADDQ $1, SI - - // break out of the inner1 for loop, i.e. continue the outer loop. - JMP outer - -emitRemainder: - // if nextEmit < len(src) { etc } - MOVQ src_len+32(FP), AX - ADDQ DX, AX - CMPQ R10, AX - JEQ encodeBlockEnd - - // d += emitLiteral(dst[d:], src[nextEmit:]) - // - // Push args. - MOVQ DI, 0(SP) - MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. - MOVQ R10, 24(SP) - SUBQ R10, AX - MOVQ AX, 32(SP) - MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. - - // Spill local variables (registers) onto the stack; call; unspill. 
- MOVQ DI, 80(SP) - CALL ·emitLiteral(SB) - MOVQ 80(SP), DI - - // Finish the "d +=" part of "d += emitLiteral(etc)". - ADDQ 48(SP), DI - -encodeBlockEnd: - MOVQ dst_base+0(FP), AX - SUBQ AX, DI - MOVQ DI, d+48(FP) - RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go deleted file mode 100644 index dbcae90..0000000 --- a/vendor/github.com/golang/snappy/encode_other.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !amd64 appengine !gc noasm - -package snappy - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. 
- dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -// extendMatch returns the largest k such that k <= len(src) and that -// src[i:i+k-j] and src[j:k] have the same contents. -// -// It assumes that: -// 0 <= i && i < j && j <= len(src) -func extendMatch(src []byte, i, j int) int { - for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { - } - return j -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. 
But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. - d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod deleted file mode 100644 index f6406bb..0000000 --- a/vendor/github.com/golang/snappy/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/golang/snappy diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go deleted file mode 100644 index ece692e..0000000 --- a/vendor/github.com/golang/snappy/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snappy implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. 
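The package comment above draws the block-versus-stream distinction that the rest of snappy.go encodes. As a usage sketch of both formats against the upstream github.com/golang/snappy module this vendored copy tracks (illustration only, not part of this diff):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/golang/snappy"
)

func main() {
	data := []byte("block format vs stream (framing) format")

	// Block format: the decoded size is known up front.
	block := snappy.Encode(nil, data)
	decoded, err := snappy.Decode(nil, block)
	fmt.Println(bytes.Equal(decoded, data), err) // true <nil>

	// Stream format: framed chunks; Close flushes the buffered writer.
	var buf bytes.Buffer
	w := snappy.NewBufferedWriter(&buf)
	w.Write(data) // error handling elided for brevity
	w.Close()
	streamed, _ := ioutil.ReadAll(snappy.NewReader(&buf))
	fmt.Println(bytes.Equal(streamed, data)) // true
}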
-package snappy // import "github.com/golang/snappy" - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. - -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. 
- maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/mailru/easyjson/.gitignore b/vendor/github.com/mailru/easyjson/.gitignore deleted file mode 100644 index fbfaf7a..0000000 --- a/vendor/github.com/mailru/easyjson/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.root -*_easyjson.go -*.iml -.idea -*.swp -bin/* diff --git a/vendor/github.com/mailru/easyjson/.travis.yml b/vendor/github.com/mailru/easyjson/.travis.yml deleted file mode 100644 index 0ececa0..0000000 --- a/vendor/github.com/mailru/easyjson/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go - -go: - - tip - - stable - -matrix: - allow_failures: - - go: tip - -install: - - go get golang.org/x/lint/golint diff --git a/vendor/github.com/mailru/easyjson/LICENSE b/vendor/github.com/mailru/easyjson/LICENSE deleted file mode 100644 index fbff658..0000000 --- a/vendor/github.com/mailru/easyjson/LICENSE +++ /dev/null @@ -1,7 +0,0 @@ -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
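The format comment in the deleted snappy.go above is dense; as a concrete illustration, here is a minimal sketch (a hypothetical helper, not part of the vendored code) of how the first tag byte of a block body decodes under that layout:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// classifyFirstChunk reads the varint-encoded decoded length at the start of
// a snappy block and classifies the first chunk's tag byte, following the
// layout in the format comment above: the low 2 bits are the chunk tag l,
// the high 6 bits are m.
func classifyFirstChunk(block []byte) {
	decodedLen, n := binary.Uvarint(block)
	if n <= 0 || n >= len(block) {
		fmt.Println("invalid or empty block")
		return
	}
	tag := block[n]
	l, m := tag&0x03, tag>>2
	fmt.Printf("decoded length: %d\n", decodedLen)
	switch l {
	case 0x00: // tagLiteral: for m < 60 the next 1+m bytes are literal bytes.
		fmt.Printf("literal chunk, next %d bytes are literals\n", 1+int(m))
	case 0x01: // tagCopy1: length is 4 + the low 3 bits of m.
		fmt.Printf("copy1 chunk, length %d\n", 4+int(m&0x07))
	case 0x02: // tagCopy2: length is 1 + m, a 2-byte offset follows.
		fmt.Printf("copy2 chunk, length %d\n", 1+int(m))
	case 0x03: // tagCopy4: legacy tag, length is 1 + m, a 4-byte offset follows.
		fmt.Printf("copy4 chunk, length %d\n", 1+int(m))
	}
}

func main() {
	// "abc" as a snappy block: varint length 3, then a literal tag with
	// m = len-1 = 2, then the three literal bytes.
	classifyFirstChunk([]byte{0x03, 0x02 << 2, 'a', 'b', 'c'})
}
```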
diff --git a/vendor/github.com/mailru/easyjson/Makefile b/vendor/github.com/mailru/easyjson/Makefile
deleted file mode 100644
index 80449f0..0000000
--- a/vendor/github.com/mailru/easyjson/Makefile
+++ /dev/null
@@ -1,56 +0,0 @@
-all: test
-
-clean:
-	rm -rf bin
-	rm -rf tests/*_easyjson.go
-	rm -rf benchmark/*_easyjson.go
-
-build:
-	go build -i -o ./bin/easyjson ./easyjson
-
-generate: build
-	bin/easyjson -stubs \
-		./tests/snake.go \
-		./tests/data.go \
-		./tests/omitempty.go \
-		./tests/nothing.go \
-		./tests/named_type.go \
-		./tests/custom_map_key_type.go \
-		./tests/embedded_type.go \
-		./tests/reference_to_pointer.go \
-		./tests/html.go \
-		./tests/unknown_fields.go \
-
-	bin/easyjson -all ./tests/data.go
-	bin/easyjson -all ./tests/nothing.go
-	bin/easyjson -all ./tests/errors.go
-	bin/easyjson -all ./tests/html.go
-	bin/easyjson -snake_case ./tests/snake.go
-	bin/easyjson -omit_empty ./tests/omitempty.go
-	bin/easyjson -build_tags=use_easyjson ./benchmark/data.go
-	bin/easyjson ./tests/nested_easy.go
-	bin/easyjson ./tests/named_type.go
-	bin/easyjson ./tests/custom_map_key_type.go
-	bin/easyjson ./tests/embedded_type.go
-	bin/easyjson ./tests/reference_to_pointer.go
-	bin/easyjson ./tests/key_marshaler_map.go
-	bin/easyjson -disallow_unknown_fields ./tests/disallow_unknown.go
-	bin/easyjson ./tests/unknown_fields.go
-
-test: generate
-	go test \
-		./tests \
-		./jlexer \
-		./gen \
-		./buffer
-	cd benchmark && go test -benchmem -tags use_easyjson -bench .
-	golint -set_exit_status ./tests/*_easyjson.go
-
-bench-other: generate
-	cd benchmark && make
-
-bench-python:
-	benchmark/ujson.sh
-
-
-.PHONY: clean generate test build
diff --git a/vendor/github.com/mailru/easyjson/README.md b/vendor/github.com/mailru/easyjson/README.md
deleted file mode 100644
index 3bdcf2d..0000000
--- a/vendor/github.com/mailru/easyjson/README.md
+++ /dev/null
@@ -1,336 +0,0 @@
-# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson)
-
-Package easyjson provides a fast and easy way to marshal/unmarshal Go structs
-to/from JSON without the use of reflection. In performance tests, easyjson
-outperforms the standard `encoding/json` package by a factor of 4-5x, and other
-JSON encoding packages by a factor of 2-3x.
-
-easyjson aims to keep generated Go code simple enough so that it can be easily
-optimized or fixed. Another goal is to provide users with the ability to
-customize the generated code by providing options not available with the
-standard `encoding/json` package, such as generating "snake_case" names or
-enabling `omitempty` behavior by default.
-
-## Usage
-```sh
-# install
-go get -u github.com/mailru/easyjson/...
-
-# run
-easyjson -all <file>.go
-```
-
-The above will generate `<file>_easyjson.go` containing the appropriate marshaler
-and unmarshaler funcs for all structs contained in `<file>.go`.
-
-Please note that easyjson requires a full Go build environment and the `GOPATH`
-environment variable to be set. This is because easyjson code generation
-invokes `go run` on a temporary file (an approach to code generation borrowed
-from [ffjson](https://github.com/pquerna/ffjson)).
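For projects that vendor the generator, a common way to wire it into a build is a `go:generate` directive; a small sketch (illustrative file and type names, assuming the `easyjson` binary is on `PATH`):

```go
package model

//go:generate easyjson model.go

// Running `go generate ./...` invokes easyjson on this file and writes
// MarshalEasyJSON/UnmarshalEasyJSON funcs for User to model_easyjson.go.

//easyjson:json
type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}
```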
-
-## Options
-```txt
-Usage of easyjson:
-  -all
-        generate marshaler/unmarshalers for all structs in a file
-  -build_tags string
-        build tags to add to generated file
-  -leave_temps
-        do not delete temporary files
-  -no_std_marshalers
-        don't generate MarshalJSON/UnmarshalJSON funcs
-  -noformat
-        do not run 'gofmt -w' on output file
-  -omit_empty
-        omit empty fields by default
-  -output_filename string
-        specify the filename of the output
-  -pkg
-        process the whole package instead of just the given file
-  -snake_case
-        use snake_case names instead of CamelCase by default
-  -lower_camel_case
-        use lowerCamelCase instead of CamelCase by default
-  -stubs
-        only generate stubs for marshaler/unmarshaler funcs
-  -disallow_unknown_fields
-        return an error if an unknown field appears in the JSON
-```
-
-Using `-all` will generate marshalers/unmarshalers for all Go structs in the
-file. If `-all` is not provided, then only those structs whose preceding
-comment starts with `easyjson:json` will have marshalers/unmarshalers
-generated. For example:
-
-```go
-//easyjson:json
-type A struct {}
-```
-
-Additional option notes:
-
-* `-snake_case` tells easyjson to generate snake\_case field names by default
-  (unless overridden by a field tag). The CamelCase to snake\_case conversion
-  algorithm should work in most cases (i.e., HTTPVersion will be converted to
-  "http_version").
-
-* `-build_tags` will add the specified build tags to generated Go sources.
-
-## Generated Marshaler/Unmarshaler Funcs
-
-For Go struct types, easyjson generates the funcs `MarshalEasyJSON` /
-`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisfy
-the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and, when used
-in conjunction with `easyjson.Marshal` / `easyjson.Unmarshal`, avoid
-unnecessary reflection / type assertions during marshaling/unmarshaling
-to/from JSON for Go structs.
-
-easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct
-types compatible with the standard `json.Marshaler` and `json.Unmarshaler`
-interfaces. Please be aware that using the standard `json.Marshal` /
-`json.Unmarshal` for marshaling/unmarshaling will incur a significant
-performance penalty when compared to using `easyjson.Marshal` /
-`easyjson.Unmarshal`.
-
-Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON`
-and `UnmarshalEasyJSON` funcs for marshaling/unmarshaling to and from standard
-readers and writers. For example, easyjson provides
-`easyjson.MarshalToHTTPResponseWriter`, which marshals to the standard
-`http.ResponseWriter`. Please see the [GoDoc
-listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of
-utility funcs that are available.
-
-## Controlling easyjson Marshaling and Unmarshaling Behavior
-
-Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs
-that satisfy the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces.
-These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined
-for a Go type.
-
-Go types can also satisfy the `easyjson.Optional` interface, which allows the
-type to define its own `omitempty` logic.
-
-## Type Wrappers
-
-easyjson provides additional type wrappers defined in the `easyjson/opt`
-package. These wrap the standard Go primitives and in turn satisfy the
-easyjson interfaces.
-
-The `easyjson/opt` type wrappers are useful when needing to distinguish
-between a missing value and/or when needing to specify a default value.
-Type wrappers allow easyjson to avoid additional pointers and heap allocations
-and can significantly increase performance when used properly.
-
-## Memory Pooling
-
-easyjson uses a buffer pool that allocates data in increasing chunks from 128
-to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of
-`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory
-allocation and to allow larger reusable buffers.
-
-easyjson's custom allocation buffer pool is defined in the `easyjson/buffer`
-package, and the default pool behavior can be modified (if necessary) through
-a call to `buffer.Init()` prior to any marshaling or unmarshaling. Please see
-the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer) for
-more information.
-
-## Issues, Notes, and Limitations
-
-* easyjson is still early in its development. As such, there are likely to be
-  bugs and missing features when compared to `encoding/json`. In the case of a
-  missing feature or bug, please create a GitHub issue. Pull requests are
-  welcome!
-
-* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive
-  matching is not currently provided due to the significant performance hit
-  when doing case-insensitive key matching. In the future, case-insensitive
-  object key matching may be provided via an option to the generator.
-
-* easyjson makes use of `unsafe`, which simplifies the code and
-  provides significant performance benefits by allowing no-copy
-  conversion from `[]byte` to `string`. That said, `unsafe` is used
-  only when unmarshaling and parsing JSON, and any `unsafe` operations
-  / memory allocations done will be safely deallocated by
-  easyjson. Set the build tag `easyjson_nounsafe` to compile it
-  without `unsafe`.
-
-* easyjson is compatible with Google App Engine. The `appengine` build
-  tag (set by App Engine's environment) will automatically disable the
-  use of `unsafe`, which is not allowed in App Engine's Standard
-  Environment. Note that the use with App Engine is still experimental.
-
-* Floats are formatted using the default precision from Go's `strconv`
-  package. As such, easyjson will not correctly handle high precision floats
-  when marshaling/unmarshaling JSON. Note, however, that there are very
-  few/limited uses where this behavior is not sufficient for general use.
-  That said, a different package may be needed if precise
-  marshaling/unmarshaling of high precision floats to/from JSON is required.
-
-* While unmarshaling, the JSON parser does the minimal amount of work needed
-  to skip over non-matching parens, and as such full validation is not done
-  for the entire JSON value being unmarshaled/parsed.
-
-* Currently there is no true streaming support for encoding/decoding, as
-  typically, for many uses/protocols, the final, marshaled length of the JSON
-  needs to be known prior to sending the data. Currently this is not possible
-  with easyjson's architecture.
-
-* The easyjson parser and code generator are based on reflection, so they
-  won't work on `package main` files, because such files cannot be imported
-  by the parser.
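Before the benchmarks, a minimal hand-written implementation of the marshaler interface described above (a sketch that mirrors the shape of generated code; the generator's real output is more elaborate):

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson"
	"github.com/mailru/easyjson/jwriter"
)

// Point implements easyjson.Marshaler by hand; the generator would normally
// emit this for you.
type Point struct {
	X, Y int
}

func (p Point) MarshalEasyJSON(w *jwriter.Writer) {
	w.RawByte('{')
	w.RawString(`"x":`)
	w.Int(p.X)
	w.RawString(`,"y":`)
	w.Int(p.Y)
	w.RawByte('}')
}

func main() {
	data, err := easyjson.Marshal(Point{X: 1, Y: 2})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // {"x":1,"y":2}
}
```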
-
-## Benchmarks
-
-Most benchmarks were done using the example
-[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets)
-(9k after eliminating whitespace). This example is similar to real-world data,
-is well-structured, and contains a healthy variety of different types, making
-it ideal for JSON serialization benchmarks.
-
-Note:
-
-* For small request benchmarks, an 80 byte portion of the above example was
-  used.
-
-* For large request marshaling benchmarks, a struct containing 50 regular
-  samples was used, making a ~500kB output JSON.
-
-* Benchmarks are showing the results of easyjson's default behaviour,
-  which makes use of `unsafe`.
-
-Benchmarks are available in the repository and can be run by invoking `make`.
-
-### easyjson vs. encoding/json
-
-easyjson is roughly 5-6 times faster than the standard `encoding/json` for
-unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent
-marshaling is 6-7x faster if marshaling to a writer.
-
-### easyjson vs. ffjson
-
-easyjson uses the same approach for JSON marshaling as
-[ffjson](https://github.com/pquerna/ffjson), but takes a significantly
-different approach to lexing and parsing JSON during unmarshaling. This means
-easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for
-non-concurrent marshaling.
-
-As of this writing, `ffjson` seems to have issues when used concurrently:
-specifically, large request pooling hurts `ffjson`'s performance and causes
-scalability issues. These issues with `ffjson` can likely be fixed, but as of
-writing remain outstanding/known issues with `ffjson`.
-
-easyjson and `ffjson` have similar performance for small requests, however
-easyjson outperforms `ffjson` by roughly 2-5x for large requests when used
-with a writer.
-
-### easyjson vs. go/codec
-
-[go/codec](https://github.com/ugorji/go) provides
-compile-time helpers for JSON generation. In this case, helpers do not work
-like marshalers as they are encoding-independent.
-
-easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks
-and about 3x faster for concurrent encoding (without marshaling to a writer).
-
-In an attempt to measure marshaling performance of `go/codec` (as opposed to
-allocations/memcpy/writer interface invocations), a benchmark was done with
-resetting the length of a byte slice rather than resetting the whole slice to
-nil. However, the optimization in this exact form may not be applicable in
-practice, since the memory is not freed between marshaling operations.
-
-### easyjson vs 'ujson' python module
-
-[ujson](https://github.com/esnme/ultrajson) uses C code for parsing, so it is
-interesting to see how plain Go compares to that. It is important to note
-that the resulting object for Python is slower to access, since the library
-parses the JSON object into dictionaries.
-
-easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for
-marshaling.
-
-### Benchmark Results
-
-`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6.
-`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6.
-
-#### Unmarshaling
-
-| lib      | json size | MB/s | allocs/op | B/op  |
-|:---------|:----------|-----:|----------:|------:|
-| standard | regular   | 22   | 218       | 10229 |
-| standard | small     | 9.7  | 14        | 720   |
-|          |           |      |           |       |
-| easyjson | regular   | 125  | 128       | 9794  |
-| easyjson | small     | 67   | 3         | 128   |
-|          |           |      |           |       |
-| ffjson   | regular   | 66   | 141       | 9985  |
-| ffjson   | small     | 17.6 | 10        | 488   |
-|          |           |      |           |       |
-| codec    | regular   | 55   | 434       | 19299 |
-| codec    | small     | 29   | 7         | 336   |
-|          |           |      |           |       |
-| ujson    | regular   | 103  | N/A       | N/A   |
-
-#### Marshaling, one goroutine.
-
-| lib       | json size | MB/s | allocs/op | B/op  |
-|:----------|:----------|-----:|----------:|------:|
-| standard  | regular   | 75   | 9         | 23256 |
-| standard  | small     | 32   | 3         | 328   |
-| standard  | large     | 80   | 17        | 1.2M  |
-|           |           |      |           |       |
-| easyjson  | regular   | 213  | 9         | 10260 |
-| easyjson* | regular   | 263  | 8         | 742   |
-| easyjson  | small     | 125  | 1         | 128   |
-| easyjson  | large     | 212  | 33        | 490k  |
-| easyjson* | large     | 262  | 25        | 2879  |
-|           |           |      |           |       |
-| ffjson    | regular   | 122  | 153       | 21340 |
-| ffjson**  | regular   | 146  | 152       | 4897  |
-| ffjson    | small     | 36   | 5         | 384   |
-| ffjson**  | small     | 64   | 4         | 128   |
-| ffjson    | large     | 134  | 7317      | 818k  |
-| ffjson**  | large     | 125  | 7320      | 827k  |
-|           |           |      |           |       |
-| codec     | regular   | 80   | 17        | 33601 |
-| codec***  | regular   | 108  | 9         | 1153  |
-| codec     | small     | 42   | 3         | 304   |
-| codec***  | small     | 56   | 1         | 48    |
-| codec     | large     | 73   | 483       | 2.5M  |
-| codec***  | large     | 103  | 451       | 66007 |
-|           |           |      |           |       |
-| ujson     | regular   | 92   | N/A       | N/A   |
-
-\* marshaling to a writer,
-\*\* using `ffjson.Pool()`,
-\*\*\* reusing output slice instead of resetting it to nil
-
-#### Marshaling, concurrent.
-
-| lib       | json size | MB/s | allocs/op | B/op  |
-|:----------|:----------|-----:|----------:|------:|
-| standard  | regular   | 252  | 9         | 23257 |
-| standard  | small     | 124  | 3         | 328   |
-| standard  | large     | 289  | 17        | 1.2M  |
-|           |           |      |           |       |
-| easyjson  | regular   | 792  | 9         | 10597 |
-| easyjson* | regular   | 1748 | 8         | 779   |
-| easyjson  | small     | 333  | 1         | 128   |
-| easyjson  | large     | 718  | 36        | 548k  |
-| easyjson* | large     | 2134 | 25        | 4957  |
-|           |           |      |           |       |
-| ffjson    | regular   | 301  | 153       | 21629 |
-| ffjson**  | regular   | 707  | 152       | 5148  |
-| ffjson    | small     | 62   | 5         | 384   |
-| ffjson**  | small     | 282  | 4         | 128   |
-| ffjson    | large     | 438  | 7330      | 1.0M  |
-| ffjson**  | large     | 131  | 7319      | 820k  |
-|           |           |      |           |       |
-| codec     | regular   | 183  | 17        | 33603 |
-| codec***  | regular   | 671  | 9         | 1157  |
-| codec     | small     | 147  | 3         | 304   |
-| codec***  | small     | 299  | 1         | 48    |
-| codec     | large     | 190  | 483       | 2.5M  |
-| codec***  | large     | 752  | 451       | 77574 |
-
-\* marshaling to a writer,
-\*\* using `ffjson.Pool()`,
-\*\*\* reusing output slice instead of resetting it to nil
diff --git a/vendor/github.com/mailru/easyjson/buffer/pool.go b/vendor/github.com/mailru/easyjson/buffer/pool.go
deleted file mode 100644
index 07fb4bc..0000000
--- a/vendor/github.com/mailru/easyjson/buffer/pool.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to
-// reduce copying and to allow reuse of individual chunks.
-package buffer
-
-import (
-	"io"
-	"sync"
-)
-
-// PoolConfig contains configuration for the allocation and reuse strategy.
-type PoolConfig struct {
-	StartSize  int // Minimum chunk size that is allocated.
-	PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead.
-	MaxSize    int // Maximum chunk size that will be allocated.
-}
-
-var config = PoolConfig{
-	StartSize:  128,
-	PooledSize: 512,
-	MaxSize:    32768,
-}
-
-// Reuse pool: chunk size -> pool.
-var buffers = map[int]*sync.Pool{}
-
-func initBuffers() {
-	for l := config.PooledSize; l <= config.MaxSize; l *= 2 {
-		buffers[l] = new(sync.Pool)
-	}
-}
-
-func init() {
-	initBuffers()
-}
-
-// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done.
-func Init(cfg PoolConfig) {
-	config = cfg
-	initBuffers()
-}
-
-// putBuf puts a chunk to reuse pool if it can be reused.
-func putBuf(buf []byte) {
-	size := cap(buf)
-	if size < config.PooledSize {
-		return
-	}
-	if c := buffers[size]; c != nil {
-		c.Put(buf[:0])
-	}
-}
-
-// getBuf gets a chunk from reuse pool or creates a new one if reuse failed.
-func getBuf(size int) []byte {
-	if size < config.PooledSize {
-		return make([]byte, 0, size)
-	}
-
-	if c := buffers[size]; c != nil {
-		v := c.Get()
-		if v != nil {
-			return v.([]byte)
-		}
-	}
-	return make([]byte, 0, size)
-}
-
-// Buffer is a buffer optimized for serialization without extra copying.
-type Buffer struct {
-
-	// Buf is the current chunk that can be used for serialization.
-	Buf []byte
-
-	toPool []byte
-	bufs   [][]byte
-}
-
-// EnsureSpace makes sure that the current chunk contains at least s free bytes,
-// possibly creating a new chunk.
-func (b *Buffer) EnsureSpace(s int) {
-	if cap(b.Buf)-len(b.Buf) >= s {
-		return
-	}
-	l := len(b.Buf)
-	if l > 0 {
-		if cap(b.toPool) != cap(b.Buf) {
-			// Chunk was reallocated, toPool can be pooled.
-			putBuf(b.toPool)
-		}
-		if cap(b.bufs) == 0 {
-			b.bufs = make([][]byte, 0, 8)
-		}
-		b.bufs = append(b.bufs, b.Buf)
-		l = cap(b.toPool) * 2
-	} else {
-		l = config.StartSize
-	}
-
-	if l > config.MaxSize {
-		l = config.MaxSize
-	}
-	b.Buf = getBuf(l)
-	b.toPool = b.Buf
-}
-
-// AppendByte appends a single byte to buffer.
-func (b *Buffer) AppendByte(data byte) {
-	if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
-		b.EnsureSpace(1)
-	}
-	b.Buf = append(b.Buf, data)
-}
-
-// AppendBytes appends a byte slice to buffer.
-func (b *Buffer) AppendBytes(data []byte) {
-	for len(data) > 0 {
-		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
-			b.EnsureSpace(1)
-		}
-
-		sz := cap(b.Buf) - len(b.Buf)
-		if sz > len(data) {
-			sz = len(data)
-		}
-
-		b.Buf = append(b.Buf, data[:sz]...)
-		data = data[sz:]
-	}
-}
-
-// AppendString appends a string to buffer.
-func (b *Buffer) AppendString(data string) {
-	for len(data) > 0 {
-		if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined.
-			b.EnsureSpace(1)
-		}
-
-		sz := cap(b.Buf) - len(b.Buf)
-		if sz > len(data) {
-			sz = len(data)
-		}
-
-		b.Buf = append(b.Buf, data[:sz]...)
-		data = data[sz:]
-	}
-}
-
-// Size computes the size of a buffer by adding sizes of every chunk.
-func (b *Buffer) Size() int {
-	size := len(b.Buf)
-	for _, buf := range b.bufs {
-		size += len(buf)
-	}
-	return size
-}
-
-// DumpTo outputs the contents of a buffer to a writer and resets the buffer.
-func (b *Buffer) DumpTo(w io.Writer) (written int, err error) {
-	var n int
-	for _, buf := range b.bufs {
-		if err == nil {
-			n, err = w.Write(buf)
-			written += n
-		}
-		putBuf(buf)
-	}
-
-	if err == nil {
-		n, err = w.Write(b.Buf)
-		written += n
-	}
-	putBuf(b.toPool)
-
-	b.bufs = nil
-	b.Buf = nil
-	b.toPool = nil
-
-	return
-}
-
-// BuildBytes creates a single byte slice with all the contents of the buffer. Data is
-// copied if it does not fit in a single chunk. You can optionally provide one byte
-// slice as argument that it will try to reuse.
-func (b *Buffer) BuildBytes(reuse ...[]byte) []byte {
-	if len(b.bufs) == 0 {
-		ret := b.Buf
-		b.toPool = nil
-		b.Buf = nil
-		return ret
-	}
-
-	var ret []byte
-	size := b.Size()
-
-	// If we got a buffer as argument and it is big enough, reuse it.
-	if len(reuse) == 1 && cap(reuse[0]) >= size {
-		ret = reuse[0][:0]
-	} else {
-		ret = make([]byte, 0, size)
-	}
-	for _, buf := range b.bufs {
-		ret = append(ret, buf...)
-		putBuf(buf)
-	}
-
-	ret = append(ret, b.Buf...)
-	putBuf(b.toPool)
-
-	b.bufs = nil
-	b.toPool = nil
-	b.Buf = nil
-
-	return ret
-}
-
-type readCloser struct {
-	offset int
-	bufs   [][]byte
-}
-
-func (r *readCloser) Read(p []byte) (n int, err error) {
-	for _, buf := range r.bufs {
-		// Copy as much as we can.
-		x := copy(p[n:], buf[r.offset:])
-		n += x // Increment how much we filled.
-
-		// Did we empty the whole buffer?
-		if r.offset+x == len(buf) {
-			// On to the next buffer.
-			r.offset = 0
-			r.bufs = r.bufs[1:]
-
-			// We can release this buffer.
-			putBuf(buf)
-		} else {
-			r.offset += x
-		}
-
-		if n == len(p) {
-			break
-		}
-	}
-	// No buffers left or nothing read?
-	if len(r.bufs) == 0 {
-		err = io.EOF
-	}
-	return
-}
-
-func (r *readCloser) Close() error {
-	// Release all remaining buffers.
-	for _, buf := range r.bufs {
-		putBuf(buf)
-	}
-	// In case Close gets called multiple times.
-	r.bufs = nil
-
-	return nil
-}
-
-// ReadCloser creates an io.ReadCloser with all the contents of the buffer.
-func (b *Buffer) ReadCloser() io.ReadCloser {
-	ret := &readCloser{0, append(b.bufs, b.Buf)}
-
-	b.bufs = nil
-	b.toPool = nil
-	b.Buf = nil
-
-	return ret
-}
diff --git a/vendor/github.com/mailru/easyjson/go.mod b/vendor/github.com/mailru/easyjson/go.mod
deleted file mode 100644
index 7bc4a65..0000000
--- a/vendor/github.com/mailru/easyjson/go.mod
+++ /dev/null
@@ -1,3 +0,0 @@
-module github.com/mailru/easyjson
-
-go 1.12
diff --git a/vendor/github.com/mailru/easyjson/helpers.go b/vendor/github.com/mailru/easyjson/helpers.go
deleted file mode 100644
index 04ac635..0000000
--- a/vendor/github.com/mailru/easyjson/helpers.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Package easyjson contains marshaler/unmarshaler interfaces and helper functions.
-package easyjson
-
-import (
-	"io"
-	"io/ioutil"
-	"net/http"
-	"strconv"
-
-	"github.com/mailru/easyjson/jlexer"
-	"github.com/mailru/easyjson/jwriter"
-)
-
-// Marshaler is an easyjson-compatible marshaler interface.
-type Marshaler interface {
-	MarshalEasyJSON(w *jwriter.Writer)
-}
-
-// Unmarshaler is an easyjson-compatible unmarshaler interface.
-type Unmarshaler interface {
-	UnmarshalEasyJSON(w *jlexer.Lexer)
-}
-
-// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic.
-type Optional interface {
-	IsDefined() bool
-}
-
-// UnknownsUnmarshaler provides a method to unmarshal unknown struct fields and save them as you want.
-type UnknownsUnmarshaler interface {
-	UnmarshalUnknown(in *jlexer.Lexer, key string)
-}
-
-// UnknownsMarshaler provides a method to write additional struct fields.
-type UnknownsMarshaler interface {
-	MarshalUnknowns(w *jwriter.Writer, first bool)
-}
-
-// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied
-// from a chain of smaller chunks.
-func Marshal(v Marshaler) ([]byte, error) {
-	w := jwriter.Writer{}
-	v.MarshalEasyJSON(&w)
-	return w.BuildBytes()
-}
-
-// MarshalToWriter marshals the data to an io.Writer.
-func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) {
-	jw := jwriter.Writer{}
-	v.MarshalEasyJSON(&jw)
-	return jw.DumpTo(w)
-}
-
-// MarshalToHTTPResponseWriter sets the Content-Length and Content-Type headers for the
-// http.ResponseWriter, and sends the data to the writer. started will be equal to
-// false if an error occurred before any http.ResponseWriter methods were actually
-// invoked (in this case a 500 reply is possible).
-func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { - jw := jwriter.Writer{} - v.MarshalEasyJSON(&jw) - if jw.Error != nil { - return false, 0, jw.Error - } - w.Header().Set("Content-Type", "application/json") - w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) - - started = true - written, err = jw.DumpTo(w) - return -} - -// Unmarshal decodes the JSON in data into the object. -func Unmarshal(data []byte, v Unmarshaler) error { - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} - -// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. -func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { - data, err := ioutil.ReadAll(r) - if err != nil { - return err - } - l := jlexer.Lexer{Data: data} - v.UnmarshalEasyJSON(&l) - return l.Error() -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go deleted file mode 100644 index ff7b27c..0000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go +++ /dev/null @@ -1,24 +0,0 @@ -// This file will only be included to the build if neither -// easyjson_nounsafe nor appengine build tag is set. See README notes -// for more details. - -//+build !easyjson_nounsafe -//+build !appengine - -package jlexer - -import ( - "reflect" - "unsafe" -) - -// bytesToStr creates a string pointing at the slice to avoid copying. -// -// Warning: the string returned by the function should be used with care, as the whole input data -// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data -// may be garbage-collected even when the string exists. -func bytesToStr(data []byte) string { - h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) - shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} - return *(*string)(unsafe.Pointer(&shdr)) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go deleted file mode 100644 index 864d1be..0000000 --- a/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go +++ /dev/null @@ -1,13 +0,0 @@ -// This file is included to the build if any of the buildtags below -// are defined. Refer to README notes for more details. - -//+build easyjson_nounsafe appengine - -package jlexer - -// bytesToStr creates a string normally from []byte -// -// Note that this method is roughly 1.5x slower than using the 'unsafe' method. -func bytesToStr(data []byte) string { - return string(data) -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/error.go b/vendor/github.com/mailru/easyjson/jlexer/error.go deleted file mode 100644 index e90ec40..0000000 --- a/vendor/github.com/mailru/easyjson/jlexer/error.go +++ /dev/null @@ -1,15 +0,0 @@ -package jlexer - -import "fmt" - -// LexerError implements the error interface and represents all possible errors that can be -// generated during parsing the JSON data. 
-type LexerError struct {
-	Reason string
-	Offset int
-	Data   string
-}
-
-func (l *LexerError) Error() string {
-	return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data)
-}
diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go
deleted file mode 100644
index ddd376b..0000000
--- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go
+++ /dev/null
@@ -1,1182 +0,0 @@
-// Package jlexer contains a JSON lexer implementation.
-//
-// It is expected that it is mostly used with generated parser code, so the interface is tuned
-// for a parser that knows what kind of data is expected.
-package jlexer

-import (
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"strconv"
-	"unicode"
-	"unicode/utf16"
-	"unicode/utf8"
-)
-
-// tokenKind determines type of a token.
-type tokenKind byte
-
-const (
-	tokenUndef  tokenKind = iota // No token.
-	tokenDelim                   // Delimiter: one of '{', '}', '[' or ']'.
-	tokenString                  // A string literal, e.g. "abc\u1234"
-	tokenNumber                  // Number literal, e.g. 1.5e5
-	tokenBool                    // Boolean literal: true or false.
-	tokenNull                    // null keyword.
-)
-
-// token describes a single token: type, position in the input and value.
-type token struct {
-	kind tokenKind // Type of a token.
-
-	boolValue  bool   // Value if a boolean literal token.
-	byteValue  []byte // Raw value of a token.
-	delimValue byte   // Delimiter character if a delimiter token.
-}
-
-// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice.
-type Lexer struct {
-	Data []byte // Input data given to the lexer.
-
-	start int   // Start of the current token.
-	pos   int   // Current unscanned position in the input stream.
-	token token // Last scanned token, if token.kind != tokenUndef.
-
-	firstElement bool // Whether current element is the first in array or an object.
-	wantSep      byte // A comma or a colon character, which need to occur before a token.
-
-	UseMultipleErrors bool          // If we want to use multiple errors.
-	fatalError        error         // Fatal error occurred during lexing. It is usually a syntax error.
-	multipleErrors    []*LexerError // Semantic errors that occurred during lexing. Unmarshaling will be continued after these errors are found.
-}
-
-// FetchToken scans the input for the next token.
-func (r *Lexer) FetchToken() {
-	r.token.kind = tokenUndef
-	r.start = r.pos
-
-	// Check if r.Data has an r.pos element.
-	// If it doesn't, it means the input data is corrupted.
-	if len(r.Data) < r.pos {
-		r.errParse("Unexpected end of data")
-		return
-	}
-	// Determine the type of a token by skipping whitespace and reading the
-	// first character.
- for _, c := range r.Data[r.pos:] { - switch c { - case ':', ',': - if r.wantSep == c { - r.pos++ - r.start++ - r.wantSep = 0 - } else { - r.errSyntax() - } - - case ' ', '\t', '\r', '\n': - r.pos++ - r.start++ - - case '"': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenString - r.fetchString() - return - - case '{', '[': - if r.wantSep != 0 { - r.errSyntax() - } - r.firstElement = true - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '}', ']': - if !r.firstElement && (r.wantSep != ',') { - r.errSyntax() - } - r.wantSep = 0 - r.token.kind = tokenDelim - r.token.delimValue = r.Data[r.pos] - r.pos++ - return - - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': - if r.wantSep != 0 { - r.errSyntax() - } - r.token.kind = tokenNumber - r.fetchNumber() - return - - case 'n': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenNull - r.fetchNull() - return - - case 't': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = true - r.fetchTrue() - return - - case 'f': - if r.wantSep != 0 { - r.errSyntax() - } - - r.token.kind = tokenBool - r.token.boolValue = false - r.fetchFalse() - return - - default: - r.errSyntax() - return - } - } - r.fatalError = io.EOF - return -} - -// isTokenEnd returns true if the char can follow a non-delimiter token -func isTokenEnd(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' -} - -// fetchNull fetches and checks remaining bytes of null keyword. -func (r *Lexer) fetchNull() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'u' || - r.Data[r.pos-2] != 'l' || - r.Data[r.pos-1] != 'l' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchTrue fetches and checks remaining bytes of true keyword. -func (r *Lexer) fetchTrue() { - r.pos += 4 - if r.pos > len(r.Data) || - r.Data[r.pos-3] != 'r' || - r.Data[r.pos-2] != 'u' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 4 - r.errSyntax() - } -} - -// fetchFalse fetches and checks remaining bytes of false keyword. -func (r *Lexer) fetchFalse() { - r.pos += 5 - if r.pos > len(r.Data) || - r.Data[r.pos-4] != 'a' || - r.Data[r.pos-3] != 'l' || - r.Data[r.pos-2] != 's' || - r.Data[r.pos-1] != 'e' || - (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { - - r.pos -= 5 - r.errSyntax() - } -} - -// fetchNumber scans a number literal token. -func (r *Lexer) fetchNumber() { - hasE := false - afterE := false - hasDot := false - - r.pos++ - for i, c := range r.Data[r.pos:] { - switch { - case c >= '0' && c <= '9': - afterE = false - case c == '.' && !hasDot: - hasDot = true - case (c == 'e' || c == 'E') && !hasE: - hasE = true - hasDot = true - afterE = true - case (c == '+' || c == '-') && afterE: - afterE = false - default: - r.pos += i - if !isTokenEnd(c) { - r.errSyntax() - } else { - r.token.byteValue = r.Data[r.start:r.pos] - } - return - } - } - - r.pos = len(r.Data) - r.token.byteValue = r.Data[r.start:] -} - -// findStringLen tries to scan into the string literal for ending quote char to determine required size. -// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
-func findStringLen(data []byte) (isValid, hasEscapes bool, length int) { - delta := 0 - - for i := 0; i < len(data); i++ { - switch data[i] { - case '\\': - i++ - delta++ - if i < len(data) && data[i] == 'u' { - delta++ - } - case '"': - return true, (delta > 0), (i - delta) - } - } - - return false, false, len(data) -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - var val rune - for i := 2; i < len(s) && i < 6; i++ { - var v byte - c := s[i] - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v = c - '0' - case 'a', 'b', 'c', 'd', 'e', 'f': - v = c - 'a' + 10 - case 'A', 'B', 'C', 'D', 'E', 'F': - v = c - 'A' + 10 - default: - return -1 - } - - val <<= 4 - val |= rune(v) - } - return val -} - -// processEscape processes a single escape sequence and returns number of bytes processed. -func (r *Lexer) processEscape(data []byte) (int, error) { - if len(data) < 2 { - return 0, fmt.Errorf("syntax error at %v", string(data)) - } - - c := data[1] - switch c { - case '"', '/', '\\': - r.token.byteValue = append(r.token.byteValue, c) - return 2, nil - case 'b': - r.token.byteValue = append(r.token.byteValue, '\b') - return 2, nil - case 'f': - r.token.byteValue = append(r.token.byteValue, '\f') - return 2, nil - case 'n': - r.token.byteValue = append(r.token.byteValue, '\n') - return 2, nil - case 'r': - r.token.byteValue = append(r.token.byteValue, '\r') - return 2, nil - case 't': - r.token.byteValue = append(r.token.byteValue, '\t') - return 2, nil - case 'u': - rr := getu4(data) - if rr < 0 { - return 0, errors.New("syntax error") - } - - read := 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(data[read:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - read += 6 - rr = dec - } else { - rr = unicode.ReplacementChar - } - } - var d [4]byte - s := utf8.EncodeRune(d[:], rr) - r.token.byteValue = append(r.token.byteValue, d[:s]...) - return read, nil - } - - return 0, errors.New("syntax error") -} - -// fetchString scans a string literal token. -func (r *Lexer) fetchString() { - r.pos++ - data := r.Data[r.pos:] - - isValid, hasEscapes, length := findStringLen(data) - if !isValid { - r.pos += length - r.errParse("unterminated string literal") - return - } - if !hasEscapes { - r.token.byteValue = data[:length] - r.pos += length + 1 - return - } - - r.token.byteValue = make([]byte, 0, length) - p := 0 - for i := 0; i < len(data); { - switch data[i] { - case '"': - r.pos += i + 1 - r.token.byteValue = append(r.token.byteValue, data[p:i]...) - i++ - return - - case '\\': - r.token.byteValue = append(r.token.byteValue, data[p:i]...) - off, err := r.processEscape(data[i:]) - if err != nil { - r.errParse(err.Error()) - return - } - i += off - p = i - - default: - i++ - } - } - r.errParse("unterminated string literal") -} - -// scanToken scans the next token if no token is currently available in the lexer. -func (r *Lexer) scanToken() { - if r.token.kind != tokenUndef || r.fatalError != nil { - return - } - - r.FetchToken() -} - -// consume resets the current token to allow scanning the next one. -func (r *Lexer) consume() { - r.token.kind = tokenUndef - r.token.delimValue = 0 -} - -// Ok returns true if no error (including io.EOF) was encountered during scanning. 
-func (r *Lexer) Ok() bool { - return r.fatalError == nil -} - -const maxErrorContextLen = 13 - -func (r *Lexer) errParse(what string) { - if r.fatalError == nil { - var str string - if len(r.Data)-r.pos <= maxErrorContextLen { - str = string(r.Data) - } else { - str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: what, - Offset: r.pos, - Data: str, - } - } -} - -func (r *Lexer) errSyntax() { - r.errParse("syntax error") -} - -func (r *Lexer) errInvalidToken(expected string) { - if r.fatalError != nil { - return - } - if r.UseMultipleErrors { - r.pos = r.start - r.consume() - r.SkipRecursive() - switch expected { - case "[": - r.token.delimValue = ']' - r.token.kind = tokenDelim - case "{": - r.token.delimValue = '}' - r.token.kind = tokenDelim - } - r.addNonfatalError(&LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - }) - return - } - - var str string - if len(r.token.byteValue) <= maxErrorContextLen { - str = string(r.token.byteValue) - } else { - str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." - } - r.fatalError = &LexerError{ - Reason: fmt.Sprintf("expected %s", expected), - Offset: r.pos, - Data: str, - } -} - -func (r *Lexer) GetPos() int { - return r.pos -} - -// Delim consumes a token and verifies that it is the given delimiter. -func (r *Lexer) Delim(c byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() || r.token.delimValue != c { - r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. - r.errInvalidToken(string([]byte{c})) - } else { - r.consume() - } -} - -// IsDelim returns true if there was no scanning error and next token is the given delimiter. -func (r *Lexer) IsDelim(c byte) bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return !r.Ok() || r.token.delimValue == c -} - -// Null verifies that the next token is null and consumes it. -func (r *Lexer) Null() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNull { - r.errInvalidToken("null") - } - r.consume() -} - -// IsNull returns true if the next token is a null keyword. -func (r *Lexer) IsNull() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - return r.Ok() && r.token.kind == tokenNull -} - -// Skip skips a single token. -func (r *Lexer) Skip() { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - r.consume() -} - -// SkipRecursive skips next array or object completely, or just skips a single token if not -// an array/object. -// -// Note: no syntax validation is performed on the skipped data. 
-func (r *Lexer) SkipRecursive() { - r.scanToken() - var start, end byte - - switch r.token.delimValue { - case '{': - start, end = '{', '}' - case '[': - start, end = '[', ']' - default: - r.consume() - return - } - - r.consume() - - level := 1 - inQuotes := false - wasEscape := false - - for i, c := range r.Data[r.pos:] { - switch { - case c == start && !inQuotes: - level++ - case c == end && !inQuotes: - level-- - if level == 0 { - r.pos += i + 1 - return - } - case c == '\\' && inQuotes: - wasEscape = !wasEscape - continue - case c == '"' && inQuotes: - inQuotes = wasEscape - case c == '"': - inQuotes = true - } - wasEscape = false - } - r.pos = len(r.Data) - r.fatalError = &LexerError{ - Reason: "EOF reached while skipping array/object or token", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - } -} - -// Raw fetches the next item recursively as a data slice -func (r *Lexer) Raw() []byte { - r.SkipRecursive() - if !r.Ok() { - return nil - } - return r.Data[r.start:r.pos] -} - -// IsStart returns whether the lexer is positioned at the start -// of an input string. -func (r *Lexer) IsStart() bool { - return r.pos == 0 -} - -// Consumed reads all remaining bytes from the input, publishing an error if -// there is anything but whitespace remaining. -func (r *Lexer) Consumed() { - if r.pos > len(r.Data) || !r.Ok() { - return - } - - for _, c := range r.Data[r.pos:] { - if c != ' ' && c != '\t' && c != '\r' && c != '\n' { - r.AddError(&LexerError{ - Reason: "invalid character '" + string(c) + "' after top-level value", - Offset: r.pos, - Data: string(r.Data[r.pos:]), - }) - return - } - - r.pos++ - r.start++ - } -} - -func (r *Lexer) unsafeString() (string, []byte) { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "", nil - } - bytes := r.token.byteValue - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret, bytes -} - -// UnsafeString returns the string value if the token is a string literal. -// -// Warning: returned string may point to the input buffer, so the string should not outlive -// the input buffer. Intended pattern of usage is as an argument to a switch statement. -func (r *Lexer) UnsafeString() string { - ret, _ := r.unsafeString() - return ret -} - -// UnsafeBytes returns the byte slice if the token is a string literal. -func (r *Lexer) UnsafeBytes() []byte { - _, ret := r.unsafeString() - return ret -} - -// String reads a string literal. -func (r *Lexer) String() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return "" - } - ret := string(r.token.byteValue) - r.consume() - return ret -} - -// Bytes reads a string literal and base64 decodes it into a byte slice. -func (r *Lexer) Bytes() []byte { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenString { - r.errInvalidToken("string") - return nil - } - ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) - n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) - if err != nil { - r.fatalError = &LexerError{ - Reason: err.Error(), - } - return nil - } - - r.consume() - return ret[:n] -} - -// Bool reads a true or false boolean keyword. 
-func (r *Lexer) Bool() bool { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenBool { - r.errInvalidToken("bool") - return false - } - ret := r.token.boolValue - r.consume() - return ret -} - -func (r *Lexer) number() string { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() || r.token.kind != tokenNumber { - r.errInvalidToken("number") - return "" - } - ret := bytesToStr(r.token.byteValue) - r.consume() - return ret -} - -func (r *Lexer) Uint8() uint8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16() uint16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32() uint32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return uint32(n) -} - -func (r *Lexer) Uint64() uint64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Uint() uint { - return uint(r.Uint64()) -} - -func (r *Lexer) Int8() int8 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int8(n) -} - -func (r *Lexer) Int16() int16 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int16(n) -} - -func (r *Lexer) Int32() int32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return int32(n) -} - -func (r *Lexer) Int64() int64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Int() int { - return int(r.Int64()) -} - -func (r *Lexer) Uint8Str() uint8 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint8(n) -} - -func (r *Lexer) Uint16Str() uint16 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint16(n) -} - -func (r *Lexer) Uint32Str() uint32 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return uint32(n) -} - 
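The typed readers in this file are normally driven by generated code, but they compose directly as well; a hand-rolled decoder sketch (hypothetical caller, using the vendored import path) showing the Delim/WantColon/WantComma protocol:

```go
package main

import (
	"fmt"

	"github.com/mailru/easyjson/jlexer"
)

func main() {
	l := jlexer.Lexer{Data: []byte(`{"n": 42, "extra": [1, 2]}`)}
	var n int
	l.Delim('{')
	for !l.IsDelim('}') {
		key := l.UnsafeString() // safe here: used before the buffer is touched
		l.WantColon()
		switch key {
		case "n":
			n = l.Int()
		default:
			l.SkipRecursive() // skip unknown values without validating them
		}
		l.WantComma()
	}
	l.Delim('}')
	l.Consumed() // error out on trailing non-whitespace
	fmt.Println(n, l.Error()) // 42 <nil>
}
```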
-func (r *Lexer) Uint64Str() uint64 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) UintStr() uint { - return uint(r.Uint64Str()) -} - -func (r *Lexer) UintptrStr() uintptr { - return uintptr(r.Uint64Str()) -} - -func (r *Lexer) Int8Str() int8 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 8) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int8(n) -} - -func (r *Lexer) Int16Str() int16 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 16) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int16(n) -} - -func (r *Lexer) Int32Str() int32 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return int32(n) -} - -func (r *Lexer) Int64Str() int64 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) IntStr() int { - return int(r.Int64Str()) -} - -func (r *Lexer) Float32() float32 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return float32(n) -} - -func (r *Lexer) Float32Str() float32 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 32) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return float32(n) -} - -func (r *Lexer) Float64() float64 { - s := r.number() - if !r.Ok() { - return 0 - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: s, - }) - } - return n -} - -func (r *Lexer) Float64Str() float64 { - s, b := r.unsafeString() - if !r.Ok() { - return 0 - } - n, err := strconv.ParseFloat(s, 64) - if err != nil { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Reason: err.Error(), - Data: string(b), - }) - } - return n -} - -func (r *Lexer) Error() error { - return r.fatalError -} - -func (r *Lexer) AddError(e error) { - if r.fatalError == nil { - r.fatalError = e - } -} - -func (r *Lexer) AddNonFatalError(e error) { - r.addNonfatalError(&LexerError{ - Offset: r.start, - Data: string(r.Data[r.start:r.pos]), - Reason: e.Error(), - }) -} - -func (r *Lexer) addNonfatalError(err *LexerError) { - if r.UseMultipleErrors { - // We don't want to add errors with the same offset. - if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { - return - } - r.multipleErrors = append(r.multipleErrors, err) - return - } - r.fatalError = err -} - -func (r *Lexer) GetNonFatalErrors() []*LexerError { - return r.multipleErrors -} - -// JsonNumber fetches and json.Number from 'encoding/json' package. 
-// Both int, float or string, contains them are valid values -func (r *Lexer) JsonNumber() json.Number { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - if !r.Ok() { - r.errInvalidToken("json.Number") - return json.Number("") - } - - switch r.token.kind { - case tokenString: - return json.Number(r.String()) - case tokenNumber: - return json.Number(r.Raw()) - case tokenNull: - r.Null() - return json.Number("") - default: - r.errSyntax() - return json.Number("") - } -} - -// Interface fetches an interface{} analogous to the 'encoding/json' package. -func (r *Lexer) Interface() interface{} { - if r.token.kind == tokenUndef && r.Ok() { - r.FetchToken() - } - - if !r.Ok() { - return nil - } - switch r.token.kind { - case tokenString: - return r.String() - case tokenNumber: - return r.Float64() - case tokenBool: - return r.Bool() - case tokenNull: - r.Null() - return nil - } - - if r.token.delimValue == '{' { - r.consume() - - ret := map[string]interface{}{} - for !r.IsDelim('}') { - key := r.String() - r.WantColon() - ret[key] = r.Interface() - r.WantComma() - } - r.Delim('}') - - if r.Ok() { - return ret - } else { - return nil - } - } else if r.token.delimValue == '[' { - r.consume() - - ret := []interface{}{} - for !r.IsDelim(']') { - ret = append(ret, r.Interface()) - r.WantComma() - } - r.Delim(']') - - if r.Ok() { - return ret - } else { - return nil - } - } - r.errSyntax() - return nil -} - -// WantComma requires a comma to be present before fetching next token. -func (r *Lexer) WantComma() { - r.wantSep = ',' - r.firstElement = false -} - -// WantColon requires a colon to be present before fetching next token. -func (r *Lexer) WantColon() { - r.wantSep = ':' - r.firstElement = false -} diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go deleted file mode 100644 index eb8547c..0000000 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ /dev/null @@ -1,407 +0,0 @@ -// Package jwriter contains a JSON writer. -package jwriter - -import ( - "io" - "strconv" - "unicode/utf8" - - "github.com/mailru/easyjson/buffer" -) - -// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but -// Flags field in Writer is used to set and pass them around. -type Flags int - -const ( - NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. - NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. -) - -// Writer is a JSON writer. -type Writer struct { - Flags Flags - - Error error - Buffer buffer.Buffer - NoEscapeHTML bool -} - -// Size returns the size of the data that was written out. -func (w *Writer) Size() int { - return w.Buffer.Size() -} - -// DumpTo outputs the data to given io.Writer, resetting the buffer. -func (w *Writer) DumpTo(out io.Writer) (written int, err error) { - return w.Buffer.DumpTo(out) -} - -// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice -// as argument that it will try to reuse. -func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.BuildBytes(reuse...), nil -} - -// ReadCloser returns an io.ReadCloser that can be used to read the data. -// ReadCloser also resets the buffer. -func (w *Writer) ReadCloser() (io.ReadCloser, error) { - if w.Error != nil { - return nil, w.Error - } - - return w.Buffer.ReadCloser(), nil -} - -// RawByte appends raw binary data to the buffer. 
-func (w *Writer) RawByte(c byte) { - w.Buffer.AppendByte(c) -} - -// RawByte appends raw binary data to the buffer. -func (w *Writer) RawString(s string) { - w.Buffer.AppendString(s) -} - -// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for -// calling with results of MarshalJSON-like functions. -func (w *Writer) Raw(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.Buffer.AppendBytes(data) - default: - w.RawString("null") - } -} - -// RawText encloses raw binary data in quotes and appends in to the buffer. -// Useful for calling with results of MarshalText-like functions. -func (w *Writer) RawText(data []byte, err error) { - switch { - case w.Error != nil: - return - case err != nil: - w.Error = err - case len(data) > 0: - w.String(string(data)) - default: - w.RawString("null") - } -} - -// Base64Bytes appends data to the buffer after base64 encoding it -func (w *Writer) Base64Bytes(data []byte) { - if data == nil { - w.Buffer.AppendString("null") - return - } - w.Buffer.AppendByte('"') - w.base64(data) - w.Buffer.AppendByte('"') -} - -func (w *Writer) Uint8(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint16(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint32(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) -} - -func (w *Writer) Uint64(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Int8(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int16(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int32(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) -} - -func (w *Writer) Int64(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) -} - -func (w *Writer) Uint8Str(n uint8) { - w.Buffer.EnsureSpace(3) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint16Str(n uint16) { - w.Buffer.EnsureSpace(5) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint32Str(n uint32) { - w.Buffer.EnsureSpace(10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) UintStr(n uint) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Uint64Str(n uint64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') 
-} - -func (w *Writer) UintptrStr(n uintptr) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int8Str(n int8) { - w.Buffer.EnsureSpace(4) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int16Str(n int16) { - w.Buffer.EnsureSpace(6) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int32Str(n int32) { - w.Buffer.EnsureSpace(11) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) IntStr(n int) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Int64Str(n int64) { - w.Buffer.EnsureSpace(21) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float32(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) -} - -func (w *Writer) Float32Str(n float32) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Float64(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) -} - -func (w *Writer) Float64Str(n float64) { - w.Buffer.EnsureSpace(20) - w.Buffer.Buf = append(w.Buffer.Buf, '"') - w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) - w.Buffer.Buf = append(w.Buffer.Buf, '"') -} - -func (w *Writer) Bool(v bool) { - w.Buffer.EnsureSpace(5) - if v { - w.Buffer.Buf = append(w.Buffer.Buf, "true"...) - } else { - w.Buffer.Buf = append(w.Buffer.Buf, "false"...) - } -} - -const chars = "0123456789abcdef" - -func getTable(falseValues ...int) [128]bool { - table := [128]bool{} - - for i := 0; i < 128; i++ { - table[i] = true - } - - for _, v := range falseValues { - table[v] = false - } - - return table -} - -var ( - htmlEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '&', '<', '>', '\\') - htmlNoEscapeTable = getTable(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, '"', '\\') -) - -func (w *Writer) String(s string) { - w.Buffer.AppendByte('"') - - // Portions of the string that contain no escapes are appended as - // byte slices. 
- - p := 0 // last non-escape symbol - - var escapeTable [128]bool - if w.NoEscapeHTML { - escapeTable = htmlNoEscapeTable - } else { - escapeTable = htmlEscapeTable - } - - for i := 0; i < len(s); { - c := s[i] - - if c < utf8.RuneSelf { - if escapeTable[c] { - // single-width character, no escaping is required - i++ - continue - } - - w.Buffer.AppendString(s[p:i]) - switch c { - case '\t': - w.Buffer.AppendString(`\t`) - case '\r': - w.Buffer.AppendString(`\r`) - case '\n': - w.Buffer.AppendString(`\n`) - case '\\': - w.Buffer.AppendString(`\\`) - case '"': - w.Buffer.AppendString(`\"`) - default: - w.Buffer.AppendString(`\u00`) - w.Buffer.AppendByte(chars[c>>4]) - w.Buffer.AppendByte(chars[c&0xf]) - } - - i++ - p = i - continue - } - - // broken utf - runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) - if runeValue == utf8.RuneError && runeWidth == 1 { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\ufffd`) - i++ - p = i - continue - } - - // jsonp stuff - tab separator and line separator - if runeValue == '\u2028' || runeValue == '\u2029' { - w.Buffer.AppendString(s[p:i]) - w.Buffer.AppendString(`\u202`) - w.Buffer.AppendByte(chars[runeValue&0xf]) - i += runeWidth - p = i - continue - } - i += runeWidth - } - w.Buffer.AppendString(s[p:]) - w.Buffer.AppendByte('"') -} - -const encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/" -const padChar = '=' - -func (w *Writer) base64(in []byte) { - - if len(in) == 0 { - return - } - - w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) - - si := 0 - n := (len(in) / 3) * 3 - - for si < n { - // Convert 3x 8bit source bytes into 4 bytes - val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F], encode[val>>6&0x3F], encode[val&0x3F]) - - si += 3 - } - - remain := len(in) - si - if remain == 0 { - return - } - - // Add the remaining small block - val := uint(in[si+0]) << 16 - if remain == 2 { - val |= uint(in[si+1]) << 8 - } - - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>18&0x3F], encode[val>>12&0x3F]) - - switch remain { - case 2: - w.Buffer.Buf = append(w.Buffer.Buf, encode[val>>6&0x3F], byte(padChar)) - case 1: - w.Buffer.Buf = append(w.Buffer.Buf, byte(padChar), byte(padChar)) - } -} diff --git a/vendor/github.com/mailru/easyjson/raw.go b/vendor/github.com/mailru/easyjson/raw.go deleted file mode 100644 index 81bd002..0000000 --- a/vendor/github.com/mailru/easyjson/raw.go +++ /dev/null @@ -1,45 +0,0 @@ -package easyjson - -import ( - "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// RawMessage is a raw piece of JSON (number, string, bool, object, array or -// null) that is extracted without parsing and output as is during marshaling. -type RawMessage []byte - -// MarshalEasyJSON does JSON marshaling using easyjson interface. -func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { - if len(*v) == 0 { - w.RawString("null") - } else { - w.Raw(*v, nil) - } -} - -// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. -func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { - *v = RawMessage(l.Raw()) -} - -// UnmarshalJSON implements encoding/json.Unmarshaler interface. -func (v *RawMessage) UnmarshalJSON(data []byte) error { - *v = data - return nil -} - -var nullBytes = []byte("null") - -// MarshalJSON implements encoding/json.Marshaler interface. 
-func (v RawMessage) MarshalJSON() ([]byte, error) { - if len(v) == 0 { - return nullBytes, nil - } - return v, nil -} - -// IsDefined is required for integration with omitempty easyjson logic. -func (v *RawMessage) IsDefined() bool { - return len(*v) > 0 -} diff --git a/vendor/github.com/mailru/easyjson/unknown_fields.go b/vendor/github.com/mailru/easyjson/unknown_fields.go deleted file mode 100644 index 6cfdf83..0000000 --- a/vendor/github.com/mailru/easyjson/unknown_fields.go +++ /dev/null @@ -1,34 +0,0 @@ -package easyjson - -import ( - json "encoding/json" - - jlexer "github.com/mailru/easyjson/jlexer" - "github.com/mailru/easyjson/jwriter" -) - -// UnknownFieldsProxy implemets UnknownsUnmarshaler and UnknownsMarshaler -// use it as embedded field in your structure to parse and then serialize unknown struct fields -type UnknownFieldsProxy struct { - unknownFields map[string]interface{} -} - -func (s *UnknownFieldsProxy) UnmarshalUnknown(in *jlexer.Lexer, key string) { - if s.unknownFields == nil { - s.unknownFields = make(map[string]interface{}, 1) - } - s.unknownFields[key] = in.Interface() -} - -func (s UnknownFieldsProxy) MarshalUnknowns(out *jwriter.Writer, first bool) { - for key, val := range s.unknownFields { - if first { - first = false - } else { - out.RawByte(',') - } - out.String(string(key)) - out.RawByte(':') - out.Raw(json.Marshal(val)) - } -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE deleted file mode 100644 index 5d8cb5b..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE +++ /dev/null @@ -1 +0,0 @@ -Copyright 2012 Matt T. 
Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore deleted file mode 100644 index e16fb94..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore +++ /dev/null @@ -1 +0,0 @@ -cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile deleted file mode 100644 index 81be214..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile +++ /dev/null @@ -1,7 +0,0 @@ -all: - -cover: - go test -cover -v -coverprofile=cover.dat ./... - go tool cover -func cover.dat - -.PHONY: cover diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go deleted file mode 100644 index 258c063..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/golang/protobuf/proto" -) - -var errInvalidVarint = errors.New("invalid varint32 encountered") - -// ReadDelimited decodes a message from the provided length-delimited stream, -// where the length is encoded as 32-bit varint prefix to the message body. -// It returns the total number of bytes read and any applicable error. This is -// roughly equivalent to the companion Java API's -// MessageLite#parseDelimitedFrom. As per the reader contract, this function -// calls r.Read repeatedly as required until exactly one message including its -// prefix is read and decoded (or an error has occurred). The function never -// reads more bytes from the stream than required. The function never returns -// an error if a message has been read and decoded correctly, even if the end -// of the stream has been reached in doing so. In that case, any subsequent -// calls return (0, io.EOF). -func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { - // Per AbstractParser#parsePartialDelimitedFrom with - // CodedInputStream#readRawVarint32. - var headerBuf [binary.MaxVarintLen32]byte - var bytesRead, varIntBytes int - var messageLength uint64 - for varIntBytes == 0 { // i.e. no varint has been decoded yet. - if bytesRead >= len(headerBuf) { - return bytesRead, errInvalidVarint - } - // We have to read byte by byte here to avoid reading more bytes - // than required. Each read byte is appended to what we have - // read before. - newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) - if newBytesRead == 0 { - if err != nil { - return bytesRead, err - } - // A Reader should not return (0, nil), but if it does, - // it should be treated as no-op (according to the - // Reader contract). So let's go on... 
- continue - } - bytesRead += newBytesRead - // Now present everything read so far to the varint decoder and - // see if a varint can be decoded already. - messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) - } - - messageBuf := make([]byte, messageLength) - newBytesRead, err := io.ReadFull(r, messageBuf) - bytesRead += newBytesRead - if err != nil { - return bytesRead, err - } - - return bytesRead, proto.Unmarshal(messageBuf, m) -} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go deleted file mode 100644 index c318385..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil provides record length-delimited Protocol Buffer streaming. -package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go deleted file mode 100644 index 8fb59ad..0000000 --- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2013 Matt T. Proud -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package pbutil - -import ( - "encoding/binary" - "io" - - "github.com/golang/protobuf/proto" -) - -// WriteDelimited encodes and dumps a message to the provided writer prefixed -// with a 32-bit varint indicating the length of the encoded message, producing -// a length-delimited record stream, which can be used to chain together -// encoded messages of the same type together in a file. It returns the total -// number of bytes written and any applicable error. This is roughly -// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { - buffer, err := proto.Marshal(m) - if err != nil { - return 0, err - } - - var buf [binary.MaxVarintLen32]byte - encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) - - sync, err := w.Write(buf[:encodedLength]) - if err != nil { - return sync, err - } - - n, err = w.Write(buffer) - return n + sync, err -} diff --git a/vendor/github.com/olivere/elastic/v7/.gitignore b/vendor/github.com/olivere/elastic/v7/.gitignore deleted file mode 100644 index caa3019..0000000 --- a/vendor/github.com/olivere/elastic/v7/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -.envrc - -/.vscode/ -/.idea/ -/debug.test -/generator -/cluster-test/cluster-test -/cluster-test/*.log -/cluster-test/es-chaos-monkey -/dist -/go.sum -/spec -/tmp -/CHANGELOG-3.0.html - diff --git a/vendor/github.com/olivere/elastic/v7/.travis.yml.off b/vendor/github.com/olivere/elastic/v7/.travis.yml.off deleted file mode 100644 index b657308..0000000 --- a/vendor/github.com/olivere/elastic/v7/.travis.yml.off +++ /dev/null @@ -1,32 +0,0 @@ -sudo: required -language: go -go: -- "1.13.x" -- "1.14.x" -- tip -matrix: - allow_failures: - - go: tip -env: -- GO111MODULE=on -- GO111MODULE=off -addons: - apt: - update: true - packages: - - docker-ce -services: -- docker -before_install: -- if [[ "$TRAVIS_OS_NAME" == "linux" && ! $(which nc) ]] ; then sudo apt-get install -y netcat ; fi -- sudo sysctl -w vm.max_map_count=262144 -- docker-compose pull -- docker-compose up -d -- go get -u github.com/google/go-cmp/cmp -- go get -u github.com/fortytw2/leaktest -- go get . ./aws/... ./config/... ./trace/... ./uritemplates/... -- while ! nc -z localhost 9200; do sleep 1; done -- while ! nc -z localhost 9210; do sleep 1; done -install: true # ignore the go get -t -v ./... -script: -- go test -race -deprecations -strict-decoder -v . ./aws/... ./config/... ./trace/... ./uritemplates/... diff --git a/vendor/github.com/olivere/elastic/v7/CHANGELOG-3.0.md b/vendor/github.com/olivere/elastic/v7/CHANGELOG-3.0.md deleted file mode 100644 index 07f3e66..0000000 --- a/vendor/github.com/olivere/elastic/v7/CHANGELOG-3.0.md +++ /dev/null @@ -1,363 +0,0 @@ -# Elastic 3.0 - -Elasticsearch 2.0 comes with some [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/breaking-changes-2.0.html). You will probably need to upgrade your application and/or rewrite part of it due to those changes. - -We use that window of opportunity to also update Elastic (the Go client) from version 2.0 to 3.0. This will introduce both changes due to the Elasticsearch 2.0 update as well as changes that make Elastic cleaner by removing some old cruft. - -So, to summarize: - -1. Elastic 2.0 is compatible with Elasticsearch 1.7+ and is still actively maintained. -2. Elastic 3.0 is compatible with Elasticsearch 2.0+ and will soon become the new master branch. - -The rest of the document is a list of all changes in Elastic 3.0. - -## Pointer types - -All types have changed to be pointer types, not value types. 
This not only is cleaner but also simplifies the API as illustrated by the following example: - -Example for Elastic 2.0 (old): - -```go -q := elastic.NewMatchAllQuery() -res, err := elastic.Search("one").Query(&q).Do() // notice the & here -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewMatchAllQuery() -res, err := elastic.Search("one").Query(q).Do() // no more & -// ... which can be simplified as: -res, err := elastic.Search("one").Query(elastic.NewMatchAllQuery()).Do() -``` - -It also helps to prevent [subtle issues](https://github.com/olivere/elastic/issues/115#issuecomment-130753046). - -## Query/filter merge - -One of the biggest changes in Elasticsearch 2.0 is the [merge of queries and filters](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_queries_and_filters_merged). In Elasticsearch 1.x, you had a whole range of queries and filters that were basically identical (e.g. `term_query` and `term_filter`). - -The practical aspect of the merge is that you can now basically use queries where once you had to use filters instead. For Elastic 3.0 this means: We could remove a whole bunch of files. Yay! - -Notice that some methods still come by "filter", e.g. `PostFilter`. However, they accept a `Query` now when they used to accept a `Filter` before. - -Example for Elastic 2.0 (old): - -```go -q := elastic.NewMatchAllQuery() -f := elastic.NewTermFilter("tag", "important") -res, err := elastic.Search().Index("one").Query(&q).PostFilter(f) -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewMatchAllQuery() -f := elastic.NewTermQuery("tag", "important") // it's a query now! -res, err := elastic.Search().Index("one").Query(q).PostFilter(f) -``` - -## Facets are removed - -[Facets have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_facets_have_been_removed) in Elasticsearch 2.0. You need to use aggregations now. - -## Errors - -Elasticsearch 2.0 returns more information about an error in the HTTP response body. Elastic 3.0 now reads this information and makes it accessible by the consumer. - -Errors and all its details are now returned in [`Error`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59). - -### HTTP Status 404 (Not Found) - -When Elasticsearch does not find an entity or an index, it generally returns HTTP status code 404. In Elastic 2.0 this was a valid result and didn't raise an error from the `Do` functions. This has now changed in Elastic 3.0. - -Starting with Elastic 3.0, there are only two types of responses considered successful. First, responses with HTTP status codes [200..299]. Second, HEAD requests which return HTTP status 404. The latter is used by Elasticsearch to e.g. check for existence of indices or documents. All other responses will return an error. - -To check for HTTP Status 404 (with non-HEAD requests), e.g. when trying to get or delete a missing document, you can use the [`IsNotFound`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L84) helper (see below). - -The following example illustrates how to check for a missing document in Elastic 2.0 and what has changed in 3.0. 
- -Example for Elastic 2.0 (old): - -```go -res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() -if err != nil { - // Something else went wrong (but 404 is NOT an error in Elastic 2.0) -} -if !res.Found { - // Document has not been found -} -``` - -Example for Elastic 3.0 (new): - -```go -res, err = client.Get().Index("one").Type("tweet").Id("no-such-id").Do() -if err != nil { - if elastic.IsNotFound(err) { - // Document has not been found - } else { - // Something else went wrong - } -} -``` - -### HTTP Status 408 (Timeouts) - -Elasticsearch now responds with HTTP status code 408 (Timeout) when a request fails due to a timeout. E.g. if you specify a timeout with the Cluster Health API, the HTTP response status will be 408 if the timeout is raised. See [here](https://github.com/elastic/elasticsearch/commit/fe3179d9cccb569784434b2135ca9ae13d5158d3) for the specific commit to the Cluster Health API. - -To check for HTTP Status 408, we introduced the [`IsTimeout`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L101) helper. - -Example for Elastic 2.0 (old): - -```go -health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() -if err != nil { - // ... -} -if health.TimedOut { - // We have a timeout -} -``` - -Example for Elastic 3.0 (new): - -```go -health, err := client.ClusterHealth().WaitForStatus("yellow").Timeout("1s").Do() -if elastic.IsTimeout(err) { - // We have a timeout -} -``` - -### Bulk Errors - -The error response of a bulk operation used to be a simple string in Elasticsearch 1.x. -In Elasticsearch 2.0, it returns a structured JSON object with a lot more details about the error. -These errors are now captured in an object of type [`ErrorDetails`](https://github.com/olivere/elastic/blob/release-branch.v3/errors.go#L59) which is used in [`BulkResponseItem`](https://github.com/olivere/elastic/blob/release-branch.v3/bulk.go#L206). - -### Removed specific Elastic errors - -The specific error types `ErrMissingIndex`, `ErrMissingType`, and `ErrMissingId` have been removed. They were only used by `DeleteService` and are replaced by a generic error message. - -## Numeric types - -Elastic 3.0 has settled to use `float64` everywhere. It used to be a mix of `float32` and `float64` in Elastic 2.0. E.g. all boostable queries in Elastic 3.0 now have a boost type of `float64` where it used to be `float32`. - -## Pluralization - -Some services accept zero, one or more indices or types to operate on. -E.g. the `SearchService` accepts a list of zero, one, or more indices to -search and therefore had a func called `Index(index string)` and a func -called `Indices(indices ...string)`. - -Elastic 3.0 now only uses the singular form that, when applicable, accepts a -variadic type. E.g. in the case of the `SearchService`, you now only have -one func with the following signature: `Index(indices ...string)`. - -Notice this is only limited to `Index(...)` and `Type(...)`. There are other -services with variadic functions. These have not been changed. - -## Multiple calls to variadic functions - -Some services with variadic functions have cleared the underlying slice when -called while other services just add to the existing slice. This has now been -normalized to always add to the underlying slice.
- -Example for Elastic 2.0 (old): - -```go -// Would only clear scroll id "two" -// because ScrollId cleared the values when called multiple times -client.ClearScroll().ScrollId("one").ScrollId("two").Do() -``` - -Example for Elastic 3.0 (new): - -```go -// Now (correctly) clears both scroll id "one" and "two" -// because ScrollId no longer clears the values when called multiple times -client.ClearScroll().ScrollId("one").ScrollId("two").Do() -``` - -## Ping service requires URL - -The `Ping` service raised some issues because it is different from all -other services. If not explicitly given a URL, it always pings `127.0.0.1:9200`. - -Users expected to ping the cluster, but that is not possible as the cluster -can be a set of many nodes: so which node do we ping then? - -To make this clearer, the `Ping` function on the client now requires users -to explicitly set the URL of the node to ping. - -## Meta fields - -Many of the meta fields e.g. `_parent` or `_routing` are now -[part of the top-level of a document](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_mapping_changes.html#migration-meta-fields) -and are no longer returned as parts of the `fields` object. We had to change -larger parts of e.g. the `Reindexer` to get it to work seamlessly with Elasticsearch 2.0. - -Notice that all stored meta-fields are now [returned by default](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_crud_and_routing_changes.html#_all_stored_meta_fields_returned_by_default). - -## HasParentQuery / HasChildQuery - -`NewHasParentQuery` and `NewHasChildQuery` must now include both parent/child type and query. It is now in line with the Java API. - -Example for Elastic 2.0 (old): - -```go -allQ := elastic.NewMatchAllQuery() -q := elastic.NewHasChildFilter("tweet").Query(&allQ) -``` - -Example for Elastic 3.0 (new): - -```go -q := elastic.NewHasChildQuery("tweet", elastic.NewMatchAllQuery()) -``` - -## SetBasicAuth client option - -You can now tell Elastic to pass HTTP Basic Auth credentials with each request. In previous versions of Elastic you had to set up your own `http.Transport` to do this. This should make it more convenient to use Elastic in combination with [Shield](https://www.elastic.co/products/shield) in its [basic setup](https://www.elastic.co/guide/en/shield/current/enable-basic-auth.html). - -Example: - -```go -client, err := elastic.NewClient(elastic.SetBasicAuth("user", "secret")) -if err != nil { - log.Fatal(err) -} -``` - -## Delete-by-Query API - -The Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_removed_features.html#_delete_by_query_is_now_a_plugin). It is no longer a core part of Elasticsearch. You can [install it as a plugin as described here](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). - -Elastic 3.0 still contains the `DeleteByQueryService`, but you need to install the plugin first. If you don't install it and use `DeleteByQueryService` you will most probably get a 404. - -An older version of this document stated the following: - -> Elastic 3.0 still contains the `DeleteByQueryService` but it will fail with `ErrPluginNotFound` when the plugin is not installed.
-> -> Example for Elastic 3.0 (new): -> -> ```go -> _, err := client.DeleteByQuery().Query(elastic.NewTermQuery("client", "1")).Do() -> if err == elastic.ErrPluginNotFound { -> // Delete By Query API is not available -> } -> ``` - -I have decided that this is not a good way to handle the case of a missing plugin. The main reason is that with this logic, you'd always have to check if the plugin is missing in case of an error. This is not only slow, but it also puts logic into a service where it should really be just opaque and return the response of Elasticsearch. - -If you rely on certain plugins to be installed, you should check on startup. That's where the following two helpers come into play. - -## HasPlugin and SetRequiredPlugins - -Some of the core functionality of Elasticsearch has now been moved into plugins. E.g. the Delete-by-Query API is [a plugin now](https://www.elastic.co/guide/en/elasticsearch/plugins/2.0/plugins-delete-by-query.html). - -You need to make sure to add these plugins to your Elasticsearch installation to still be able to use the `DeleteByQueryService`. You can test this now with the `HasPlugin(name string)` helper in the client. - -Example for Elastic 3.0 (new): - -```go -err, found := client.HasPlugin("delete-by-query") -if err == nil && found { - // ... Delete By Query API is available -} -``` - -To simplify this process, there is now a `SetRequiredPlugins` helper that can be passed as an option func when creating a new client. If the plugin is not installed, the client wouldn't be created in the first place. - -```go -// Will raise an error if the "delete-by-query" plugin is NOT installed -client, err := elastic.NewClient(elastic.SetRequiredPlugins("delete-by-query")) -if err != nil { - log.Fatal(err) -} -``` - -Notice that there also is a way to define [mandatory plugins](https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-plugins.html#_mandatory_plugins) in the Elasticsearch configuration file. - -## Common Query has been renamed to Common Terms Query - -The `CommonQuery` has been renamed to `CommonTermsQuery` to be in line with the [Java API](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_java_api_changes.html#_query_filter_refactoring). - -## Remove `MoreLikeThis` and `MoreLikeThisField` - -The More Like This API and the More Like This Field query [have been removed](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_more_like_this) and replaced with the `MoreLikeThisQuery`. - -## Remove Filtered Query - -With the merge of queries and filters, the [filtered query became deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). While it is only deprecated and therefore still available in Elasticsearch 2.0, we have decided to remove it from Elastic 3.0. Why? Because we think that when you're already forced to rewrite many of your application code, it might be a good chance to get rid of things that are deprecated as well. So you might simply change your filtered query with a boolean query as [described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_filtered_literal_query_and_literal_query_literal_filter_deprecated). - -## Remove FuzzyLikeThis and FuzzyLikeThisField - -Both have been removed from Elasticsearch 2.0 as well. 
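A minimal sketch of the filtered-to-bool migration described in the "Remove Filtered Query" section above, assuming an existing Elastic 3.0 `client` (`*elastic.Client`); the index and field names are hypothetical, not taken from the changelog:

```go
// Hypothetical example: an Elasticsearch 1.x filtered query becomes a
// bool query whose Filter clause carries the old filter.
q := elastic.NewBoolQuery().
	Must(elastic.NewMatchQuery("title", "golang")).
	Filter(elastic.NewTermQuery("status", "published"))
res, err := client.Search().Index("posts").Query(q).Do()
if err != nil {
	// handle error
}
_ = res
```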
- -## Remove LimitFilter - -The `limit` filter is [deprecated in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_literal_limit_literal_filter_deprecated) and becomes a no-op. Now is a good chance to remove it from your application as well. Use the `terminate_after` parameter in your search [as described here](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-body.html) to achieve similar effects. - -## Remove `_cache` and `_cache_key` from filters - -Both have been [removed from Elasticsearch 2.0 as well](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_query_dsl_changes.html#_filter_auto_caching). - -## Partial fields are gone - -Partial fields are [removed in Elasticsearch 2.0](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/_search_changes.html#_partial_fields) in favor of [source filtering](https://www.elastic.co/guide/en/elasticsearch/reference/2.0/search-request-source-filtering.html). - -## Scripting - -A [`Script`](https://github.com/olivere/elastic/blob/release-branch.v3/script.go) type has been added to Elastic 3.0. In Elastic 2.0, there were various places (e.g. aggregations) where you could just add the script as a string, specify the scripting language, add parameters etc. With Elastic 3.0, you should now always use the `Script` type. - -Example for Elastic 2.0 (old): - -```go -update, err := client.Update().Index("twitter").Type("tweet").Id("1"). - Script("ctx._source.retweets += num"). - ScriptParams(map[string]interface{}{"num": 1}). - Upsert(map[string]interface{}{"retweets": 0}). - Do() -``` - -Example for Elastic 3.0 (new): - -```go -update, err := client.Update().Index("twitter").Type("tweet").Id("1"). - Script(elastic.NewScript("ctx._source.retweets += num").Param("num", 1)). - Upsert(map[string]interface{}{"retweets": 0}). - Do() -``` - -## Cluster State - -The combination of `Metric(string)` and `Metrics(...string)` has been replaced by a single func with the signature `Metric(...string)`. - -## Unexported structs in response - -Services generally return a typed response from a `Do` func. Those structs are exported so that they can be passed around in your own application. In Elastic 3.0 however, we changed that (most) sub-structs are now unexported, meaning: You can only pass around the whole response, not sub-structures of it. This makes it easier for restructuring responses according to the Elasticsearch API. See [`ClusterStateResponse`](https://github.com/olivere/elastic/blob/release-branch.v3/cluster_state.go#L182) as an example. - -## Add offset to Histogram aggregation - -Histogram aggregations now have an [offset](https://github.com/elastic/elasticsearch/pull/9505) option. - -## Services - -### REST API specification - -As you might know, Elasticsearch comes with a REST API specification. The specification describes the endpoints in a JSON structure. - -Most services in Elastic predated the REST API specification. We are in the process of bringing all these services in line with the specification. Services can be generated by `go generate` (not 100% automatic though). This is an ongoing process. - -This probably doesn't mean a lot to you. However, you can now be more confident that Elastic supports all features that the REST API specification describes. - -At the same time, the file names of the services are renamed to match the REST API specification naming. 
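A minimal sketch of the variadic `Metric(...)` call from the "Cluster State" section above, again assuming an existing Elastic 3.0 `client`; the metric names are the standard cluster-state metrics ("metadata", "routing_table", "nodes", ...):

```go
// One variadic Metric(...) replaces the old Metric(string)/Metrics(...string) pair.
res, err := client.ClusterState().Metric("metadata", "routing_table").Do()
if err != nil {
	// handle error
}
_ = res
```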
- -### REST API Test Suite - -The REST API specification of Elasticsearch comes along with a test suite that official clients typically use to test for conformance. Up until now, Elastic didn't run this test suite. However, we are in the process of setting up infrastructure and tests to match this suite as well. - -This process is not completed though. - - diff --git a/vendor/github.com/olivere/elastic/v7/CHANGELOG-5.0.md b/vendor/github.com/olivere/elastic/v7/CHANGELOG-5.0.md deleted file mode 100644 index 161c6a1..0000000 --- a/vendor/github.com/olivere/elastic/v7/CHANGELOG-5.0.md +++ /dev/null @@ -1,195 +0,0 @@ -# Changes in Elastic 5.0 - -## Enforce context.Context in PerformRequest and Do - -We enforce the usage of `context.Context` everywhere you execute a request. -You need to change all your `Do()` calls to pass a context: `Do(ctx)`. -This enables automatic request cancelation and many other patterns. - -If you don't need this, simply pass `context.TODO()` or `context.Background()`. - -## Warmers removed - -Warmers are no longer necessary and have been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_index_apis.html#_warmers). - -## Optimize removed - -Optimize was deprecated in ES 2.0 and has been [removed in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_rest_api_changes.html#_literal__optimize_literal_endpoint_removed). -Use [Force Merge](https://www.elastic.co/guide/en/elasticsearch/reference/master/indices-forcemerge.html) instead. - -## Missing Query removed - -The `missing` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-exists-query.html#_literal_missing_literal_query). -Use `exists` query with `must_not` in `bool` query instead. - -## And Query removed - -The `and` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). -Use `must` clauses in a `bool` query instead. - -## Not Query removed - -TODO Is it removed? - -## Or Query removed - -The `or` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). -Use `should` clauses in a `bool` query instead. - -## Filtered Query removed - -The `filtered` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). -Use `bool` query instead, which supports `filter` clauses too. - -## Limit Query removed - -The `limit` query has been [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_search_changes.html#_deprecated_queries_removed). -Use the `terminate_after` parameter instead. - -## Template Query removed - -The `template` query has been [deprecated](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-template-query.html). You should use -Search Templates instead. - -We remove it from Elastic 5.0 as the 5.0 update is already a good opportunity -to get rid of old stuff. - -## `_timestamp` and `_ttl` removed - -Both of these fields were deprecated and are now [removed](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_mapping_changes.html#_literal__timestamp_literal_and_literal__ttl_literal). - -## Search template Put/Delete API returns `acknowledged` only - -The response type for Put/Delete search templates has changed.
-It only returns a single `acknowledged` flag now. - -## Fields has been renamed to Stored Fields - -The `fields` parameter has been renamed to `stored_fields`. -See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fields_literal_parameter). - -## Fielddatafields has been renamed to Docvaluefields - -The `fielddata_fields` parameter [has been renamed](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_search_changes.html#_literal_fielddata_fields_literal_parameter) -to `docvalue_fields`. - -## Type exists endpoint changed - -The endpoint for checking whether a type exists has been changed from -`HEAD {index}/{type}` to `HEAD {index}/_mapping/{type}`. -See [here](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking_50_rest_api_changes.html#_literal_head_index_type_literal_replaced_with_literal_head_index__mapping_type_literal). - -## Refresh parameter changed - -The `?refresh` parameter previously could be a boolean value. It indicated -whether changes made by a request (e.g. by the Bulk API) should be immediately -visible in search, or not. Using `refresh=true` had the positive effect of -immediately seeing the changes when searching; the negative effect is that -it is a rather big performance hit. - -With 5.0, you now have the choice between these 3 values: - -* `"true"` - Refresh immediately -* `"false"` - Do not refresh (the default value) -* `"wait_for"` - Wait until ES has made the document visible in search - -See [?refresh](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-refresh.html) in the documentation. - -Notice that `true` and `false` (the boolean values) are no longer available -now in Elastic. You must use a string instead, with one of the above values. - -## ReindexerService removed - -The `ReindexerService` was a custom solution that was started in the ES 1.x era -to automate reindexing data, from one index to another or even between clusters. - -ES 2.3 introduced its own [Reindex API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-reindex.html) -so we're going to remove our custom solution and ask you to use the native reindexer. - -The `ReindexService` is available via `client.Reindex()` (which used to point -to the custom reindexer). - -## Delete By Query back in core - -The [Delete By Query API](https://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html) -was moved into a plugin in 2.0. Now it's back in core with a complete rewrite based on the Bulk API. - -It has its own endpoint at `/_delete_by_query`. - -Delete By Query, Reindex, and Update By Query are very similar under the hood. - -## Reindex, Delete By Query, and Update By Query response changed - -The response from the above APIs changed a bit. E.g. the `retries` value -used to be an `int64` and returns separate values for `bulk` and `search` now: - -``` -// Old -{ - ... - "retries": 123, - ... -} -``` - -``` -// New -{ - ... - "retries": { - "bulk": 123, - "search": 0 - }, - ... -} -``` - -## ScanService removed - -The `ScanService` is removed. Use the (new) `ScrollService` instead. - -## New ScrollService - -There was confusion around `ScanService` and `ScrollService` doing basically -the same thing. One was returning slices and didn't support all query details, the -other returned one document after another and wasn't safe for concurrent use.
-So we merged the two into a new `ScrollService` that -removes all the problems with the older services. - -In other words: -If you used `ScanService`, switch to `ScrollService`. -If you used the old `ScrollService`, you might need to fix some things but -overall it should just work. - -Changes: -- We replaced `elastic.EOS` with `io.EOF` to indicate the "end of scroll". - -TODO Not implemented yet - -## Suggesters - -They have been [completely rewritten in ES 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html). - -Some changes: -- Suggesters no longer have an [output](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking_50_suggester.html#_simpler_completion_indexing). - -TODO Fix all structural changes in suggesters - -## Percolator - -Percolator has [changed considerably](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/breaking_50_percolator.html). - -Elastic 5.0 adds the new -[Percolator Query](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/query-dsl-percolate-query.html) -which can be used in combination with the new -[Percolator type](https://www.elastic.co/guide/en/elasticsearch/reference/5.x/percolator.html). - -The Percolate service is removed from Elastic 5.0. - -## Remove Consistency, add WaitForActiveShards - -The `consistency` parameter has been removed in a lot of places, e.g. the Bulk, -Index, Delete, Delete-by-Query, Reindex, Update, and Update-by-Query API. - -It has been replaced by a somewhat similar `wait_for_active_shards` parameter. -See https://github.com/elastic/elasticsearch/pull/19454. diff --git a/vendor/github.com/olivere/elastic/v7/CHANGELOG-6.0.md b/vendor/github.com/olivere/elastic/v7/CHANGELOG-6.0.md deleted file mode 100644 index 255bda4..0000000 --- a/vendor/github.com/olivere/elastic/v7/CHANGELOG-6.0.md +++ /dev/null @@ -1,18 +0,0 @@ -# Changes from 5.0 to 6.0 - -See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes-6.0.html). - -## _all removed - -6.0 has removed support for the `_all` field. - -## Boolean values coerced - -Only use `true` or `false` for boolean values, not `0` or `1` or `on` or `off`. - -## Single Type Indices - -Notice that 6.0 and future versions will default to single type indices, i.e. you may not use multiple types when e.g. adding an index with a mapping. - -See [here for details](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/removal-of-types.html#_what_are_mapping_types). - diff --git a/vendor/github.com/olivere/elastic/v7/CHANGELOG-7.0.md b/vendor/github.com/olivere/elastic/v7/CHANGELOG-7.0.md deleted file mode 100644 index 1edd9dd..0000000 --- a/vendor/github.com/olivere/elastic/v7/CHANGELOG-7.0.md +++ /dev/null @@ -1,55 +0,0 @@ -# Changes from 6.0 to 7.0 - -See [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/7.x/breaking-changes-7.0.html). - -## SearchHit.Source changed from `*json.RawMessage` to `json.RawMessage` - -The `SearchHit` structure changed from - -``` -// SearchHit is a single hit. -type SearchHit struct { - ... - Source *json.RawMessage `json:"_source,omitempty"` // stored document source - ... -} -``` - -to - -``` -// SearchHit is a single hit. -type SearchHit struct { - ... - Source json.RawMessage `json:"_source,omitempty"` // stored document source - ... -} -``` - -As `json.RawMessage` is a `[]byte`, there is no need to specify it -as `*json.RawMessage`, since `json.RawMessage` is perfectly able to represent -a `nil` value.
- -So when deserializing the search hits, you need to change your code from: - -``` -for _, hit := range searchResult.Hits.Hits { - var doc Doc - err := json.Unmarshal(*hit.Source, &doc) // notice the * here - if err != nil { - // Deserialization failed - } -} -``` - -to - -``` -for _, hit := range searchResult.Hits.Hits { - var doc Doc - err := json.Unmarshal(hit.Source, &doc) // it's missing here - if err != nil { - // Deserialization failed - } -} -``` diff --git a/vendor/github.com/olivere/elastic/v7/CODE_OF_CONDUCT.md b/vendor/github.com/olivere/elastic/v7/CODE_OF_CONDUCT.md deleted file mode 100644 index acefece..0000000 --- a/vendor/github.com/olivere/elastic/v7/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,46 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at oliver@eilhard.net. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 
- -Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/olivere/elastic/v7/CONTRIBUTING.md b/vendor/github.com/olivere/elastic/v7/CONTRIBUTING.md deleted file mode 100644 index c7c425a..0000000 --- a/vendor/github.com/olivere/elastic/v7/CONTRIBUTING.md +++ /dev/null @@ -1,40 +0,0 @@ -# How to contribute - -Elastic is an open-source project and we are looking forward to each -contribution. - -Notice that while the [official Elasticsearch documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html) is rather good, it is a high-level -overview of the features of Elasticsearch. However, Elastic tries to resemble -the Java API of Elasticsearch which you can find [on GitHub](https://github.com/elastic/elasticsearch). - -This explains why you might think that some options are strange or missing -in Elastic, while often they're just different. Please check the Java API first. - -Having said that: Elasticsearch is moving fast and it might be very likely -that we missed some features or changes. Feel free to change that. - -## Your Pull Request - -To make it easy to review and understand your changes, please keep the -following things in mind before submitting your pull request: - -* You compared the existing implementation with the Java API, did you? -* Please work on the latest possible state of `olivere/elastic`. - Use `release-branch.v2` for targeting Elasticsearch 1.x and - `release-branch.v3` for targeting 2.x. -* Create a branch dedicated to your change. -* If possible, write a test case which confirms your change. -* Make sure your changes and your tests work with all recent versions of - Elasticsearch. We currently support Elasticsearch 1.7.x in the - release-branch.v2 and Elasticsearch 2.x in the release-branch.v3. -* Test your changes before creating a pull request (`go test ./...`). -* Don't mix several features or bug fixes in one pull request. -* Create a meaningful commit message. -* Explain your change, e.g. provide a link to the issue you are fixing and - probably a link to the Elasticsearch documentation and/or source code. -* Format your source with `go fmt`. - -## Additional Resources - -* [GitHub documentation](https://help.github.com/) -* [GitHub pull request documentation](https://help.github.com/en/articles/creating-a-pull-request) diff --git a/vendor/github.com/olivere/elastic/v7/CONTRIBUTORS b/vendor/github.com/olivere/elastic/v7/CONTRIBUTORS deleted file mode 100644 index 93f2d9e..0000000 --- a/vendor/github.com/olivere/elastic/v7/CONTRIBUTORS +++ /dev/null @@ -1,198 +0,0 @@ -# This is a list of people who have contributed code -# to the Elastic repository. -# -# It is just my small "thank you" to all those that helped -# making Elastic what it is. -# -# Please keep this list sorted. 
- -0x6875790d0a [@huydx](https://github.com/huydx) -Aaron Tami [@aarontami](https://github.com/aarontami) -Adam Alix [@adamalix](https://github.com/adamalix) -Adam Weiner [@adamweiner](https://github.com/adamweiner) -Adrian Lungu [@AdrianLungu](https://github.com/AdrianLungu) -alehano [@alehano](https://github.com/alehano) -Alejandro Carstens [@alejandro-carstens](https://github.com/alejandro-carstens) -Alex [@akotlar](https://github.com/akotlar) -Alexander Sack [@asac](https://github.com/asac) -Alexandre Olivier [@aliphen](https://github.com/aliphen) -Alexey Sharov [@nizsheanez](https://github.com/nizsheanez) -Aman Jain [@amanjain97](https://github.com/amanjain97) -Anders [@ANerd](https://github.com/ANerd) -AndreKR [@AndreKR](https://github.com/AndreKR) -André Bierlein [@ligustah](https://github.com/ligustah) -Andrew Dunham [@andrew-d](https://github.com/andrew-d) -Andrew Gaul [@andrewgaul](https://github.com/andrewgaul) -Andy Walker [@alaska](https://github.com/alaska) -Arpit Agarwal [@arpiagar](https://github.com/arpiagar) -Arquivei [@arquivei](https://github.com/arquivei) -Artemiy Elozhenko [@artezh](https://github.com/artezh) -arthurgustin [@arthurgustin](https://github.com/arthurgustin) -Bas van Dijk [@basvandijk](https://github.com/basvandijk) -Benjamin Fernandes [@LotharSee](https://github.com/LotharSee) -Benjamin Zarzycki [@kf6nux](https://github.com/kf6nux) -Björn Gerdau [@kernle32dll](https://github.com/kernle32dll) -Boris Popovschi [@Zyqsempai](https://github.com/Zyqsempai) -Bowei Xu [@vancexu](https://github.com/vancexu) -Braden Bassingthwaite [@bbassingthwaite-va](https://github.com/bbassingthwaite-va) -Brady Love [@bradylove](https://github.com/bradylove) -Bryan Conklin [@bmconklin](https://github.com/bmconklin) -Bruce Zhou [@brucez-isell](https://github.com/brucez-isell) -Carl Dunham [@carldunham](https://github.com/carldunham) -Carl Johan Gustavsson [@cjgu](https://github.com/cjgu) -Cat [@cat-turner](https://github.com/cat-turner) -César Jiménez [@cesarjimenez](https://github.com/cesarjimenez) -cforbes [@cforbes](https://github.com/cforbes) -張泰瑋(Chang Tai Wei) [@david30907d](https://github.com/david30907d) -cheshire [@NikitaSerenko](https://github.com/NikitaSerenko) -Chris M [@tebriel](https://github.com/tebriel) -Chris Rice [@donutmonger](https://github.com/donutmonger) -Claudiu Olteanu [@claudiuolteanu](https://github.com/claudiuolteanu) -Chris Duncan [@veqryn](https://github.com/veqryn) -Chris Ludden [@cludden](https://github.com/cludden) -Christophe Courtaut [@kri5](https://github.com/kri5) -cmitchell [@cmitchell](https://github.com/cmitchell) -Connor Peet [@connor4312](https://github.com/connor4312) -Conrad Pankoff [@deoxxa](https://github.com/deoxxa) -Corey Scott [@corsc](https://github.com/corsc) -Chris Petersen [@ex-nerd](https://github.com/ex-nerd) -Daniel Barrett [@shendaras](https://github.com/shendaras) -Daniel Heckrath [@DanielHeckrath](https://github.com/DanielHeckrath) -Daniel Imfeld [@dimfeld](https://github.com/dimfeld) -Daniel Santos [@danlsgiga](https://github.com/danlsgiga) -David Emanuel Buchmann [@wuurrd](https://github.com/wuurrd) -diacone [@diacone](https://github.com/diacone) -Diego Becciolini [@itizir](https://github.com/itizir) -Dwayne Schultz [@myshkin5](https://github.com/myshkin5) -Elizabeth Jarrett [@mejarrett](https://github.com/mejarrett) -Elliot Williams [@elliotwms](https://github.com/elliotwms) -Ellison Leão [@ellisonleao](https://github.com/ellisonleao) -Emil Gedda [@EmilGedda](https://github.com/EmilGedda) -Erik Grinaker 
[@erikgrinaker](https://github.com/erikgrinaker) -Erwin [@eticzon](https://github.com/eticzon) -Etienne Lafarge [@elafarge](https://github.com/elafarge) -Eugene Egorov [@EugeneEgorov](https://github.com/EugeneEgorov) -Evan Shaw [@edsrzf](https://github.com/edsrzf) -Fanfan [@wenpos](https://github.com/wenpos) -Faolan C-P [@fcheslack](https://github.com/fcheslack) -Filip Tepper [@filiptepper](https://github.com/filiptepper) -Garrett Kelley [@GarrettKelley](https://github.com/GarrettKelley) -Gaspard Douady [@plopik](https://github.com/plopik) -Gaylord Aulke [@blafasel42](https://github.com/blafasel42) -Gerhard Häring [@ghaering](https://github.com/ghaering) -gregoryfranklin [@gregoryfranklin](https://github.com/gregoryfranklin) -Guilherme Silveira [@guilherme-santos](https://github.com/guilherme-santos) -Guillaume J. Charmes [@creack](https://github.com/creack) -Guiseppe [@gm42](https://github.com/gm42) -Han Yu [@MoonighT](https://github.com/MoonighT) -Harmen [@alicebob](https://github.com/alicebob) -Harrison Wright [@wright8191](https://github.com/wright8191) -Henry Clifford [@hcliff](https://github.com/hcliff) -Henry Stern [@hstern](https://github.com/hstern) -Igor Dubinskiy [@idubinskiy](https://github.com/idubinskiy) -initialcontext [@initialcontext](https://github.com/initialcontext) -Isaac Saldana [@isaldana](https://github.com/isaldana) -Ishan Jain [@ishanjain28](https://github.com/ishanjain28) -J Barkey Wolf [@jjhbw](https://github.com/jjhbw) -Jack Lindamood [@cep21](https://github.com/cep21) -Jacob [@jdelgad](https://github.com/jdelgad) -Jan Düpmeier [@jduepmeier](https://github.com/jduepmeier) -Jayme Rotsaert [@jrots](https://github.com/jrots) -Jean-Alexandre Beaumont [@Enteris](https://github.com/Enteris) -Jean-François Roche [@jfroche](https://github.com/jfroche) -Jeff Rand [@jeffrand](https://github.com/jeffrand) -Jeremy Canady [@jrmycanady](https://github.com/jrmycanady) -Jérémie Vexiau [@texvex](https://github.com/texvex) -Jesper Bränn [@Yopi](https://github.com/Yopi) -Jim Berlage [@jimberlage](https://github.com/jimberlage) -Joe Buck [@four2five](https://github.com/four2five) -John Barker [@j16r](https://github.com/j16r) -John Goodall [@jgoodall](https://github.com/jgoodall) -John Stanford [@jxstanford](https://github.com/jxstanford) -Jonas Groenaas Drange [@semafor](https://github.com/semafor) -Josef Fröhle [@Dexus](https://github.com/Dexus) -José Martínez [@xose](https://github.com/xose) -Josh Chorlton [@jchorl](https://github.com/jchorl) -Jpnock [@Jpnock](https://github.com/Jpnock) -jun [@coseyo](https://github.com/coseyo) -Junpei Tsuji [@jun06t](https://github.com/jun06t) -Karen Yang [@kyangtt](https://github.com/kyangtt) -kartlee [@kartlee](https://github.com/kartlee) -Keith Hatton [@khatton-ft](https://github.com/khatton-ft) -kel [@liketic](https://github.com/liketic) -Kenta SUZUKI [@suzuken](https://github.com/suzuken) -Kevin Mulvey [@kmulvey](https://github.com/kmulvey) -Kyle Brandt [@kylebrandt](https://github.com/kylebrandt) -Larry Cinnabar [@larrycinnabar](https://github.com/larrycinnabar) -Leandro Piccilli [@lpic10](https://github.com/lpic10) -Lee [@leezhm](https://github.com/leezhm) -lechnertech [@lechnertech](https://github.com/lechnertech) -M. 
Zulfa Achsani [@misterciput](https://github.com/misterciput) -Maciej Lisiewski [@c2h5oh](https://github.com/c2h5oh) -Mara Kim [@autochthe](https://github.com/autochthe) -Marcy Buccellato [@marcybuccellato](https://github.com/marcybuccellato) -Mark Costello [@mcos](https://github.com/mcos) -Martin Häger [@protomouse](https://github.com/protomouse) -Matt Braymer-Hayes [@mattayes](https://github.com/mattayes) -Medhi Bechina [@mdzor](https://github.com/mdzor) -Mike Beshai [@mbesh](https://github.com/mbesh) -mmfrb [@mmfrb](https://github.com/mmfrb) -mnpritula [@mnpritula](https://github.com/mnpritula) -mosa [@mosasiru](https://github.com/mosasiru) -Muhammet Çakır [@cakirmuha](https://github.com/cakirmuha) -naimulhaider [@naimulhaider](https://github.com/naimulhaider) -Naoya Yoshizawa [@azihsoyn](https://github.com/azihsoyn) -navins [@ishare](https://github.com/ishare) -Naoya Tsutsumi [@tutuming](https://github.com/tutuming) -Nathan Lacey [@nlacey](https://github.com/nlacey) -NeoCN [@NeoCN](https://github.com/NeoCN) -Nicholas Wolff [@nwolff](https://github.com/nwolff) -Nick K [@utrack](https://github.com/utrack) -Nick Whyte [@nickw444](https://github.com/nickw444) -Nicolae Vartolomei [@nvartolomei](https://github.com/nvartolomei) -okhowang [@okhowang](https://github.com/okhowang) -Orne Brocaar [@brocaar](https://github.com/brocaar) -Paul [@eyeamera](https://github.com/eyeamera) -Paul Oldenburg [@lr-paul](https://github.com/lr-paul) -Pedro [@otherview](https://github.com/otherview) -Pete C [@peteclark-ft](https://github.com/peteclark-ft) -Peter Nagy [@nagypeterjob](https://github.com/nagypeterjob) -Paolo [@ppiccolo](https://github.com/ppiccolo) -Igor Panychek [@panychek](https://github.com/panychek) -Radoslaw Wesolowski [@r--w](https://github.com/r--w) -Rafał Gałus [@rgalus](https://github.com/rgalus) -rchicoli [@rchicoli](https://github.com/rchicoli) -Roman Colohanin [@zuzmic](https://github.com/zuzmic) -Ryan Schmukler [@rschmukler](https://github.com/rschmukler) -Ryan Wynn [@rwynn](https://github.com/rwynn) -Sacheendra talluri [@sacheendra](https://github.com/sacheendra) -Sean DuBois [@Sean-Der](https://github.com/Sean-Der) -Sagan Yaroslav [@sgnrslv](https://github.com/sgnrslv) -Shalin LK [@shalinlk](https://github.com/shalinlk) -Simon Schneider [@raynigon](https://github.com/raynigon) -singham [@zhaochenxiao90](https://github.com/zhaochenxiao90) -Slawomir CALUCH [@slawo](https://github.com/slawo) -soarpenguin [@soarpenguin](https://github.com/soarpenguin) -Stephan Krynauw [@skrynauw](https://github.com/skrynauw) -Stephen Kubovic [@stephenkubovic](https://github.com/stephenkubovic) -Stuart Warren [@Woz](https://github.com/stuart-warren) -Sulaiman [@salajlan](https://github.com/salajlan) -Sundar [@sundarv85](https://github.com/sundarv85) -Swarlston [@Swarlston](https://github.com/Swarlston) -Take [ww24](https://github.com/ww24) -Tetsuya Morimoto [@t2y](https://github.com/t2y) -TheZeroSlave [@TheZeroSlave](https://github.com/TheZeroSlave) -Tomasz Elendt [@telendt](https://github.com/telendt) -TimeEmit [@TimeEmit](https://github.com/timeemit) -TusharM [@tusharm](https://github.com/tusharm) -wangtuo [@wangtuo](https://github.com/wangtuo) -Wédney Yuri [@wedneyyuri](https://github.com/wedneyyuri) -Wesley Kim [@wesleyk](https://github.com/wesleyk) -wolfkdy [@wolfkdy](https://github.com/wolfkdy) -Wyndham Blanton [@wyndhblb](https://github.com/wyndhblb) -Yarden Bar [@ayashjorden](https://github.com/ayashjorden) -Yuya Kusakabe [@higebu](https://github.com/higebu) -zakthomas 
[@zakthomas](https://github.com/zakthomas) -Zach [@snowzach](https://github.com/snowzach) -zhangxin [@visaxin](https://github.com/visaxin) -@林 [@zplzpl](https://github.com/zplzpl) diff --git a/vendor/github.com/olivere/elastic/v7/ISSUE_TEMPLATE.md b/vendor/github.com/olivere/elastic/v7/ISSUE_TEMPLATE.md deleted file mode 100644 index 11d6e2c..0000000 --- a/vendor/github.com/olivere/elastic/v7/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,19 +0,0 @@ -Please use the following questions as a guideline to help me answer -your issue/question without further inquiry. Thank you. - -### Which version of Elastic are you using? - -[ ] elastic.v7 (for Elasticsearch 7.x) -[ ] elastic.v6 (for Elasticsearch 6.x) -[ ] elastic.v5 (for Elasticsearch 5.x) -[ ] elastic.v3 (for Elasticsearch 2.x) -[ ] elastic.v2 (for Elasticsearch 1.x) - -### Please describe the expected behavior - - -### Please describe the actual behavior - - -### Any steps to reproduce the behavior? - diff --git a/vendor/github.com/olivere/elastic/v7/LICENSE b/vendor/github.com/olivere/elastic/v7/LICENSE deleted file mode 100644 index 8b22cdb..0000000 --- a/vendor/github.com/olivere/elastic/v7/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) -Copyright © 2012-2015 Oliver Eilhard - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the “Software”), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. diff --git a/vendor/github.com/olivere/elastic/v7/README.md b/vendor/github.com/olivere/elastic/v7/README.md deleted file mode 100644 index 2f2b073..0000000 --- a/vendor/github.com/olivere/elastic/v7/README.md +++ /dev/null @@ -1,426 +0,0 @@ -# Elastic - -**This is a development branch that is actively being worked on. DO NOT USE IN PRODUCTION! If you want to use stable versions of Elastic, please use Go modules for the 7.x release (or later) or a dependency manager like [dep](https://github.com/golang/dep) for earlier releases.** - -Elastic is an [Elasticsearch](http://www.elasticsearch.org/) client for the -[Go](http://www.golang.org/) programming language. - -[![Build Status](https://github.com/olivere/elastic/workflows/Test/badge.svg)](https://github.com/olivere/elastic/actions) -[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://pkg.go.dev/github.com/olivere/elastic/v7?tab=doc) -[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/olivere/elastic/master/LICENSE) - -See the [wiki](https://github.com/olivere/elastic/wiki) for additional information about Elastic. - -Buy Me A Coffee - -## Releases - -**The release branches (e.g. 
[`release-branch.v7`](https://github.com/olivere/elastic/tree/release-branch.v7)) -are actively being worked on and can break at any time. -If you want to use stable versions of Elastic, please use Go modules.** - -Here's the version matrix: - -Elasticsearch version | Elastic version | Package URL | Remarks | ----------------------|------------------|-------------|---------| -7.x                   | 7.0             | [`github.com/olivere/elastic/v7`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v7) [doc](http://godoc.org/github.com/olivere/elastic)) | Use Go modules. -6.x                   | 6.0             | [`github.com/olivere/elastic`](https://github.com/olivere/elastic) ([source](https://github.com/olivere/elastic/tree/release-branch.v6) [doc](http://godoc.org/github.com/olivere/elastic)) | Use a dependency manager (see below). -5.x | 5.0 | [`gopkg.in/olivere/elastic.v5`](https://gopkg.in/olivere/elastic.v5) ([source](https://github.com/olivere/elastic/tree/release-branch.v5) [doc](http://godoc.org/gopkg.in/olivere/elastic.v5)) | Actively maintained. -2.x | 3.0 | [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3) ([source](https://github.com/olivere/elastic/tree/release-branch.v3) [doc](http://godoc.org/gopkg.in/olivere/elastic.v3)) | Deprecated. Please update. -1.x | 2.0 | [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2) ([source](https://github.com/olivere/elastic/tree/release-branch.v2) [doc](http://godoc.org/gopkg.in/olivere/elastic.v2)) | Deprecated. Please update. -0.9-1.3 | 1.0 | [`gopkg.in/olivere/elastic.v1`](https://gopkg.in/olivere/elastic.v1) ([source](https://github.com/olivere/elastic/tree/release-branch.v1) [doc](http://godoc.org/gopkg.in/olivere/elastic.v1)) | Deprecated. Please update. - -**Example:** - -You have installed Elasticsearch 7.0.0 and want to use Elastic. -As listed above, you should use Elastic 7.0 (code is in `release-branch.v7`). - -To use the required version of Elastic in your application, you -should use [Go modules](https://github.com/golang/go/wiki/Modules) -to manage dependencies. Make sure to use a version such as `7.0.0` or later. - -To use Elastic, import: - -```go -import "github.com/olivere/elastic/v7" -``` - -### Elastic 7.0 - -Elastic 7.0 targets Elasticsearch 7.x, which [was released on April 10th 2019](https://www.elastic.co/guide/en/elasticsearch/reference/7.0/release-notes-7.0.0.html). - -As always with a major version, there are a lot of [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/7.0/release-notes-7.0.0.html#breaking-7.0.0). -We will use this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v7/CHANGELOG-7.0.md), -as we already did in earlier (major) releases. - -### Elastic 6.0 - -Elastic 6.0 targets Elasticsearch 6.x, which was [released on 14th November 2017](https://www.elastic.co/blog/elasticsearch-6-0-0-released). - -Notice that there are a lot of [breaking changes in Elasticsearch 6.0](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/breaking-changes-6.0.html) -and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v6/CHANGELOG-6.0.md) -as we did in the transition from earlier versions of Elastic. - -### Elastic 5.0 - -Elastic 5.0 targets Elasticsearch 5.0.0 and later. Elasticsearch 5.0.0 was
Elasticsearch 5.0.0 was -[released on 26th October 2016](https://www.elastic.co/blog/elasticsearch-5-0-0-released). - -Notice that there are will be a lot of [breaking changes in Elasticsearch 5.0](https://www.elastic.co/guide/en/elasticsearch/reference/5.0/breaking-changes-5.0.html) -and we used this as an opportunity to [clean up and refactor Elastic](https://github.com/olivere/elastic/blob/release-branch.v5/CHANGELOG-5.0.md) -as we did in the transition from Elastic 2.0 (for Elasticsearch 1.x) to Elastic 3.0 (for Elasticsearch 2.x). - -Furthermore, the jump in version numbers will give us a chance to be in sync with the Elastic Stack. - -### Elastic 3.0 - -Elastic 3.0 targets Elasticsearch 2.x and is published via [`gopkg.in/olivere/elastic.v3`](https://gopkg.in/olivere/elastic.v3). - -Elastic 3.0 will only get critical bug fixes. You should update to a recent version. - -### Elastic 2.0 - -Elastic 2.0 targets Elasticsearch 1.x and is published via [`gopkg.in/olivere/elastic.v2`](https://gopkg.in/olivere/elastic.v2). - -Elastic 2.0 will only get critical bug fixes. You should update to a recent version. - -### Elastic 1.0 - -Elastic 1.0 is deprecated. You should really update Elasticsearch and Elastic -to a recent version. - -However, if you cannot update for some reason, don't worry. Version 1.0 is -still available. All you need to do is go-get it and change your import path -as described above. - - -## Status - -We use Elastic in production since 2012. Elastic is stable but the API changes -now and then. We strive for API compatibility. -However, Elasticsearch sometimes introduces [breaking changes](https://www.elastic.co/guide/en/elasticsearch/reference/master/breaking-changes.html) -and we sometimes have to adapt. - -Having said that, there have been no big API changes that required you -to rewrite your application big time. More often than not it's renaming APIs -and adding/removing features so that Elastic is in sync with Elasticsearch. - -Elastic has been used in production starting with Elasticsearch 0.90 up to recent 7.x -versions. -We recently switched to [GitHub Actions for testing](https://github.com/olivere/elastic/actions). -Before that, we used [Travis CI](https://travis-ci.org/olivere/elastic) successfully for years). - -Elasticsearch has quite a few features. Most of them are implemented -by Elastic. I add features and APIs as required. It's straightforward -to implement missing pieces. I'm accepting pull requests :-) - -Having said that, I hope you find the project useful. - - -## Getting Started - -The first thing you do is to create a [Client](https://github.com/olivere/elastic/blob/master/client.go). -The client connects to Elasticsearch on `http://127.0.0.1:9200` by default. - -You typically create one client for your app. Here's a complete example of -creating a client, creating an index, adding a document, executing a search etc. - -An example is available [here](https://olivere.github.io/elastic/). - -Here's a [link to a complete working example for v6](https://gist.github.com/olivere/e4a376b4783c0914e44ea4f745ce2ebf). - -Here are a few tips on how to get used to Elastic: - -1. Head over to the [Wiki](https://github.com/olivere/elastic/wiki) for detailed information and - topics like e.g. [how to add a middleware](https://github.com/olivere/elastic/wiki/HttpTransport) - or how to [connect to AWS](https://github.com/olivere/elastic/wiki/Using-with-AWS-Elasticsearch-Service). -2. If you are unsure how to implement something, read the tests (all `_test.go` files). 
- They not only serve as a guard against changes, but also as a reference. -3. The [recipes](https://github.com/olivere/elastic/tree/release-branch.v6/recipes) - contains small examples on how to implement something, e.g. bulk indexing, scrolling etc. - - -## API Status - -### Document APIs - -- [x] Index API -- [x] Get API -- [x] Delete API -- [x] Delete By Query API -- [x] Update API -- [x] Update By Query API -- [x] Multi Get API -- [x] Bulk API -- [x] Reindex API -- [x] Term Vectors -- [x] Multi termvectors API - -### Search APIs - -- [x] Search -- [x] Search Template -- [ ] Multi Search Template -- [x] Search Shards API -- [x] Suggesters - - [x] Term Suggester - - [x] Phrase Suggester - - [x] Completion Suggester - - [x] Context Suggester -- [x] Multi Search API -- [x] Count API -- [x] Validate API -- [x] Explain API -- [x] Profile API -- [x] Field Capabilities API - -### Aggregations - -- Metrics Aggregations - - [x] Avg - - [x] Cardinality - - [x] Extended Stats - - [x] Geo Bounds - - [x] Geo Centroid - - [x] Max - - [x] Min - - [x] Percentiles - - [x] Percentile Ranks - - [ ] Scripted Metric - - [x] Stats - - [x] Sum - - [x] Top Hits - - [x] Value Count -- Bucket Aggregations - - [x] Adjacency Matrix - - [x] Children - - [x] Auto-interval Date Histogram - - [x] Date Histogram - - [x] Date Range - - [x] Diversified Sampler - - [x] Filter - - [x] Filters - - [x] Geo Distance - - [ ] GeoHash Grid - - [x] Global - - [x] Histogram - - [x] IP Range - - [x] Missing - - [x] Nested - - [x] Range - - [x] Reverse Nested - - [x] Sampler - - [x] Significant Terms - - [x] Significant Text - - [x] Terms - - [x] Composite -- Pipeline Aggregations - - [x] Avg Bucket - - [x] Derivative - - [x] Max Bucket - - [x] Min Bucket - - [x] Sum Bucket - - [x] Stats Bucket - - [ ] Extended Stats Bucket - - [x] Percentiles Bucket - - [x] Moving Average - - [x] Cumulative Sum - - [x] Bucket Script - - [x] Bucket Selector - - [x] Bucket Sort - - [x] Serial Differencing -- [x] Matrix Aggregations - - [x] Matrix Stats -- [x] Aggregation Metadata - -### Indices APIs - -- [x] Create Index -- [x] Delete Index -- [x] Get Index -- [x] Indices Exists -- [x] Open / Close Index -- [x] Shrink Index -- [x] Rollover Index -- [x] Put Mapping -- [x] Get Mapping -- [x] Get Field Mapping -- [x] Types Exists -- [x] Index Aliases -- [x] Update Indices Settings -- [x] Get Settings -- [x] Analyze - - [x] Explain Analyze -- [x] Index Templates -- [x] Indices Stats -- [x] Indices Segments -- [ ] Indices Recovery -- [ ] Indices Shard Stores -- [x] Clear Cache -- [x] Flush - - [x] Synced Flush -- [x] Refresh -- [x] Force Merge - -### Index Lifecycle Management APIs - -- [x] Create Policy -- [x] Get Policy -- [x] Delete Policy -- [ ] Move to Step -- [ ] Remove Policy -- [ ] Retry Policy -- [ ] Get Ilm Status -- [ ] Explain Lifecycle -- [ ] Start Ilm -- [ ] Stop Ilm - -### cat APIs - -- [X] cat aliases -- [X] cat allocation -- [X] cat count -- [ ] cat fielddata -- [X] cat health -- [X] cat indices -- [ ] cat master -- [ ] cat nodeattrs -- [ ] cat nodes -- [ ] cat pending tasks -- [ ] cat plugins -- [ ] cat recovery -- [ ] cat repositories -- [ ] cat thread pool -- [ ] cat shards -- [ ] cat segments -- [ ] cat snapshots -- [ ] cat templates - -### Cluster APIs - -- [x] Cluster Health -- [x] Cluster State -- [x] Cluster Stats -- [ ] Pending Cluster Tasks -- [x] Cluster Reroute -- [ ] Cluster Update Settings -- [x] Nodes Stats -- [x] Nodes Info -- [ ] Nodes Feature Usage -- [ ] Remote Cluster Info -- [x] Task Management API -- [ ] Nodes 
hot_threads -- [ ] Cluster Allocation Explain API - -### Query DSL - -- [x] Match All Query -- [x] Inner hits -- Full text queries - - [x] Match Query - - [x] Match Phrase Query - - [x] Match Phrase Prefix Query - - [x] Multi Match Query - - [x] Common Terms Query - - [x] Query String Query - - [x] Simple Query String Query -- Term level queries - - [x] Term Query - - [x] Terms Query - - [x] Terms Set Query - - [x] Range Query - - [x] Exists Query - - [x] Prefix Query - - [x] Wildcard Query - - [x] Regexp Query - - [x] Fuzzy Query - - [x] Type Query - - [x] Ids Query -- Compound queries - - [x] Constant Score Query - - [x] Bool Query - - [x] Dis Max Query - - [x] Function Score Query - - [x] Boosting Query -- Joining queries - - [x] Nested Query - - [x] Has Child Query - - [x] Has Parent Query - - [x] Parent Id Query -- Geo queries - - [ ] GeoShape Query - - [x] Geo Bounding Box Query - - [x] Geo Distance Query - - [x] Geo Polygon Query -- Specialized queries - - [x] Distance Feature Query - - [x] More Like This Query - - [x] Script Query - - [x] Script Score Query - - [x] Percolate Query -- Span queries - - [ ] Span Term Query - - [ ] Span Multi Term Query - - [ ] Span First Query - - [ ] Span Near Query - - [ ] Span Or Query - - [ ] Span Not Query - - [ ] Span Containing Query - - [ ] Span Within Query - - [ ] Span Field Masking Query -- [ ] Minimum Should Match -- [ ] Multi Term Query Rewrite - -### Modules - -- Snapshot and Restore - - [x] Repositories - - [x] Snapshot get - - [x] Snapshot create - - [x] Snapshot delete - - [ ] Restore - - [ ] Snapshot status - - [ ] Monitoring snapshot/restore status - - [ ] Stopping currently running snapshot and restore -- Scripting - - [x] GetScript - - [x] PutScript - - [x] DeleteScript - -### Sorting - -- [x] Sort by score -- [x] Sort by field -- [x] Sort by geo distance -- [x] Sort by script -- [x] Sort by doc - -### Scrolling - -Scrolling is supported via a `ScrollService`. It supports an iterator-like interface. -The `ClearScroll` API is implemented as well. - -A pattern for [efficiently scrolling in parallel](https://github.com/olivere/elastic/wiki/ScrollParallel) -is described in the [Wiki](https://github.com/olivere/elastic/wiki). - -## How to contribute - -Read [the contribution guidelines](https://github.com/olivere/elastic/blob/master/CONTRIBUTING.md). - -## Credits - -Thanks a lot to the great folks working hard on -[Elasticsearch](https://www.elastic.co/products/elasticsearch) -and -[Go](https://golang.org/). - -Elastic uses portions of the -[uritemplates](https://github.com/jtacoma/uritemplates) library -by Joshua Tacoma, -[backoff](https://github.com/cenkalti/backoff) by Cenk Altı and -[leaktest](https://github.com/fortytw2/leaktest) by Ian Chiles. - -## LICENSE - -MIT-LICENSE. See [LICENSE](http://olivere.mit-license.org/) -or the LICENSE file provided in the repository for details. diff --git a/vendor/github.com/olivere/elastic/v7/acknowledged_response.go b/vendor/github.com/olivere/elastic/v7/acknowledged_response.go deleted file mode 100644 index a203c22..0000000 --- a/vendor/github.com/olivere/elastic/v7/acknowledged_response.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AcknowledgedResponse is returned from various APIs. It simply indicates -// whether the operation is acknowledged or not.
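-// -// For illustration (an editorial addition, not part of the original vendored -// source): a create-index call, for example, typically answers with a payload -// along the lines of -// -//	{"acknowledged":true,"shards_acknowledged":true,"index":"test"} -// -// which decodes directly into the struct below.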
-type AcknowledgedResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/backoff.go b/vendor/github.com/olivere/elastic/v7/backoff.go deleted file mode 100644 index 736959f..0000000 --- a/vendor/github.com/olivere/elastic/v7/backoff.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "math" - "math/rand" - "sync" - "time" -) - -// BackoffFunc specifies the signature of a function that returns the -// time to wait before the next call to a resource. To stop retrying -// return false in the 2nd return value. -type BackoffFunc func(retry int) (time.Duration, bool) - -// Backoff allows callers to implement their own Backoff strategy. -type Backoff interface { - // Next implements a BackoffFunc. - Next(retry int) (time.Duration, bool) -} - -// -- ZeroBackoff -- - -// ZeroBackoff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, -// indefinitely. -type ZeroBackoff struct{} - -// Next implements BackoffFunc for ZeroBackoff. -func (b ZeroBackoff) Next(retry int) (time.Duration, bool) { - return 0, true -} - -// -- StopBackoff -- - -// StopBackoff is a fixed backoff policy that always returns false for -// Next(), meaning that the operation should never be retried. -type StopBackoff struct{} - -// Next implements BackoffFunc for StopBackoff. -func (b StopBackoff) Next(retry int) (time.Duration, bool) { - return 0, false -} - -// -- ConstantBackoff -- - -// ConstantBackoff is a backoff policy that always returns the same delay. -type ConstantBackoff struct { - interval time.Duration -} - -// NewConstantBackoff returns a new ConstantBackoff. -func NewConstantBackoff(interval time.Duration) *ConstantBackoff { - return &ConstantBackoff{interval: interval} -} - -// Next implements BackoffFunc for ConstantBackoff. -func (b *ConstantBackoff) Next(retry int) (time.Duration, bool) { - return b.interval, true -} - -// -- Exponential -- - -// ExponentialBackoff implements the simple exponential backoff described by -// Douglas Thain at http://dthain.blogspot.de/2009/02/exponential-backoff-in-distributed.html. -type ExponentialBackoff struct { - t float64 // initial timeout (in msec) - f float64 // exponential factor (e.g. 2) - m float64 // maximum timeout (in msec) -} - -// NewExponentialBackoff returns a ExponentialBackoff backoff policy. -// Use initialTimeout to set the first/minimal interval -// and maxTimeout to set the maximum wait interval. -func NewExponentialBackoff(initialTimeout, maxTimeout time.Duration) *ExponentialBackoff { - return &ExponentialBackoff{ - t: float64(int64(initialTimeout / time.Millisecond)), - f: 2.0, - m: float64(int64(maxTimeout / time.Millisecond)), - } -} - -// Next implements BackoffFunc for ExponentialBackoff. -func (b *ExponentialBackoff) Next(retry int) (time.Duration, bool) { - r := 1.0 + rand.Float64() // random number in [1..2] - m := math.Min(r*b.t*math.Pow(b.f, float64(retry)), b.m) - if m >= b.m { - return 0, false - } - d := time.Duration(int64(m)) * time.Millisecond - return d, true -} - -// -- Simple Backoff -- - -// SimpleBackoff takes a list of fixed values for backoff intervals. 
-// Each call to Next returns the next value from that fixed list; once the -// list is exhausted, Next reports that retrying should stop. The values are -// optionally "jittered" (off by default). -type SimpleBackoff struct { - sync.Mutex - ticks []int - jitter bool -} - -// NewSimpleBackoff creates a SimpleBackoff algorithm with the specified -// list of fixed intervals in milliseconds. -func NewSimpleBackoff(ticks ...int) *SimpleBackoff { - return &SimpleBackoff{ - ticks: ticks, - jitter: false, - } -} - -// Jitter enables or disables jittering values. -func (b *SimpleBackoff) Jitter(flag bool) *SimpleBackoff { - b.Lock() - b.jitter = flag - b.Unlock() - return b -} - -// jitter randomizes the interval to return a value of [0.5*millis .. 1.5*millis]. -func jitter(millis int) int { - if millis <= 0 { - return 0 - } - return millis/2 + rand.Intn(millis) -} - -// Next implements BackoffFunc for SimpleBackoff. -func (b *SimpleBackoff) Next(retry int) (time.Duration, bool) { - b.Lock() - defer b.Unlock() - - if retry >= len(b.ticks) { - return 0, false - } - - ms := b.ticks[retry] - if b.jitter { - ms = jitter(ms) - } - return time.Duration(ms) * time.Millisecond, true -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk.go b/vendor/github.com/olivere/elastic/v7/bulk.go deleted file mode 100644 index 9ffe880..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// BulkService allows for batching bulk requests and sending them to -// Elasticsearch in one roundtrip. Use the Add method with BulkIndexRequest, -// BulkUpdateRequest, and BulkDeleteRequest to add bulk requests to a batch, -// then use Do to send them to Elasticsearch. -// -// BulkService will be reset after each Do call. In other words, you can -// reuse BulkService to send many batches. You do not have to create a new -// BulkService for each batch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for more details. -type BulkService struct { - client *Client - retrier Retrier - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - typ string - requests []BulkableRequest - pipeline string - timeout string - refresh string - routing string - waitForActiveShards string - - // estimated bulk size in bytes, up to the request index sizeInBytesCursor - sizeInBytes int64 - sizeInBytesCursor int -} - -// NewBulkService initializes a new BulkService. -func NewBulkService(client *Client) *BulkService { - builder := &BulkService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *BulkService) Pretty(pretty bool) *BulkService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb".
-func (s *BulkService) Human(human bool) *BulkService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *BulkService) ErrorTrace(errorTrace bool) *BulkService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *BulkService) FilterPath(filterPath ...string) *BulkService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *BulkService) Header(name string, value string) *BulkService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *BulkService) Headers(headers http.Header) *BulkService { - s.headers = headers - return s -} - -// Reset cleans up the request queue -func (s *BulkService) Reset() { - s.requests = make([]BulkableRequest, 0) - s.sizeInBytes = 0 - s.sizeInBytesCursor = 0 -} - -// Retrier allows to set specific retry logic for this BulkService. -// If not specified, it will use the client's default retrier. -func (s *BulkService) Retrier(retrier Retrier) *BulkService { - s.retrier = retrier - return s -} - -// Index specifies the index to use for all batches. You may also leave -// this blank and specify the index in the individual bulk requests. -func (s *BulkService) Index(index string) *BulkService { - s.index = index - return s -} - -// Type specifies the type to use for all batches. You may also leave -// this blank and specify the type in the individual bulk requests. -func (s *BulkService) Type(typ string) *BulkService { - s.typ = typ - return s -} - -// Timeout is a global timeout for processing bulk requests. This is a -// server-side timeout, i.e. it tells Elasticsearch the time after which -// it should stop processing. -func (s *BulkService) Timeout(timeout string) *BulkService { - s.timeout = timeout - return s -} - -// Refresh controls when changes made by this request are made visible -// to search. The allowed values are: "true" (refresh the relevant -// primary and replica shards immediately), "wait_for" (wait for the -// changes to be made visible by a refresh before replying), or "false" -// (no refresh related actions). The default value is "false". -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *BulkService) Refresh(refresh string) *BulkService { - s.refresh = refresh - return s -} - -// Routing specifies the routing value. -func (s *BulkService) Routing(routing string) *BulkService { - s.routing = routing - return s -} - -// Pipeline specifies the pipeline id to preprocess incoming documents with. -func (s *BulkService) Pipeline(pipeline string) *BulkService { - s.pipeline = pipeline - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active -// before proceeding with the bulk operation. Defaults to 1, meaning the -// primary shard only. Set to `all` for all shard copies, otherwise set to -// any non-negative value less than or equal to the total number of copies -// for the shard (number of replicas + 1). -func (s *BulkService) WaitForActiveShards(waitForActiveShards string) *BulkService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// Add adds bulkable requests, i.e. BulkIndexRequest, BulkUpdateRequest, -// and/or BulkDeleteRequest. 
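-// -// For illustration (an editorial sketch, not part of the original vendored -// source; the index name, doc value, and ctx are placeholders): -// -//	svc := client.Bulk().Index("tweets") -//	svc = svc.Add(elastic.NewBulkIndexRequest().Id("1").Doc(doc)) -//	res, err := svc.Do(ctx) -//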
-func (s *BulkService) Add(requests ...BulkableRequest) *BulkService { - s.requests = append(s.requests, requests...) - return s -} - -// EstimatedSizeInBytes returns the estimated size of all bulkable -// requests added via Add. -func (s *BulkService) EstimatedSizeInBytes() int64 { - if s.sizeInBytesCursor == len(s.requests) { - return s.sizeInBytes - } - for _, r := range s.requests[s.sizeInBytesCursor:] { - s.sizeInBytes += s.estimateSizeInBytes(r) - s.sizeInBytesCursor++ - } - return s.sizeInBytes -} - -// estimateSizeInBytes returns the estimated size of the given -// bulkable request, i.e. BulkIndexRequest, BulkUpdateRequest, and -// BulkDeleteRequest. -func (s *BulkService) estimateSizeInBytes(r BulkableRequest) int64 { - lines, _ := r.Source() - size := 0 - for _, line := range lines { - // +1 for the \n - size += len(line) + 1 - } - return int64(size) -} - -// NumberOfActions returns the number of bulkable requests that need to -// be sent to Elasticsearch on the next batch. -func (s *BulkService) NumberOfActions() int { - return len(s.requests) -} - -func (s *BulkService) bodyAsString() (string, error) { - // Pre-allocate to reduce allocs - var buf strings.Builder - buf.Grow(int(s.EstimatedSizeInBytes())) - - for _, req := range s.requests { - source, err := req.Source() - if err != nil { - return "", err - } - for _, line := range source { - buf.WriteString(line) - buf.WriteByte('\n') - } - } - - return buf.String(), nil -} - -// Do sends the batched requests to Elasticsearch. Note that, when successful, -// you can reuse the BulkService for the next batch as the list of bulk -// requests is cleared on success. -func (s *BulkService) Do(ctx context.Context) (*BulkResponse, error) { - // No actions? - if s.NumberOfActions() == 0 { - return nil, errors.New("elastic: No bulk actions to commit") - } - - // Get body - body, err := s.bodyAsString() - if err != nil { - return nil, err - } - - // Build url - path := "/" - if len(s.index) > 0 { - index, err := uritemplates.Expand("{index}", map[string]string{ - "index": s.index, - }) - if err != nil { - return nil, err - } - path += index + "/" - } - if len(s.typ) > 0 { - typ, err := uritemplates.Expand("{type}", map[string]string{ - "type": s.typ, - }) - if err != nil { - return nil, err - } - path += typ + "/" - } - path += "_bulk" - - // Parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.pipeline != "" { - params.Set("pipeline", s.pipeline) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - ContentType: "application/x-ndjson", - Retrier: s.retrier, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return results - ret := new(BulkResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - - // Reset so the request can be reused - s.Reset() - - return ret, nil -} - -// BulkResponse
is a response to a bulk execution. -// -// Example: -// { -//   "took":3, -//   "errors":false, -//   "items":[{ -//     "index":{ -//       "_index":"index1", -//       "_type":"tweet", -//       "_id":"1", -//       "_version":3, -//       "status":201 -//     } -//   },{ -//     "index":{ -//       "_index":"index2", -//       "_type":"tweet", -//       "_id":"2", -//       "_version":3, -//       "status":200 -//     } -//   },{ -//     "delete":{ -//       "_index":"index1", -//       "_type":"tweet", -//       "_id":"1", -//       "_version":4, -//       "status":200, -//       "found":true -//     } -//   },{ -//     "update":{ -//       "_index":"index2", -//       "_type":"tweet", -//       "_id":"2", -//       "_version":4, -//       "status":200 -//     } -//   }] -// } -type BulkResponse struct { - Took int `json:"took,omitempty"` - Errors bool `json:"errors,omitempty"` - Items []map[string]*BulkResponseItem `json:"items,omitempty"` -} - -// BulkResponseItem is the result of a single bulk request. -type BulkResponseItem struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Version int64 `json:"_version,omitempty"` - Result string `json:"result,omitempty"` - Shards *ShardsInfo `json:"_shards,omitempty"` - SeqNo int64 `json:"_seq_no,omitempty"` - PrimaryTerm int64 `json:"_primary_term,omitempty"` - Status int `json:"status,omitempty"` - ForcedRefresh bool `json:"forced_refresh,omitempty"` - Error *ErrorDetails `json:"error,omitempty"` - GetResult *GetResult `json:"get,omitempty"` -} - -// Indexed returns all bulk request results of "index" actions. -func (r *BulkResponse) Indexed() []*BulkResponseItem { - return r.ByAction("index") -} - -// Created returns all bulk request results of "create" actions. -func (r *BulkResponse) Created() []*BulkResponseItem { - return r.ByAction("create") -} - -// Updated returns all bulk request results of "update" actions. -func (r *BulkResponse) Updated() []*BulkResponseItem { - return r.ByAction("update") -} - -// Deleted returns all bulk request results of "delete" actions. -func (r *BulkResponse) Deleted() []*BulkResponseItem { - return r.ByAction("delete") -} - -// ByAction returns all bulk request results of a certain action, -// e.g. "index" or "delete". -func (r *BulkResponse) ByAction(action string) []*BulkResponseItem { - if r.Items == nil { - return nil - } - var items []*BulkResponseItem - for _, item := range r.Items { - if result, found := item[action]; found { - items = append(items, result) - } - } - return items -} - -// ById returns all bulk request results of a given document id, -// regardless of the action ("index", "delete" etc.). -func (r *BulkResponse) ById(id string) []*BulkResponseItem { - if r.Items == nil { - return nil - } - var items []*BulkResponseItem - for _, item := range r.Items { - for _, result := range item { - if result.Id == id { - items = append(items, result) - } - } - } - return items -} - -// Failed returns those items of a bulk response that have errors, -// i.e. those that don't have a status code between 200 and 299. -func (r *BulkResponse) Failed() []*BulkResponseItem { - if r.Items == nil { - return nil - } - var errors []*BulkResponseItem - for _, item := range r.Items { - for _, result := range item { - if !(result.Status >= 200 && result.Status <= 299) { - errors = append(errors, result) - } - } - } - return errors -} - -// Succeeded returns those items of a bulk response that have no errors, -// i.e. those that have a status code between 200 and 299.
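-// -// For illustration (an editorial sketch, not part of the original vendored -// source): a typical check after a bulk call looks like -// -//	res, err := client.Bulk().Add(requests...).Do(ctx) -//	if err == nil && len(res.Failed()) > 0 { -//		// at least one item came back with a non-2xx status -//	}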
-func (r *BulkResponse) Succeeded() []*BulkResponseItem { - if r.Items == nil { - return nil - } - var succeeded []*BulkResponseItem - for _, item := range r.Items { - for _, result := range item { - if result.Status >= 200 && result.Status <= 299 { - succeeded = append(succeeded, result) - } - } - } - return succeeded -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_delete_request.go b/vendor/github.com/olivere/elastic/v7/bulk_delete_request.go deleted file mode 100644 index 55b2cbc..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_delete_request.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -//go:generate easyjson bulk_delete_request.go - -import ( - "encoding/json" - "fmt" - "strings" -) - -// -- Bulk delete request -- - -// BulkDeleteRequest is a request to remove a document from Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. -type BulkDeleteRequest struct { - BulkableRequest - index string - typ string - id string - parent string - routing string - version int64 // default is MATCH_ANY - versionType string // default is "internal" - ifSeqNo *int64 - ifPrimaryTerm *int64 - - source []string - - useEasyJSON bool -} - -//easyjson:json -type bulkDeleteRequestCommand map[string]bulkDeleteRequestCommandOp - -//easyjson:json -type bulkDeleteRequestCommandOp struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Parent string `json:"parent,omitempty"` - Routing string `json:"routing,omitempty"` - Version int64 `json:"version,omitempty"` - VersionType string `json:"version_type,omitempty"` - IfSeqNo *int64 `json:"if_seq_no,omitempty"` - IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` -} - -// NewBulkDeleteRequest returns a new BulkDeleteRequest. -func NewBulkDeleteRequest() *BulkDeleteRequest { - return &BulkDeleteRequest{} -} - -// UseEasyJSON is an experimental setting that enables serialization -// with github.com/mailru/easyjson, which should result in faster serialization -// and fewer allocations, but removes compatibility with encoding/json and -// relies on usage of unsafe etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations -// for details. This setting is disabled by default. -func (r *BulkDeleteRequest) UseEasyJSON(enable bool) *BulkDeleteRequest { - r.useEasyJSON = enable - return r -} - -// Index specifies the Elasticsearch index to use for this delete request. -// If unspecified, the index set on the BulkService will be used. -func (r *BulkDeleteRequest) Index(index string) *BulkDeleteRequest { - r.index = index - r.source = nil - return r -} - -// Type specifies the Elasticsearch type to use for this delete request. -// If unspecified, the type set on the BulkService will be used. -func (r *BulkDeleteRequest) Type(typ string) *BulkDeleteRequest { - r.typ = typ - r.source = nil - return r -} - -// Id specifies the identifier of the document to delete. -func (r *BulkDeleteRequest) Id(id string) *BulkDeleteRequest { - r.id = id - r.source = nil - return r -} - -// Parent specifies the parent of the request, which is used in parent/child -// mappings.
-func (r *BulkDeleteRequest) Parent(parent string) *BulkDeleteRequest { - r.parent = parent - r.source = nil - return r -} - -// Routing specifies a routing value for the request. -func (r *BulkDeleteRequest) Routing(routing string) *BulkDeleteRequest { - r.routing = routing - r.source = nil - return r -} - -// Version indicates the version to be deleted as part of an optimistic -// concurrency model. -func (r *BulkDeleteRequest) Version(version int64) *BulkDeleteRequest { - r.version = version - r.source = nil - return r -} - -// VersionType can be "internal" (default), "external", "external_gte", -// or "external_gt". -func (r *BulkDeleteRequest) VersionType(versionType string) *BulkDeleteRequest { - r.versionType = versionType - r.source = nil - return r -} - -// IfSeqNo indicates to only perform the delete operation if the last -// operation that has changed the document has the specified sequence number. -func (r *BulkDeleteRequest) IfSeqNo(ifSeqNo int64) *BulkDeleteRequest { - r.ifSeqNo = &ifSeqNo - return r -} - -// IfPrimaryTerm indicates to only perform the delete operation if the -// last operation that has changed the document has the specified primary term. -func (r *BulkDeleteRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkDeleteRequest { - r.ifPrimaryTerm = &ifPrimaryTerm - return r -} - -// String returns the on-wire representation of the delete request, -// concatenated as a single string. -func (r *BulkDeleteRequest) String() string { - lines, err := r.Source() - if err != nil { - return fmt.Sprintf("error: %v", err) - } - return strings.Join(lines, "\n") -} - -// Source returns the on-wire representation of the delete request, -// split into an action-and-meta-data line and an (optional) source line. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. -func (r *BulkDeleteRequest) Source() ([]string, error) { - if r.source != nil { - return r.source, nil - } - command := bulkDeleteRequestCommand{ - "delete": bulkDeleteRequestCommandOp{ - Index: r.index, - Type: r.typ, - Id: r.id, - Routing: r.routing, - Parent: r.parent, - Version: r.version, - VersionType: r.versionType, - IfSeqNo: r.ifSeqNo, - IfPrimaryTerm: r.ifPrimaryTerm, - }, - } - - var err error - var body []byte - if r.useEasyJSON { - // easyjson - body, err = command.MarshalJSON() - } else { - // encoding/json - body, err = json.Marshal(command) - } - if err != nil { - return nil, err - } - - lines := []string{string(body)} - r.source = lines - - return lines, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_delete_request_easyjson.go b/vendor/github.com/olivere/elastic/v7/bulk_delete_request_easyjson.go deleted file mode 100644 index 084e541..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_delete_request_easyjson.go +++ /dev/null @@ -1,262 +0,0 @@ -// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
- -package elastic - -import ( - json "encoding/json" - easyjson "github.com/mailru/easyjson" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" -) - -// suppress unused package warning -var ( - _ *json.RawMessage - _ *jlexer.Lexer - _ *jwriter.Writer - _ easyjson.Marshaler -) - -func easyjson8092efb6DecodeGithubComOlivereElasticV7(in *jlexer.Lexer, out *bulkDeleteRequestCommandOp) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeString() - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "_index": - out.Index = string(in.String()) - case "_type": - out.Type = string(in.String()) - case "_id": - out.Id = string(in.String()) - case "parent": - out.Parent = string(in.String()) - case "routing": - out.Routing = string(in.String()) - case "version": - out.Version = int64(in.Int64()) - case "version_type": - out.VersionType = string(in.String()) - case "if_seq_no": - if in.IsNull() { - in.Skip() - out.IfSeqNo = nil - } else { - if out.IfSeqNo == nil { - out.IfSeqNo = new(int64) - } - *out.IfSeqNo = int64(in.Int64()) - } - case "if_primary_term": - if in.IsNull() { - in.Skip() - out.IfPrimaryTerm = nil - } else { - if out.IfPrimaryTerm == nil { - out.IfPrimaryTerm = new(int64) - } - *out.IfPrimaryTerm = int64(in.Int64()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson8092efb6EncodeGithubComOlivereElasticV7(out *jwriter.Writer, in bulkDeleteRequestCommandOp) { - out.RawByte('{') - first := true - _ = first - if in.Index != "" { - const prefix string = ",\"_index\":" - first = false - out.RawString(prefix[1:]) - out.String(string(in.Index)) - } - if in.Type != "" { - const prefix string = ",\"_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Type)) - } - if in.Id != "" { - const prefix string = ",\"_id\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Id)) - } - if in.Parent != "" { - const prefix string = ",\"parent\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Parent)) - } - if in.Routing != "" { - const prefix string = ",\"routing\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Routing)) - } - if in.Version != 0 { - const prefix string = ",\"version\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(in.Version)) - } - if in.VersionType != "" { - const prefix string = ",\"version_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.VersionType)) - } - if in.IfSeqNo != nil { - const prefix string = ",\"if_seq_no\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfSeqNo)) - } - if in.IfPrimaryTerm != nil { - const prefix string = ",\"if_primary_term\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfPrimaryTerm)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v 
bulkDeleteRequestCommandOp) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson8092efb6EncodeGithubComOlivereElasticV7(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkDeleteRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) { - easyjson8092efb6EncodeGithubComOlivereElasticV7(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkDeleteRequestCommandOp) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson8092efb6DecodeGithubComOlivereElasticV7(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkDeleteRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson8092efb6DecodeGithubComOlivereElasticV7(l, v) -} -func easyjson8092efb6DecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bulkDeleteRequestCommand) { - isTopLevel := in.IsStart() - if in.IsNull() { - in.Skip() - } else { - in.Delim('{') - *out = make(bulkDeleteRequestCommand) - for !in.IsDelim('}') { - key := string(in.String()) - in.WantColon() - var v1 bulkDeleteRequestCommandOp - (v1).UnmarshalEasyJSON(in) - (*out)[key] = v1 - in.WantComma() - } - in.Delim('}') - } - if isTopLevel { - in.Consumed() - } -} -func easyjson8092efb6EncodeGithubComOlivereElasticV71(out *jwriter.Writer, in bulkDeleteRequestCommand) { - if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { - out.RawString(`null`) - } else { - out.RawByte('{') - v2First := true - for v2Name, v2Value := range in { - if v2First { - v2First = false - } else { - out.RawByte(',') - } - out.String(string(v2Name)) - out.RawByte(':') - (v2Value).MarshalEasyJSON(out) - } - out.RawByte('}') - } -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkDeleteRequestCommand) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson8092efb6EncodeGithubComOlivereElasticV71(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkDeleteRequestCommand) MarshalEasyJSON(w *jwriter.Writer) { - easyjson8092efb6EncodeGithubComOlivereElasticV71(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkDeleteRequestCommand) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson8092efb6DecodeGithubComOlivereElasticV71(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkDeleteRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson8092efb6DecodeGithubComOlivereElasticV71(l, v) -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_index_request.go b/vendor/github.com/olivere/elastic/v7/bulk_index_request.go deleted file mode 100644 index fcbc717..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_index_request.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -//go:generate easyjson bulk_index_request.go - -import ( - "encoding/json" - "fmt" - "strings" -) - -// BulkIndexRequest is a request to add a document to Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. 
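A matching sketch for the index request described above, under the same olivere/elastic v7 assumption; names and the payload are illustrative.

package main

import (
    "fmt"

    "github.com/olivere/elastic/v7"
)

func main() {
    doc := map[string]interface{}{"field1": "value1"} // illustrative payload

    // OpType("create") fails if the id already exists;
    // the default "index" overwrites.
    req := elastic.NewBulkIndexRequest().
        Index("events").
        Id("1").
        OpType("create").
        Doc(doc)

    // String joins the two on-wire lines:
    //   {"create":{"_index":"events","_id":"1"}}
    //   {"field1":"value1"}
    fmt.Println(req.String())
}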
-type BulkIndexRequest struct { - BulkableRequest - index string - typ string - id string - opType string - routing string - parent string - version *int64 // default is MATCH_ANY - versionType string // default is "internal" - doc interface{} - pipeline string - retryOnConflict *int - ifSeqNo *int64 - ifPrimaryTerm *int64 - - source []string - - useEasyJSON bool -} - -//easyjson:json -type bulkIndexRequestCommand map[string]bulkIndexRequestCommandOp - -//easyjson:json -type bulkIndexRequestCommandOp struct { - Index string `json:"_index,omitempty"` - Id string `json:"_id,omitempty"` - Type string `json:"_type,omitempty"` - Parent string `json:"parent,omitempty"` - // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+. - RetryOnConflict *int `json:"retry_on_conflict,omitempty"` - Routing string `json:"routing,omitempty"` - Version *int64 `json:"version,omitempty"` - VersionType string `json:"version_type,omitempty"` - Pipeline string `json:"pipeline,omitempty"` - IfSeqNo *int64 `json:"if_seq_no,omitempty"` - IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` -} - -// NewBulkIndexRequest returns a new BulkIndexRequest. -// The operation type is "index" by default. -func NewBulkIndexRequest() *BulkIndexRequest { - return &BulkIndexRequest{ - opType: "index", - } -} - -// UseEasyJSON is an experimental setting that enables serialization -// with github.com/mailru/easyjson, which should result in faster serialization -// and fewer allocations, but removes compatibility with encoding/json and -// relies on unsafe, etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations -// for details. This setting is disabled by default. -func (r *BulkIndexRequest) UseEasyJSON(enable bool) *BulkIndexRequest { - r.useEasyJSON = enable - return r -} - -// Index specifies the Elasticsearch index to use for this index request. -// If unspecified, the index set on the BulkService will be used. -func (r *BulkIndexRequest) Index(index string) *BulkIndexRequest { - r.index = index - r.source = nil - return r -} - -// Type specifies the Elasticsearch type to use for this index request. -// If unspecified, the type set on the BulkService will be used. -func (r *BulkIndexRequest) Type(typ string) *BulkIndexRequest { - r.typ = typ - r.source = nil - return r -} - -// Id specifies the identifier of the document to index. -func (r *BulkIndexRequest) Id(id string) *BulkIndexRequest { - r.id = id - r.source = nil - return r -} - -// OpType specifies if this request should follow create-only or upsert -// behavior. This follows the OpType of the standard document index API. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#operation-type -// for details. -func (r *BulkIndexRequest) OpType(opType string) *BulkIndexRequest { - r.opType = opType - r.source = nil - return r -} - -// Routing specifies a routing value for the request. -func (r *BulkIndexRequest) Routing(routing string) *BulkIndexRequest { - r.routing = routing - r.source = nil - return r -} - -// Parent specifies the identifier of the parent document (if available). -func (r *BulkIndexRequest) Parent(parent string) *BulkIndexRequest { - r.parent = parent - r.source = nil - return r -} - -// Version indicates the version of the document as part of an optimistic -// concurrency model. -func (r *BulkIndexRequest) Version(version int64) *BulkIndexRequest { - v := version - r.version = &v - r.source = nil - return r -} - -// VersionType specifies how versions are created. It can be e.g.
internal, -// external, external_gte, or force. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#index-versioning -// for details. -func (r *BulkIndexRequest) VersionType(versionType string) *BulkIndexRequest { - r.versionType = versionType - r.source = nil - return r -} - -// Doc specifies the document to index. -func (r *BulkIndexRequest) Doc(doc interface{}) *BulkIndexRequest { - r.doc = doc - r.source = nil - return r -} - -// RetryOnConflict specifies how often to retry in case of a version conflict. -func (r *BulkIndexRequest) RetryOnConflict(retryOnConflict int) *BulkIndexRequest { - r.retryOnConflict = &retryOnConflict - r.source = nil - return r -} - -// Pipeline to use while processing the request. -func (r *BulkIndexRequest) Pipeline(pipeline string) *BulkIndexRequest { - r.pipeline = pipeline - r.source = nil - return r -} - -// IfSeqNo indicates to only perform the index operation if the last -// operation that has changed the document has the specified sequence number. -func (r *BulkIndexRequest) IfSeqNo(ifSeqNo int64) *BulkIndexRequest { - r.ifSeqNo = &ifSeqNo - return r -} - -// IfPrimaryTerm indicates to only perform the index operation if the -// last operation that has changed the document has the specified primary term. -func (r *BulkIndexRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkIndexRequest { - r.ifPrimaryTerm = &ifPrimaryTerm - return r -} - -// String returns the on-wire representation of the index request, -// concatenated as a single string. -func (r *BulkIndexRequest) String() string { - lines, err := r.Source() - if err != nil { - return fmt.Sprintf("error: %v", err) - } - return strings.Join(lines, "\n") -} - -// Source returns the on-wire representation of the index request, -// split into an action-and-meta-data line and an (optional) source line. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. -func (r *BulkIndexRequest) Source() ([]string, error) { - // { "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } } - // { "field1" : "value1" } - - if r.source != nil { - return r.source, nil - } - - lines := make([]string, 2) - - // "index" ... - indexCommand := bulkIndexRequestCommandOp{ - Index: r.index, - Type: r.typ, - Id: r.id, - Routing: r.routing, - Parent: r.parent, - Version: r.version, - VersionType: r.versionType, - RetryOnConflict: r.retryOnConflict, - Pipeline: r.pipeline, - IfSeqNo: r.ifSeqNo, - IfPrimaryTerm: r.ifPrimaryTerm, - } - command := bulkIndexRequestCommand{ - r.opType: indexCommand, - } - - var err error - var body []byte - if r.useEasyJSON { - // easyjson - body, err = command.MarshalJSON() - } else { - // encoding/json - body, err = json.Marshal(command) - } - if err != nil { - return nil, err - } - - lines[0] = string(body) - - // "field1" ... 
- if r.doc != nil { - switch t := r.doc.(type) { - default: - body, err := json.Marshal(r.doc) - if err != nil { - return nil, err - } - lines[1] = string(body) - case json.RawMessage: - lines[1] = string(t) - case *json.RawMessage: - lines[1] = string(*t) - case string: - lines[1] = t - case *string: - lines[1] = *t - } - } else { - lines[1] = "{}" - } - - r.source = lines - return lines, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_index_request_easyjson.go b/vendor/github.com/olivere/elastic/v7/bulk_index_request_easyjson.go deleted file mode 100644 index aaae24e..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_index_request_easyjson.go +++ /dev/null @@ -1,302 +0,0 @@ -// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. - -package elastic - -import ( - json "encoding/json" - easyjson "github.com/mailru/easyjson" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" -) - -// suppress unused package warning -var ( - _ *json.RawMessage - _ *jlexer.Lexer - _ *jwriter.Writer - _ easyjson.Marshaler -) - -func easyjson9de0fcbfDecodeGithubComOlivereElasticV7(in *jlexer.Lexer, out *bulkIndexRequestCommandOp) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeString() - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "_index": - out.Index = string(in.String()) - case "_id": - out.Id = string(in.String()) - case "_type": - out.Type = string(in.String()) - case "parent": - out.Parent = string(in.String()) - case "retry_on_conflict": - if in.IsNull() { - in.Skip() - out.RetryOnConflict = nil - } else { - if out.RetryOnConflict == nil { - out.RetryOnConflict = new(int) - } - *out.RetryOnConflict = int(in.Int()) - } - case "routing": - out.Routing = string(in.String()) - case "version": - if in.IsNull() { - in.Skip() - out.Version = nil - } else { - if out.Version == nil { - out.Version = new(int64) - } - *out.Version = int64(in.Int64()) - } - case "version_type": - out.VersionType = string(in.String()) - case "pipeline": - out.Pipeline = string(in.String()) - case "if_seq_no": - if in.IsNull() { - in.Skip() - out.IfSeqNo = nil - } else { - if out.IfSeqNo == nil { - out.IfSeqNo = new(int64) - } - *out.IfSeqNo = int64(in.Int64()) - } - case "if_primary_term": - if in.IsNull() { - in.Skip() - out.IfPrimaryTerm = nil - } else { - if out.IfPrimaryTerm == nil { - out.IfPrimaryTerm = new(int64) - } - *out.IfPrimaryTerm = int64(in.Int64()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson9de0fcbfEncodeGithubComOlivereElasticV7(out *jwriter.Writer, in bulkIndexRequestCommandOp) { - out.RawByte('{') - first := true - _ = first - if in.Index != "" { - const prefix string = ",\"_index\":" - first = false - out.RawString(prefix[1:]) - out.String(string(in.Index)) - } - if in.Id != "" { - const prefix string = ",\"_id\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Id)) - } - if in.Type != "" { - const prefix string = ",\"_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Type)) - } - if in.Parent != "" { - const prefix string = ",\"parent\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - 
out.RawString(prefix) - } - out.String(string(in.Parent)) - } - if in.RetryOnConflict != nil { - const prefix string = ",\"retry_on_conflict\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int(int(*in.RetryOnConflict)) - } - if in.Routing != "" { - const prefix string = ",\"routing\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Routing)) - } - if in.Version != nil { - const prefix string = ",\"version\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.Version)) - } - if in.VersionType != "" { - const prefix string = ",\"version_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.VersionType)) - } - if in.Pipeline != "" { - const prefix string = ",\"pipeline\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Pipeline)) - } - if in.IfSeqNo != nil { - const prefix string = ",\"if_seq_no\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfSeqNo)) - } - if in.IfPrimaryTerm != nil { - const prefix string = ",\"if_primary_term\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfPrimaryTerm)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkIndexRequestCommandOp) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson9de0fcbfEncodeGithubComOlivereElasticV7(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkIndexRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) { - easyjson9de0fcbfEncodeGithubComOlivereElasticV7(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkIndexRequestCommandOp) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson9de0fcbfDecodeGithubComOlivereElasticV7(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkIndexRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson9de0fcbfDecodeGithubComOlivereElasticV7(l, v) -} -func easyjson9de0fcbfDecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bulkIndexRequestCommand) { - isTopLevel := in.IsStart() - if in.IsNull() { - in.Skip() - } else { - in.Delim('{') - *out = make(bulkIndexRequestCommand) - for !in.IsDelim('}') { - key := string(in.String()) - in.WantColon() - var v1 bulkIndexRequestCommandOp - (v1).UnmarshalEasyJSON(in) - (*out)[key] = v1 - in.WantComma() - } - in.Delim('}') - } - if isTopLevel { - in.Consumed() - } -} -func easyjson9de0fcbfEncodeGithubComOlivereElasticV71(out *jwriter.Writer, in bulkIndexRequestCommand) { - if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { - out.RawString(`null`) - } else { - out.RawByte('{') - v2First := true - for v2Name, v2Value := range in { - if v2First { - v2First = false - } else { - out.RawByte(',') - } - out.String(string(v2Name)) - out.RawByte(':') - (v2Value).MarshalEasyJSON(out) - } - out.RawByte('}') - } -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkIndexRequestCommand) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson9de0fcbfEncodeGithubComOlivereElasticV71(&w, v) - return w.Buffer.BuildBytes(), w.Error 
-} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkIndexRequestCommand) MarshalEasyJSON(w *jwriter.Writer) { - easyjson9de0fcbfEncodeGithubComOlivereElasticV71(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkIndexRequestCommand) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson9de0fcbfDecodeGithubComOlivereElasticV71(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkIndexRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson9de0fcbfDecodeGithubComOlivereElasticV71(l, v) -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_processor.go b/vendor/github.com/olivere/elastic/v7/bulk_processor.go deleted file mode 100644 index f2711f8..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_processor.go +++ /dev/null @@ -1,656 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "errors" - "net" - "sync" - "sync/atomic" - "time" -) - -var ( - // ErrBulkItemRetry is returned in BulkProcessor from a worker when - // a response item needs to be retried. - ErrBulkItemRetry = errors.New("elastic: uncommitted bulk response items") - - defaultRetryItemStatusCodes = []int{408, 429, 503, 507} -) - -// BulkProcessorService allows to easily process bulk requests. It allows setting -// policies when to flush new bulk requests, e.g. based on a number of actions, -// on the size of the actions, and/or to flush periodically. It also allows -// to control the number of concurrent bulk requests allowed to be executed -// in parallel. -// -// BulkProcessorService, by default, commits either every 1000 requests or when the -// (estimated) size of the bulk requests exceeds 5 MB. However, it does not -// commit periodically. BulkProcessorService also does retry by default, using -// an exponential backoff algorithm. It also will automatically re-enqueue items -// returned with a status of 408, 429, 503 or 507. You can change this -// behavior with RetryItemStatusCodes. -// -// The caller is responsible for setting the index and type on every -// bulk request added to BulkProcessorService. -// -// BulkProcessorService takes ideas from the BulkProcessor of the -// Elasticsearch Java API as documented in -// https://www.elastic.co/guide/en/elasticsearch/client/java-api/current/java-docs-bulk-processor.html. -type BulkProcessorService struct { - c *Client - beforeFn BulkBeforeFunc - afterFn BulkAfterFunc - name string // name of processor - numWorkers int // # of workers (>= 1) - bulkActions int // # of requests after which to commit - bulkSize int // # of bytes after which to commit - flushInterval time.Duration // periodic flush interval - wantStats bool // indicates whether to gather statistics - backoff Backoff // a custom Backoff to use for errors - retryItemStatusCodes []int // array of status codes for bulk response line items that may be retried -} - -// NewBulkProcessorService creates a new BulkProcessorService. 
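The commit policies described above compose through the builder; a sketch assuming a reachable Elasticsearch node, with an illustrative processor name and thresholds that restate the documented defaults plus a periodic flush.

package main

import (
    "context"
    "log"
    "time"

    "github.com/olivere/elastic/v7"
)

func main() {
    client, err := elastic.NewClient() // assumes http://127.0.0.1:9200
    if err != nil {
        log.Fatal(err)
    }

    p, err := client.BulkProcessor().
        Name("example-worker").          // illustrative name
        Workers(2).                      // concurrent committers
        BulkActions(1000).               // commit after 1000 queued actions...
        BulkSize(5 << 20).               // ...or ~5 MB of estimated payload
        FlushInterval(30 * time.Second). // also commit on a timer
        Stats(true).                     // gather BulkProcessorStats
        Do(context.Background())
    if err != nil {
        log.Fatal(err)
    }
    defer p.Close()
}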
-func NewBulkProcessorService(client *Client) *BulkProcessorService { - return &BulkProcessorService{ - c: client, - numWorkers: 1, - bulkActions: 1000, - bulkSize: 5 << 20, // 5 MB - backoff: NewExponentialBackoff( - time.Duration(200)*time.Millisecond, - time.Duration(10000)*time.Millisecond, - ), - retryItemStatusCodes: defaultRetryItemStatusCodes, - } -} - -// BulkBeforeFunc defines the signature of callbacks that are executed -// before a commit to Elasticsearch. -type BulkBeforeFunc func(executionId int64, requests []BulkableRequest) - -// BulkAfterFunc defines the signature of callbacks that are executed -// after a commit to Elasticsearch. The err parameter signals an error. -type BulkAfterFunc func(executionId int64, requests []BulkableRequest, response *BulkResponse, err error) - -// Before specifies a function to be executed before bulk requests get committed -// to Elasticsearch. -func (s *BulkProcessorService) Before(fn BulkBeforeFunc) *BulkProcessorService { - s.beforeFn = fn - return s -} - -// After specifies a function to be executed when bulk requests have been -// committed to Elasticsearch. The After callback executes both when the -// commit was successful as well as on failures. -func (s *BulkProcessorService) After(fn BulkAfterFunc) *BulkProcessorService { - s.afterFn = fn - return s -} - -// Name is an optional name to identify this bulk processor. -func (s *BulkProcessorService) Name(name string) *BulkProcessorService { - s.name = name - return s -} - -// Workers is the number of concurrent workers allowed to be -// executed. Defaults to 1 and must be greater or equal to 1. -func (s *BulkProcessorService) Workers(num int) *BulkProcessorService { - s.numWorkers = num - return s -} - -// BulkActions specifies when to flush based on the number of actions -// currently added. Defaults to 1000 and can be set to -1 to be disabled. -func (s *BulkProcessorService) BulkActions(bulkActions int) *BulkProcessorService { - s.bulkActions = bulkActions - return s -} - -// BulkSize specifies when to flush based on the size (in bytes) of the actions -// currently added. Defaults to 5 MB and can be set to -1 to be disabled. -func (s *BulkProcessorService) BulkSize(bulkSize int) *BulkProcessorService { - s.bulkSize = bulkSize - return s -} - -// FlushInterval specifies when to flush at the end of the given interval. -// This is disabled by default. If you want the bulk processor to -// operate completely asynchronously, set both BulkActions and BulkSize to -// -1 and set the FlushInterval to a meaningful interval. -func (s *BulkProcessorService) FlushInterval(interval time.Duration) *BulkProcessorService { - s.flushInterval = interval - return s -} - -// Stats tells bulk processor to gather stats while running. -// Use Stats to return the stats. This is disabled by default. -func (s *BulkProcessorService) Stats(wantStats bool) *BulkProcessorService { - s.wantStats = wantStats - return s -} - -// Backoff sets the backoff strategy to use for errors. -func (s *BulkProcessorService) Backoff(backoff Backoff) *BulkProcessorService { - s.backoff = backoff - return s -} - -// RetryItemStatusCodes sets an array of status codes that indicate that a bulk -// response line item should be retried. -func (s *BulkProcessorService) RetryItemStatusCodes(retryItemStatusCodes ...int) *BulkProcessorService { - s.retryItemStatusCodes = retryItemStatusCodes - return s -} - -// Do creates a new BulkProcessor and starts it. 
-// Consider the BulkProcessor as a running instance that accepts bulk requests -// and commits them to Elasticsearch, spreading the work across one or more -// workers. -// -// You can interoperate with the BulkProcessor returned by Do, e.g. Start and -// Stop (or Close) it. -// -// Context is an optional context that is passed into the bulk request -// service calls. In contrast to other operations, this context is used in -// a long running process. You could use it to pass e.g. loggers, but you -// shouldn't use it for cancellation. -// -// Calling Do several times returns new BulkProcessors. You probably don't -// want to do this. BulkProcessorService implements just a builder pattern. -func (s *BulkProcessorService) Do(ctx context.Context) (*BulkProcessor, error) { - - retryItemStatusCodes := make(map[int]struct{}) - for _, code := range s.retryItemStatusCodes { - retryItemStatusCodes[code] = struct{}{} - } - - p := newBulkProcessor( - s.c, - s.beforeFn, - s.afterFn, - s.name, - s.numWorkers, - s.bulkActions, - s.bulkSize, - s.flushInterval, - s.wantStats, - s.backoff, - retryItemStatusCodes) - - err := p.Start(ctx) - if err != nil { - return nil, err - } - return p, nil -} - -// -- Bulk Processor Statistics -- - -// BulkProcessorStats contains various statistics of a bulk processor -// while it is running. Use the Stats func to return it while running. -type BulkProcessorStats struct { - Flushed int64 // number of times the flush interval has been invoked - Committed int64 // # of times workers committed bulk requests - Indexed int64 // # of requests indexed - Created int64 // # of requests that ES reported as creates (201) - Updated int64 // # of requests that ES reported as updates - Deleted int64 // # of requests that ES reported as deletes - Succeeded int64 // # of requests that ES reported as successful - Failed int64 // # of requests that ES reported as failed - - Workers []*BulkProcessorWorkerStats // stats for each worker -} - -// BulkProcessorWorkerStats represents per-worker statistics. -type BulkProcessorWorkerStats struct { - Queued int64 // # of requests queued in this worker - LastDuration time.Duration // duration of last commit -} - -// newBulkProcessorStats initializes and returns a BulkProcessorStats struct. -func newBulkProcessorStats(workers int) *BulkProcessorStats { - stats := &BulkProcessorStats{ - Workers: make([]*BulkProcessorWorkerStats, workers), - } - for i := 0; i < workers; i++ { - stats.Workers[i] = &BulkProcessorWorkerStats{} - } - return stats -} - -func (st *BulkProcessorStats) dup() *BulkProcessorStats { - dst := new(BulkProcessorStats) - dst.Flushed = st.Flushed - dst.Committed = st.Committed - dst.Indexed = st.Indexed - dst.Created = st.Created - dst.Updated = st.Updated - dst.Deleted = st.Deleted - dst.Succeeded = st.Succeeded - dst.Failed = st.Failed - for _, src := range st.Workers { - dst.Workers = append(dst.Workers, src.dup()) - } - return dst -} - -func (st *BulkProcessorWorkerStats) dup() *BulkProcessorWorkerStats { - dst := new(BulkProcessorWorkerStats) - dst.Queued = st.Queued - dst.LastDuration = st.LastDuration - return dst -} - -// -- Bulk Processor -- - -// BulkProcessor encapsulates a task that accepts bulk requests and -// orchestrates committing them to Elasticsearch via one or more workers. -// -// BulkProcessor is returned by setting up a BulkProcessorService and -// calling the Do method. 
-type BulkProcessor struct { - c *Client - beforeFn BulkBeforeFunc - afterFn BulkAfterFunc - name string - bulkActions int - bulkSize int - numWorkers int - executionId int64 - requestsC chan BulkableRequest - workerWg sync.WaitGroup - workers []*bulkWorker - flushInterval time.Duration - flusherStopC chan struct{} - wantStats bool - retryItemStatusCodes map[int]struct{} - backoff Backoff - - startedMu sync.Mutex // guards the following block - started bool - - statsMu sync.Mutex // guards the following block - stats *BulkProcessorStats - - stopReconnC chan struct{} // channel to signal stop reconnection attempts -} - -func newBulkProcessor( - client *Client, - beforeFn BulkBeforeFunc, - afterFn BulkAfterFunc, - name string, - numWorkers int, - bulkActions int, - bulkSize int, - flushInterval time.Duration, - wantStats bool, - backoff Backoff, - retryItemStatusCodes map[int]struct{}) *BulkProcessor { - return &BulkProcessor{ - c: client, - beforeFn: beforeFn, - afterFn: afterFn, - name: name, - numWorkers: numWorkers, - bulkActions: bulkActions, - bulkSize: bulkSize, - flushInterval: flushInterval, - wantStats: wantStats, - retryItemStatusCodes: retryItemStatusCodes, - backoff: backoff, - } -} - -// Start starts the bulk processor. If the processor is already started, -// nil is returned. -func (p *BulkProcessor) Start(ctx context.Context) error { - p.startedMu.Lock() - defer p.startedMu.Unlock() - - if p.started { - return nil - } - - // We must have at least one worker. - if p.numWorkers < 1 { - p.numWorkers = 1 - } - - p.requestsC = make(chan BulkableRequest) - p.executionId = 0 - p.stats = newBulkProcessorStats(p.numWorkers) - p.stopReconnC = make(chan struct{}) - - // Create and start up workers. - p.workers = make([]*bulkWorker, p.numWorkers) - for i := 0; i < p.numWorkers; i++ { - p.workerWg.Add(1) - p.workers[i] = newBulkWorker(p, i) - go p.workers[i].work(ctx) - } - - // Start the ticker for flush (if enabled) - if int64(p.flushInterval) > 0 { - p.flusherStopC = make(chan struct{}) - go p.flusher(p.flushInterval) - } - - p.started = true - - return nil -} - -// Stop is an alias for Close. -func (p *BulkProcessor) Stop() error { - return p.Close() -} - -// Close stops the bulk processor previously started with Do. -// If it is already stopped, this is a no-op and nil is returned. -// -// By implementing Close, BulkProcessor implements the io.Closer interface. -func (p *BulkProcessor) Close() error { - p.startedMu.Lock() - defer p.startedMu.Unlock() - - // Already stopped? Do nothing. - if !p.started { - return nil - } - - // Tell connection checkers to stop - if p.stopReconnC != nil { - close(p.stopReconnC) - p.stopReconnC = nil - } - - // Stop flusher (if enabled) - if p.flusherStopC != nil { - p.flusherStopC <- struct{}{} - <-p.flusherStopC - close(p.flusherStopC) - p.flusherStopC = nil - } - - // Stop all workers. - close(p.requestsC) - p.workerWg.Wait() - - p.started = false - - return nil -} - -// Stats returns the latest bulk processor statistics. -// Collecting stats must be enabled first by calling Stats(true) on -// the service that created this processor. -func (p *BulkProcessor) Stats() BulkProcessorStats { - p.statsMu.Lock() - defer p.statsMu.Unlock() - return *p.stats.dup() -} - -// Add adds a single request to commit by the BulkProcessorService. -// -// The caller is responsible for setting the index and type on the request. 
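Day to day, the running processor is driven through Add, Flush, and Stats; a sketch of that feed-then-drain pattern, with the index name and payloads illustrative.

package bulkexample

import (
    "fmt"

    "github.com/olivere/elastic/v7"
)

// feedAndDrain queues a few documents, forces a commit, and reports
// counters; p is assumed to come from a builder with Stats(true) set.
func feedAndDrain(p *elastic.BulkProcessor) error {
    for i := 0; i < 3; i++ {
        doc := map[string]interface{}{"n": i} // illustrative payload
        p.Add(elastic.NewBulkIndexRequest().Index("events").Doc(doc))
    }
    // Flush blocks until every worker has committed its queue.
    if err := p.Flush(); err != nil {
        return err
    }
    st := p.Stats() // zero-valued unless Stats(true) was requested
    fmt.Printf("committed=%d succeeded=%d failed=%d\n",
        st.Committed, st.Succeeded, st.Failed)
    return nil
}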
-func (p *BulkProcessor) Add(request BulkableRequest) { - p.requestsC <- request -} - -// Flush manually asks all workers to commit their outstanding requests. -// It returns only when all workers acknowledge completion. -func (p *BulkProcessor) Flush() error { - p.statsMu.Lock() - p.stats.Flushed++ - p.statsMu.Unlock() - - for _, w := range p.workers { - w.flushC <- struct{}{} - <-w.flushAckC // wait for completion - } - return nil -} - -// flusher is a single goroutine that periodically asks all workers to -// commit their outstanding bulk requests. It is only started if -// FlushInterval is greater than 0. -func (p *BulkProcessor) flusher(interval time.Duration) { - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-ticker.C: // Periodic flush - p.Flush() // TODO swallow errors here? - - case <-p.flusherStopC: - p.flusherStopC <- struct{}{} - return - } - } -} - -// -- Bulk Worker -- - -// bulkWorker encapsulates a single worker, running in a goroutine, -// receiving bulk requests and eventually committing them to Elasticsearch. -// It is strongly bound to a BulkProcessor. -type bulkWorker struct { - p *BulkProcessor - i int - bulkActions int - bulkSize int - service *BulkService - flushC chan struct{} - flushAckC chan struct{} -} - -// newBulkWorker creates a new bulkWorker instance. -func newBulkWorker(p *BulkProcessor, i int) *bulkWorker { - return &bulkWorker{ - p: p, - i: i, - bulkActions: p.bulkActions, - bulkSize: p.bulkSize, - service: NewBulkService(p.c), - flushC: make(chan struct{}), - flushAckC: make(chan struct{}), - } -} - -// work waits for bulk requests and manual flush calls on the respective -// channels and is invoked as a goroutine when the bulk processor is started. -func (w *bulkWorker) work(ctx context.Context) { - defer func() { - w.p.workerWg.Done() - close(w.flushAckC) - close(w.flushC) - }() - - var stop bool - for !stop { - var err error - select { - case req, open := <-w.p.requestsC: - if open { - // Received a new request - if _, err = req.Source(); err == nil { - w.service.Add(req) - if w.commitRequired() { - err = w.commit(ctx) - } - } - } else { - // Channel closed: Stop. - stop = true - if w.service.NumberOfActions() > 0 { - err = w.commit(ctx) - } - } - - case <-w.flushC: - // Commit outstanding requests - if w.service.NumberOfActions() > 0 { - err = w.commit(ctx) - } - w.flushAckC <- struct{}{} - } - if err != nil { - w.p.c.errorf("elastic: bulk processor %q was unable to perform work: %v", w.p.name, err) - if !stop { - waitForActive := func() { - // Add back pressure to prevent Add calls from filling up the request queue - ready := make(chan struct{}) - go w.waitForActiveConnection(ready) - <-ready - } - if _, ok := err.(net.Error); ok { - waitForActive() - } else if IsConnErr(err) { - waitForActive() - } - } - } - } -} - -// commit commits the bulk requests in the given service, -// invoking callbacks as specified. -func (w *bulkWorker) commit(ctx context.Context) error { - var res *BulkResponse - - // commitFunc will commit bulk requests and, on failure, be retried - // via exponential backoff - commitFunc := func() error { - var err error - // Save requests because they will be reset in service.Do - reqs := w.service.requests - res, err = w.service.Do(ctx) - if err == nil { - // Overall bulk request was OK. 
But each bulk response item also has a status - if w.p.retryItemStatusCodes != nil && len(w.p.retryItemStatusCodes) > 0 { - // Check res.Items since some might be soft failures - if res.Items != nil && res.Errors { - // res.Items will be 1 to 1 with reqs in same order - for i, item := range res.Items { - for _, result := range item { - if _, found := w.p.retryItemStatusCodes[result.Status]; found { - w.service.Add(reqs[i]) - if err == nil { - err = ErrBulkItemRetry - } - } - } - } - } - } - } - return err - } - // notifyFunc will be called if retry fails - notifyFunc := func(err error) { - w.p.c.errorf("elastic: bulk processor %q failed but may retry: %v", w.p.name, err) - } - - id := atomic.AddInt64(&w.p.executionId, 1) - - // Update # documents in queue before eventual retries - w.p.statsMu.Lock() - if w.p.wantStats { - w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) - } - w.p.statsMu.Unlock() - - // Save requests because they will be reset in commitFunc - reqs := w.service.requests - - // Invoke before callback - if w.p.beforeFn != nil { - w.p.beforeFn(id, reqs) - } - - // Commit bulk requests - err := RetryNotify(commitFunc, w.p.backoff, notifyFunc) - w.updateStats(res) - if err != nil { - w.p.c.errorf("elastic: bulk processor %q failed: %v", w.p.name, err) - } - - // Invoke after callback - if w.p.afterFn != nil { - w.p.afterFn(id, reqs, res, err) - } - - return err -} - -func (w *bulkWorker) waitForActiveConnection(ready chan<- struct{}) { - defer close(ready) - - t := time.NewTicker(5 * time.Second) - defer t.Stop() - - client := w.p.c - stopReconnC := w.p.stopReconnC - w.p.c.errorf("elastic: bulk processor %q is waiting for an active connection", w.p.name) - - // loop until a health check finds at least 1 active connection or the reconnection channel is closed - for { - select { - case _, ok := <-stopReconnC: - if !ok { - w.p.c.errorf("elastic: bulk processor %q active connection check interrupted", w.p.name) - return - } - case <-t.C: - client.healthcheck(context.Background(), 3*time.Second, true) - if client.mustActiveConn() == nil { - // found an active connection - // exit and signal done to the WaitGroup - return - } - } - } -} - -func (w *bulkWorker) updateStats(res *BulkResponse) { - // Update stats - if res != nil { - w.p.statsMu.Lock() - if w.p.wantStats { - w.p.stats.Committed++ - if res != nil { - w.p.stats.Indexed += int64(len(res.Indexed())) - w.p.stats.Created += int64(len(res.Created())) - w.p.stats.Updated += int64(len(res.Updated())) - w.p.stats.Deleted += int64(len(res.Deleted())) - w.p.stats.Succeeded += int64(len(res.Succeeded())) - w.p.stats.Failed += int64(len(res.Failed())) - } - w.p.stats.Workers[w.i].Queued = int64(len(w.service.requests)) - w.p.stats.Workers[w.i].LastDuration = time.Duration(int64(res.Took)) * time.Millisecond - } - w.p.statsMu.Unlock() - } -} - -// commitRequired returns true if the service has to commit its -// bulk requests. This can be either because the number of actions -// or the estimated size in bytes is larger than specified in the -// BulkProcessorService. 
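The retry machinery in this worker loop is configured entirely from the service; a sketch that spells the knobs out explicitly, assuming an existing *elastic.Client, with values that restate the defaults.

package bulkexample

import (
    "context"
    "time"

    "github.com/olivere/elastic/v7"
)

// startWithRetries spells out the retry knobs the worker loop consumes;
// the values shown restate the library defaults.
func startWithRetries(client *elastic.Client) (*elastic.BulkProcessor, error) {
    return client.BulkProcessor().
        Backoff(elastic.NewExponentialBackoff(
            200*time.Millisecond, // initial wait between commit attempts
            10*time.Second,       // cap on the exponential growth
        )).
        // Per-item soft failures with these statuses are re-enqueued.
        RetryItemStatusCodes(408, 429, 503, 507).
        Do(context.Background())
}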
-func (w *bulkWorker) commitRequired() bool { - if w.bulkActions >= 0 && w.service.NumberOfActions() >= w.bulkActions { - return true - } - if w.bulkSize >= 0 && w.service.EstimatedSizeInBytes() >= int64(w.bulkSize) { - return true - } - return false -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_request.go b/vendor/github.com/olivere/elastic/v7/bulk_request.go deleted file mode 100644 index ce3bf07..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_request.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// -- Bulkable request (index/update/delete) -- - -// BulkableRequest is a generic interface to bulkable requests. -type BulkableRequest interface { - fmt.Stringer - Source() ([]string, error) -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_update_request.go b/vendor/github.com/olivere/elastic/v7/bulk_update_request.go deleted file mode 100644 index a0ddf18..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_update_request.go +++ /dev/null @@ -1,334 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -//go:generate easyjson bulk_update_request.go - -import ( - "encoding/json" - "fmt" - "strings" -) - -// BulkUpdateRequest is a request to update a document in Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. -type BulkUpdateRequest struct { - BulkableRequest - index string - typ string - id string - - routing string - parent string - script *Script - scriptedUpsert *bool - version int64 // default is MATCH_ANY - versionType string // default is "internal" - retryOnConflict *int - upsert interface{} - docAsUpsert *bool - detectNoop *bool - doc interface{} - returnSource *bool - ifSeqNo *int64 - ifPrimaryTerm *int64 - - source []string - - useEasyJSON bool -} - -//easyjson:json -type bulkUpdateRequestCommand map[string]bulkUpdateRequestCommandOp - -//easyjson:json -type bulkUpdateRequestCommandOp struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Parent string `json:"parent,omitempty"` - // RetryOnConflict is "_retry_on_conflict" for 6.0 and "retry_on_conflict" for 6.1+. - RetryOnConflict *int `json:"retry_on_conflict,omitempty"` - Routing string `json:"routing,omitempty"` - Version int64 `json:"version,omitempty"` - VersionType string `json:"version_type,omitempty"` - IfSeqNo *int64 `json:"if_seq_no,omitempty"` - IfPrimaryTerm *int64 `json:"if_primary_term,omitempty"` -} - -//easyjson:json -type bulkUpdateRequestCommandData struct { - DetectNoop *bool `json:"detect_noop,omitempty"` - Doc interface{} `json:"doc,omitempty"` - DocAsUpsert *bool `json:"doc_as_upsert,omitempty"` - Script interface{} `json:"script,omitempty"` - ScriptedUpsert *bool `json:"scripted_upsert,omitempty"` - Upsert interface{} `json:"upsert,omitempty"` - Source *bool `json:"_source,omitempty"` -} - -// NewBulkUpdateRequest returns a new BulkUpdateRequest. 
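BulkableRequest, deleted above, is the only contract the processor checks, so pre-encoded payloads can be queued through a hand-rolled type; the one sketched here is hypothetical, not part of the library.

package bulkexample

import (
    "strings"

    "github.com/olivere/elastic/v7"
)

// rawBulkRequest is a hypothetical BulkableRequest that passes
// pre-encoded NDJSON lines through untouched.
type rawBulkRequest struct {
    lines []string
}

// String satisfies fmt.Stringer, one half of BulkableRequest.
func (r rawBulkRequest) String() string { return strings.Join(r.lines, "\n") }

// Source satisfies the other half.
func (r rawBulkRequest) Source() ([]string, error) { return r.lines, nil }

// Compile-time proof that the type fits the interface.
var _ elastic.BulkableRequest = rawBulkRequest{}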
-func NewBulkUpdateRequest() *BulkUpdateRequest { - return &BulkUpdateRequest{} -} - -// UseEasyJSON is an experimental setting that enables serialization -// with github.com/mailru/easyjson, which should result in faster serialization -// and fewer allocations, but removes compatibility with encoding/json and -// relies on unsafe, etc. See https://github.com/mailru/easyjson#issues-notes-and-limitations -// for details. This setting is disabled by default. -func (r *BulkUpdateRequest) UseEasyJSON(enable bool) *BulkUpdateRequest { - r.useEasyJSON = enable - return r -} - -// Index specifies the Elasticsearch index to use for this update request. -// If unspecified, the index set on the BulkService will be used. -func (r *BulkUpdateRequest) Index(index string) *BulkUpdateRequest { - r.index = index - r.source = nil - return r -} - -// Type specifies the Elasticsearch type to use for this update request. -// If unspecified, the type set on the BulkService will be used. -func (r *BulkUpdateRequest) Type(typ string) *BulkUpdateRequest { - r.typ = typ - r.source = nil - return r -} - -// Id specifies the identifier of the document to update. -func (r *BulkUpdateRequest) Id(id string) *BulkUpdateRequest { - r.id = id - r.source = nil - return r -} - -// Routing specifies a routing value for the request. -func (r *BulkUpdateRequest) Routing(routing string) *BulkUpdateRequest { - r.routing = routing - r.source = nil - return r -} - -// Parent specifies the identifier of the parent document (if available). -func (r *BulkUpdateRequest) Parent(parent string) *BulkUpdateRequest { - r.parent = parent - r.source = nil - return r -} - -// Script specifies an update script. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html#bulk-update -// and https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details. -func (r *BulkUpdateRequest) Script(script *Script) *BulkUpdateRequest { - r.script = script - r.source = nil - return r -} - -// ScriptedUpsert specifies whether your script will run regardless of -// whether the document exists or not. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_scripted_upsert_literal -func (r *BulkUpdateRequest) ScriptedUpsert(upsert bool) *BulkUpdateRequest { - r.scriptedUpsert = &upsert - r.source = nil - return r -} - -// RetryOnConflict specifies how often to retry in case of a version conflict. -func (r *BulkUpdateRequest) RetryOnConflict(retryOnConflict int) *BulkUpdateRequest { - r.retryOnConflict = &retryOnConflict - r.source = nil - return r -} - -// Version indicates the version of the document as part of an optimistic -// concurrency model. -func (r *BulkUpdateRequest) Version(version int64) *BulkUpdateRequest { - r.version = version - r.source = nil - return r -} - -// VersionType can be "internal" (default), "external", "external_gte", -// or "external_gt". -func (r *BulkUpdateRequest) VersionType(versionType string) *BulkUpdateRequest { - r.versionType = versionType - r.source = nil - return r -} - -// IfSeqNo indicates to only perform the index operation if the last -// operation that has changed the document has the specified sequence number. -func (r *BulkUpdateRequest) IfSeqNo(ifSeqNo int64) *BulkUpdateRequest { - r.ifSeqNo = &ifSeqNo - return r -} - -// IfPrimaryTerm indicates to only perform the index operation if the -// last operation that has changed the document has the specified primary term.
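The update setters above combine naturally with scripted upserts; a sketch with an illustrative Painless script, again assuming olivere/elastic v7.

package main

import (
    "fmt"

    "github.com/olivere/elastic/v7"
)

func main() {
    script := elastic.NewScript("ctx._source.counter += params.n").
        Param("n", 1) // illustrative script and parameter

    req := elastic.NewBulkUpdateRequest().
        Index("events"). // illustrative
        Id("1").
        RetryOnConflict(3).   // retry version conflicts up to 3 times
        Script(script).
        ScriptedUpsert(true). // run the script even for a missing doc
        Upsert(map[string]interface{}{"counter": 0})

    // Two on-wire lines: the "update" action, then the script/upsert body.
    fmt.Println(req.String())
}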
-func (r *BulkUpdateRequest) IfPrimaryTerm(ifPrimaryTerm int64) *BulkUpdateRequest { - r.ifPrimaryTerm = &ifPrimaryTerm - return r -} - -// Doc specifies the updated document. -func (r *BulkUpdateRequest) Doc(doc interface{}) *BulkUpdateRequest { - r.doc = doc - r.source = nil - return r -} - -// DocAsUpsert indicates whether the contents of Doc should be used as -// the Upsert value. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html#_literal_doc_as_upsert_literal -// for details. -func (r *BulkUpdateRequest) DocAsUpsert(docAsUpsert bool) *BulkUpdateRequest { - r.docAsUpsert = &docAsUpsert - r.source = nil - return r -} - -// DetectNoop specifies whether changes that don't affect the document -// should be ignored (true) or not (false). This is enabled by default -// in Elasticsearch. -func (r *BulkUpdateRequest) DetectNoop(detectNoop bool) *BulkUpdateRequest { - r.detectNoop = &detectNoop - r.source = nil - return r -} - -// Upsert specifies the document to use for upserts. It will be used for -// create if the original document does not exist. -func (r *BulkUpdateRequest) Upsert(doc interface{}) *BulkUpdateRequest { - r.upsert = doc - r.source = nil - return r -} - -// ReturnSource specifies whether Elasticsearch should return the source -// after the update. In the request, this corresponds to the `_source` field. -// It is false by default. -func (r *BulkUpdateRequest) ReturnSource(source bool) *BulkUpdateRequest { - r.returnSource = &source - r.source = nil - return r -} - -// String returns the on-wire representation of the update request, -// concatenated as a single string. -func (r *BulkUpdateRequest) String() string { - lines, err := r.Source() - if err != nil { - return fmt.Sprintf("error: %v", err) - } - return strings.Join(lines, "\n") -} - -// Source returns the on-wire representation of the update request, -// split into an action-and-meta-data line and an (optional) source line. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-bulk.html -// for details. -func (r *BulkUpdateRequest) Source() ([]string, error) { - // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } - // { "doc" : { "field1" : "value1", ... } } - // or - // { "update" : { "_index" : "test", "_type" : "type1", "_id" : "1", ... } } - // { "script" : { ... } } - - if r.source != nil { - return r.source, nil - } - - lines := make([]string, 2) - - // "update" ... - updateCommand := bulkUpdateRequestCommandOp{ - Index: r.index, - Type: r.typ, - Id: r.id, - Routing: r.routing, - Parent: r.parent, - Version: r.version, - VersionType: r.versionType, - RetryOnConflict: r.retryOnConflict, - IfSeqNo: r.ifSeqNo, - IfPrimaryTerm: r.ifPrimaryTerm, - } - command := bulkUpdateRequestCommand{ - "update": updateCommand, - } - - var err error - var body []byte - if r.useEasyJSON { - // easyjson - body, err = command.MarshalJSON() - } else { - // encoding/json - body, err = json.Marshal(command) - } - if err != nil { - return nil, err - } - - lines[0] = string(body) - - // 2nd line: {"doc" : { ...
}} or {"script": {...}} - var doc interface{} - if r.doc != nil { - // Automatically serialize strings as raw JSON - switch t := r.doc.(type) { - default: - doc = r.doc - case string: - if len(t) > 0 { - doc = json.RawMessage(t) - } - case *string: - if t != nil && len(*t) > 0 { - doc = json.RawMessage(*t) - } - } - } - data := bulkUpdateRequestCommandData{ - DocAsUpsert: r.docAsUpsert, - DetectNoop: r.detectNoop, - Upsert: r.upsert, - ScriptedUpsert: r.scriptedUpsert, - Doc: doc, - Source: r.returnSource, - } - if r.script != nil { - script, err := r.script.Source() - if err != nil { - return nil, err - } - data.Script = script - } - - if r.useEasyJSON { - // easyjson - body, err = data.MarshalJSON() - } else { - // encoding/json - body, err = json.Marshal(data) - } - if err != nil { - return nil, err - } - - lines[1] = string(body) - - r.source = lines - return lines, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/bulk_update_request_easyjson.go b/vendor/github.com/olivere/elastic/v7/bulk_update_request_easyjson.go deleted file mode 100644 index 79d8db1..0000000 --- a/vendor/github.com/olivere/elastic/v7/bulk_update_request_easyjson.go +++ /dev/null @@ -1,489 +0,0 @@ -// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. - -package elastic - -import ( - json "encoding/json" - easyjson "github.com/mailru/easyjson" - jlexer "github.com/mailru/easyjson/jlexer" - jwriter "github.com/mailru/easyjson/jwriter" -) - -// suppress unused package warning -var ( - _ *json.RawMessage - _ *jlexer.Lexer - _ *jwriter.Writer - _ easyjson.Marshaler -) - -func easyjson1ed00e60DecodeGithubComOlivereElasticV7(in *jlexer.Lexer, out *bulkUpdateRequestCommandOp) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeString() - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "_index": - out.Index = string(in.String()) - case "_type": - out.Type = string(in.String()) - case "_id": - out.Id = string(in.String()) - case "parent": - out.Parent = string(in.String()) - case "retry_on_conflict": - if in.IsNull() { - in.Skip() - out.RetryOnConflict = nil - } else { - if out.RetryOnConflict == nil { - out.RetryOnConflict = new(int) - } - *out.RetryOnConflict = int(in.Int()) - } - case "routing": - out.Routing = string(in.String()) - case "version": - out.Version = int64(in.Int64()) - case "version_type": - out.VersionType = string(in.String()) - case "if_seq_no": - if in.IsNull() { - in.Skip() - out.IfSeqNo = nil - } else { - if out.IfSeqNo == nil { - out.IfSeqNo = new(int64) - } - *out.IfSeqNo = int64(in.Int64()) - } - case "if_primary_term": - if in.IsNull() { - in.Skip() - out.IfPrimaryTerm = nil - } else { - if out.IfPrimaryTerm == nil { - out.IfPrimaryTerm = new(int64) - } - *out.IfPrimaryTerm = int64(in.Int64()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson1ed00e60EncodeGithubComOlivereElasticV7(out *jwriter.Writer, in bulkUpdateRequestCommandOp) { - out.RawByte('{') - first := true - _ = first - if in.Index != "" { - const prefix string = ",\"_index\":" - first = false - out.RawString(prefix[1:]) - out.String(string(in.Index)) - } - if in.Type != "" { - const prefix string = ",\"_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Type)) - } - if in.Id != "" { 
- const prefix string = ",\"_id\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Id)) - } - if in.Parent != "" { - const prefix string = ",\"parent\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Parent)) - } - if in.RetryOnConflict != nil { - const prefix string = ",\"retry_on_conflict\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int(int(*in.RetryOnConflict)) - } - if in.Routing != "" { - const prefix string = ",\"routing\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.Routing)) - } - if in.Version != 0 { - const prefix string = ",\"version\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(in.Version)) - } - if in.VersionType != "" { - const prefix string = ",\"version_type\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.String(string(in.VersionType)) - } - if in.IfSeqNo != nil { - const prefix string = ",\"if_seq_no\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfSeqNo)) - } - if in.IfPrimaryTerm != nil { - const prefix string = ",\"if_primary_term\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Int64(int64(*in.IfPrimaryTerm)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkUpdateRequestCommandOp) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson1ed00e60EncodeGithubComOlivereElasticV7(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkUpdateRequestCommandOp) MarshalEasyJSON(w *jwriter.Writer) { - easyjson1ed00e60EncodeGithubComOlivereElasticV7(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkUpdateRequestCommandOp) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson1ed00e60DecodeGithubComOlivereElasticV7(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkUpdateRequestCommandOp) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson1ed00e60DecodeGithubComOlivereElasticV7(l, v) -} -func easyjson1ed00e60DecodeGithubComOlivereElasticV71(in *jlexer.Lexer, out *bulkUpdateRequestCommandData) { - isTopLevel := in.IsStart() - if in.IsNull() { - if isTopLevel { - in.Consumed() - } - in.Skip() - return - } - in.Delim('{') - for !in.IsDelim('}') { - key := in.UnsafeString() - in.WantColon() - if in.IsNull() { - in.Skip() - in.WantComma() - continue - } - switch key { - case "detect_noop": - if in.IsNull() { - in.Skip() - out.DetectNoop = nil - } else { - if out.DetectNoop == nil { - out.DetectNoop = new(bool) - } - *out.DetectNoop = bool(in.Bool()) - } - case "doc": - if m, ok := out.Doc.(easyjson.Unmarshaler); ok { - m.UnmarshalEasyJSON(in) - } else if m, ok := out.Doc.(json.Unmarshaler); ok { - _ = m.UnmarshalJSON(in.Raw()) - } else { - out.Doc = in.Interface() - } - case "doc_as_upsert": - if in.IsNull() { - in.Skip() - out.DocAsUpsert = nil - } else { - if out.DocAsUpsert == nil { - out.DocAsUpsert = new(bool) - } - *out.DocAsUpsert = bool(in.Bool()) - } - case "script": - if m, ok := out.Script.(easyjson.Unmarshaler); ok { - 
m.UnmarshalEasyJSON(in) - } else if m, ok := out.Script.(json.Unmarshaler); ok { - _ = m.UnmarshalJSON(in.Raw()) - } else { - out.Script = in.Interface() - } - case "scripted_upsert": - if in.IsNull() { - in.Skip() - out.ScriptedUpsert = nil - } else { - if out.ScriptedUpsert == nil { - out.ScriptedUpsert = new(bool) - } - *out.ScriptedUpsert = bool(in.Bool()) - } - case "upsert": - if m, ok := out.Upsert.(easyjson.Unmarshaler); ok { - m.UnmarshalEasyJSON(in) - } else if m, ok := out.Upsert.(json.Unmarshaler); ok { - _ = m.UnmarshalJSON(in.Raw()) - } else { - out.Upsert = in.Interface() - } - case "_source": - if in.IsNull() { - in.Skip() - out.Source = nil - } else { - if out.Source == nil { - out.Source = new(bool) - } - *out.Source = bool(in.Bool()) - } - default: - in.SkipRecursive() - } - in.WantComma() - } - in.Delim('}') - if isTopLevel { - in.Consumed() - } -} -func easyjson1ed00e60EncodeGithubComOlivereElasticV71(out *jwriter.Writer, in bulkUpdateRequestCommandData) { - out.RawByte('{') - first := true - _ = first - if in.DetectNoop != nil { - const prefix string = ",\"detect_noop\":" - first = false - out.RawString(prefix[1:]) - out.Bool(bool(*in.DetectNoop)) - } - if in.Doc != nil { - const prefix string = ",\"doc\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - if m, ok := in.Doc.(easyjson.Marshaler); ok { - m.MarshalEasyJSON(out) - } else if m, ok := in.Doc.(json.Marshaler); ok { - out.Raw(m.MarshalJSON()) - } else { - out.Raw(json.Marshal(in.Doc)) - } - } - if in.DocAsUpsert != nil { - const prefix string = ",\"doc_as_upsert\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Bool(bool(*in.DocAsUpsert)) - } - if in.Script != nil { - const prefix string = ",\"script\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - if m, ok := in.Script.(easyjson.Marshaler); ok { - m.MarshalEasyJSON(out) - } else if m, ok := in.Script.(json.Marshaler); ok { - out.Raw(m.MarshalJSON()) - } else { - out.Raw(json.Marshal(in.Script)) - } - } - if in.ScriptedUpsert != nil { - const prefix string = ",\"scripted_upsert\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Bool(bool(*in.ScriptedUpsert)) - } - if in.Upsert != nil { - const prefix string = ",\"upsert\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - if m, ok := in.Upsert.(easyjson.Marshaler); ok { - m.MarshalEasyJSON(out) - } else if m, ok := in.Upsert.(json.Marshaler); ok { - out.Raw(m.MarshalJSON()) - } else { - out.Raw(json.Marshal(in.Upsert)) - } - } - if in.Source != nil { - const prefix string = ",\"_source\":" - if first { - first = false - out.RawString(prefix[1:]) - } else { - out.RawString(prefix) - } - out.Bool(bool(*in.Source)) - } - out.RawByte('}') -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkUpdateRequestCommandData) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson1ed00e60EncodeGithubComOlivereElasticV71(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkUpdateRequestCommandData) MarshalEasyJSON(w *jwriter.Writer) { - easyjson1ed00e60EncodeGithubComOlivereElasticV71(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkUpdateRequestCommandData) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - 
easyjson1ed00e60DecodeGithubComOlivereElasticV71(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkUpdateRequestCommandData) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson1ed00e60DecodeGithubComOlivereElasticV71(l, v) -} -func easyjson1ed00e60DecodeGithubComOlivereElasticV72(in *jlexer.Lexer, out *bulkUpdateRequestCommand) { - isTopLevel := in.IsStart() - if in.IsNull() { - in.Skip() - } else { - in.Delim('{') - *out = make(bulkUpdateRequestCommand) - for !in.IsDelim('}') { - key := string(in.String()) - in.WantColon() - var v1 bulkUpdateRequestCommandOp - (v1).UnmarshalEasyJSON(in) - (*out)[key] = v1 - in.WantComma() - } - in.Delim('}') - } - if isTopLevel { - in.Consumed() - } -} -func easyjson1ed00e60EncodeGithubComOlivereElasticV72(out *jwriter.Writer, in bulkUpdateRequestCommand) { - if in == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { - out.RawString(`null`) - } else { - out.RawByte('{') - v2First := true - for v2Name, v2Value := range in { - if v2First { - v2First = false - } else { - out.RawByte(',') - } - out.String(string(v2Name)) - out.RawByte(':') - (v2Value).MarshalEasyJSON(out) - } - out.RawByte('}') - } -} - -// MarshalJSON supports json.Marshaler interface -func (v bulkUpdateRequestCommand) MarshalJSON() ([]byte, error) { - w := jwriter.Writer{} - easyjson1ed00e60EncodeGithubComOlivereElasticV72(&w, v) - return w.Buffer.BuildBytes(), w.Error -} - -// MarshalEasyJSON supports easyjson.Marshaler interface -func (v bulkUpdateRequestCommand) MarshalEasyJSON(w *jwriter.Writer) { - easyjson1ed00e60EncodeGithubComOlivereElasticV72(w, v) -} - -// UnmarshalJSON supports json.Unmarshaler interface -func (v *bulkUpdateRequestCommand) UnmarshalJSON(data []byte) error { - r := jlexer.Lexer{Data: data} - easyjson1ed00e60DecodeGithubComOlivereElasticV72(&r, v) - return r.Error() -} - -// UnmarshalEasyJSON supports easyjson.Unmarshaler interface -func (v *bulkUpdateRequestCommand) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjson1ed00e60DecodeGithubComOlivereElasticV72(l, v) -} diff --git a/vendor/github.com/olivere/elastic/v7/canonicalize.go b/vendor/github.com/olivere/elastic/v7/canonicalize.go deleted file mode 100644 index a436f03..0000000 --- a/vendor/github.com/olivere/elastic/v7/canonicalize.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "net/url" - -// canonicalize takes a list of URLs and returns its canonicalized form, i.e. -// remove anything but scheme, userinfo, host, path, and port. -// It also removes all trailing slashes. Invalid URLs or URLs that do not -// use protocol http or https are skipped. 
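For reference, the Source() logic deleted above renders each bulk update as two NDJSON lines: a command line built from bulkUpdateRequestCommandOp and a data line built from bulkUpdateRequestCommandData, with string docs passed through as raw JSON. A minimal sketch, assuming the package's exported BulkUpdateRequest wrappers, which sit outside this excerpt:

package main

import (
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// A string Doc is passed through as raw JSON (json.RawMessage), not re-encoded
	// as a quoted string -- per the type switch in the deleted Source() above.
	req := elastic.NewBulkUpdateRequest().
		Index("posts").
		Id("42").
		DocAsUpsert(true).
		Doc(`{"title":"hello"}`)
	lines, err := req.Source()
	if err != nil {
		panic(err)
	}
	// lines[0]: the command line, e.g. {"update":{"_index":"posts","_id":"42"}}
	// lines[1]: the data line, e.g. {"doc_as_upsert":true,"doc":{"title":"hello"}}
	// (field order depends on the encoder; UseEasyJSON(true) switches to the
	// generated easyjson marshalers being deleted in this commit)
	for _, line := range lines {
		fmt.Println(line)
	}
}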
-// -// Example: -// http://127.0.0.1:9200/?query=1 -> http://127.0.0.1:9200 -// http://127.0.0.1:9200/db1/ -> http://127.0.0.1:9200/db1 -func canonicalize(rawurls ...string) []string { - var canonicalized []string - for _, rawurl := range rawurls { - u, err := url.Parse(rawurl) - if err == nil { - if u.Scheme == "http" || u.Scheme == "https" { - // Trim trailing slashes - for len(u.Path) > 0 && u.Path[len(u.Path)-1] == '/' { - u.Path = u.Path[0 : len(u.Path)-1] - } - u.Fragment = "" - u.RawQuery = "" - canonicalized = append(canonicalized, u.String()) - } - } - } - return canonicalized -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_aliases.go b/vendor/github.com/olivere/elastic/v7/cat_aliases.go deleted file mode 100644 index ddadef5..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_aliases.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CatAliasesService shows information about currently configured aliases -// to indices including filter and routing infos. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-aliases.html -// for details. -type CatAliasesService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - local *bool - masterTimeout string - aliases []string - columns []string - sort []string // list of columns for sort order -} - -// NewCatAliasesService creates a new CatAliasesService. -func NewCatAliasesService(client *Client) *CatAliasesService { - return &CatAliasesService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatAliasesService) Pretty(pretty bool) *CatAliasesService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CatAliasesService) Human(human bool) *CatAliasesService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatAliasesService) ErrorTrace(errorTrace bool) *CatAliasesService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatAliasesService) FilterPath(filterPath ...string) *CatAliasesService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatAliasesService) Header(name string, value string) *CatAliasesService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatAliasesService) Headers(headers http.Header) *CatAliasesService { - s.headers = headers - return s -} - -// Alias specifies one or more aliases to which information should be returned. -func (s *CatAliasesService) Alias(alias ...string) *CatAliasesService { - s.aliases = alias - return s -} - -// Local indicates to return local information, i.e. 
do not retrieve -// the state from master node (default: false). -func (s *CatAliasesService) Local(local bool) *CatAliasesService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *CatAliasesService) MasterTimeout(masterTimeout string) *CatAliasesService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/aliases?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. -func (s *CatAliasesService) Columns(columns ...string) *CatAliasesService { - s.columns = columns - return s -} - -// Sort is a list of fields to sort by. -func (s *CatAliasesService) Sort(fields ...string) *CatAliasesService { - s.sort = fields - return s -} - -// buildURL builds the URL for the operation. -func (s *CatAliasesService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - - if len(s.aliases) > 0 { - path, err = uritemplates.Expand("/_cat/aliases/{name}", map[string]string{ - "name": strings.Join(s.aliases, ","), - }) - } else { - path = "/_cat/aliases" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - if len(s.columns) > 0 { - params.Set("h", strings.Join(s.columns, ",")) - } - return path, params, nil -} - -// Do executes the operation. -func (s *CatAliasesService) Do(ctx context.Context) (CatAliasesResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatAliasesResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatAliasesResponse is the outcome of CatAliasesService.Do. -type CatAliasesResponse []CatAliasesResponseRow - -// CatAliasesResponseRow is a single row in a CatAliasesResponse. -// Notice that not all of these fields might be filled; that depends -// on the number of columns chose in the request (see CatAliasesService.Columns). -type CatAliasesResponseRow struct { - // Alias name. - Alias string `json:"alias"` - // Index the alias points to. - Index string `json:"index"` - // Filter, e.g. "*" or "-". - Filter string `json:"filter"` - // RoutingIndex specifies the index routing (or "-"). - RoutingIndex string `json:"routing.index"` - // RoutingSearch specifies the search routing (or "-"). 
- RoutingSearch string `json:"routing.search"` - // IsWriteIndex indicates whether the index can be written to (or "-"). - IsWriteIndex string `json:"is_write_index"` -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_allocation.go b/vendor/github.com/olivere/elastic/v7/cat_allocation.go deleted file mode 100644 index 0e49707..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_allocation.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CatAllocationService provides a snapshot of how many shards are allocated -// to each data node and how much disk space they are using. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-allocation.html -// for details. -type CatAllocationService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - bytes string // b, k, m, or g - local *bool - masterTimeout string - nodes []string - columns []string - sort []string // list of columns for sort order -} - -// NewCatAllocationService creates a new CatAllocationService. -func NewCatAllocationService(client *Client) *CatAllocationService { - return &CatAllocationService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatAllocationService) Pretty(pretty bool) *CatAllocationService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CatAllocationService) Human(human bool) *CatAllocationService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatAllocationService) ErrorTrace(errorTrace bool) *CatAllocationService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatAllocationService) FilterPath(filterPath ...string) *CatAllocationService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatAllocationService) Header(name string, value string) *CatAllocationService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatAllocationService) Headers(headers http.Header) *CatAllocationService { - s.headers = headers - return s -} - -// NodeID specifies one or more node IDs to for information should be returned. -func (s *CatAllocationService) NodeID(nodes ...string) *CatAllocationService { - s.nodes = nodes - return s -} - -// Bytes represents the unit in which to display byte values. -// Valid values are: "b", "k", "m", or "g". -func (s *CatAllocationService) Bytes(bytes string) *CatAllocationService { - s.bytes = bytes - return s -} - -// Local indicates to return local information, i.e. do not retrieve -// the state from master node (default: false). 
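A short usage sketch for the CatAliasesService deleted above; the constructor, setters, and response row fields are exactly those in the removed file, while the client and context are assumed:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func printAliases(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewCatAliasesService(client).
		Alias("logs-*").           // only aliases matching this pattern
		Columns("alias", "index"). // request just these columns (sent as the h param)
		Sort("alias").
		Do(ctx)
	if err != nil {
		return err
	}
	for _, row := range res {
		fmt.Printf("%s -> %s\n", row.Alias, row.Index)
	}
	return nil
}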
-func (s *CatAllocationService) Local(local bool) *CatAllocationService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *CatAllocationService) MasterTimeout(masterTimeout string) *CatAllocationService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/aliases?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. -func (s *CatAllocationService) Columns(columns ...string) *CatAllocationService { - s.columns = columns - return s -} - -// Sort is a list of fields to sort by. -func (s *CatAllocationService) Sort(fields ...string) *CatAllocationService { - s.sort = fields - return s -} - -// buildURL builds the URL for the operation. -func (s *CatAllocationService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - - if len(s.nodes) > 0 { - path, err = uritemplates.Expand("/_cat/allocation/{node_id}", map[string]string{ - "node_id": strings.Join(s.nodes, ","), - }) - } else { - path = "/_cat/allocation" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.bytes != "" { - params.Set("bytes", s.bytes) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - if len(s.columns) > 0 { - params.Set("h", strings.Join(s.columns, ",")) - } - return path, params, nil -} - -// Do executes the operation. -func (s *CatAllocationService) Do(ctx context.Context) (CatAllocationResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatAllocationResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatAllocationResponse is the outcome of CatAllocationService.Do. -type CatAllocationResponse []CatAllocationResponseRow - -// CatAllocationResponseRow is a single row in a CatAllocationResponse. -// Notice that not all of these fields might be filled; that depends -// on the number of columns chose in the request (see CatAllocationService.Columns). -type CatAllocationResponseRow struct { - // Shards represents the number of shards on a node. - Shards int `json:"shards,string"` - // DiskIndices represents the disk used by ES indices, e.g. "46.1kb". - DiskIndices string `json:"disk.indices"` - // DiskUsed represents the disk used (total, not just ES), e.g. 
"34.5gb" - DiskUsed string `json:"disk.used"` - // DiskAvail represents the disk available, e.g. "53.2gb". - DiskAvail string `json:"disk.avail"` - // DiskTotal represents the total capacity of all volumes, e.g. "87.7gb". - DiskTotal string `json:"disk.total"` - // DiskPercent represents the percent of disk used, e.g. 39. - DiskPercent int `json:"disk.percent,string"` - // Host represents the hostname of the node. - Host string `json:"host"` - // IP represents the IP address of the node. - IP string `json:"ip"` - // Node represents the node ID. - Node string `json:"node"` -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_count.go b/vendor/github.com/olivere/elastic/v7/cat_count.go deleted file mode 100644 index 82fffd9..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_count.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CatCountService provides quick access to the document count of the entire cluster, -// or individual indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-count.html -// for details. -type CatCountService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - local *bool - masterTimeout string - columns []string - sort []string // list of columns for sort order -} - -// NewCatCountService creates a new CatCountService. -func NewCatCountService(client *Client) *CatCountService { - return &CatCountService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatCountService) Pretty(pretty bool) *CatCountService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CatCountService) Human(human bool) *CatCountService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatCountService) ErrorTrace(errorTrace bool) *CatCountService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatCountService) FilterPath(filterPath ...string) *CatCountService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatCountService) Header(name string, value string) *CatCountService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatCountService) Headers(headers http.Header) *CatCountService { - s.headers = headers - return s -} - -// Index specifies zero or more indices for which to return counts -// (by default counts for all indices are returned). -func (s *CatCountService) Index(index ...string) *CatCountService { - s.index = index - return s -} - -// Local indicates to return local information, i.e. do not retrieve -// the state from master node (default: false). 
-func (s *CatCountService) Local(local bool) *CatCountService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *CatCountService) MasterTimeout(masterTimeout string) *CatCountService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/count?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. -func (s *CatCountService) Columns(columns ...string) *CatCountService { - s.columns = columns - return s -} - -// Sort is a list of fields to sort by. -func (s *CatCountService) Sort(fields ...string) *CatCountService { - s.sort = fields - return s -} - -// buildURL builds the URL for the operation. -func (s *CatCountService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/_cat/count/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_cat/count" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - if len(s.columns) > 0 { - params.Set("h", strings.Join(s.columns, ",")) - } - return path, params, nil -} - -// Do executes the operation. -func (s *CatCountService) Do(ctx context.Context) (CatCountResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatCountResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatCountResponse is the outcome of CatCountService.Do. -type CatCountResponse []CatCountResponseRow - -// CatCountResponseRow specifies the data returned for one index -// of a CatCountResponse. Notice that not all of these fields might -// be filled; that depends on the number of columns chose in the -// request (see CatCountService.Columns). -type CatCountResponseRow struct { - Epoch int64 `json:"epoch,string"` // e.g. 1527077996 - Timestamp string `json:"timestamp"` // e.g. 
"12:19:56" - Count int `json:"count,string"` // number of documents -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_health.go b/vendor/github.com/olivere/elastic/v7/cat_health.go deleted file mode 100644 index 2d6bf89..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_health.go +++ /dev/null @@ -1,211 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" -) - -// CatHealthService returns a terse representation of the same information -// as /_cluster/health. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-health.html -// for details. -type CatHealthService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - local *bool - masterTimeout string - columns []string - sort []string // list of columns for sort order - disableTimestamping *bool -} - -// NewCatHealthService creates a new CatHealthService. -func NewCatHealthService(client *Client) *CatHealthService { - return &CatHealthService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatHealthService) Pretty(pretty bool) *CatHealthService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CatHealthService) Human(human bool) *CatHealthService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatHealthService) ErrorTrace(errorTrace bool) *CatHealthService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatHealthService) FilterPath(filterPath ...string) *CatHealthService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatHealthService) Header(name string, value string) *CatHealthService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatHealthService) Headers(headers http.Header) *CatHealthService { - s.headers = headers - return s -} - -// Local indicates to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *CatHealthService) Local(local bool) *CatHealthService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *CatHealthService) MasterTimeout(masterTimeout string) *CatHealthService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/indices?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. 
-func (s *CatHealthService) Columns(columns ...string) *CatHealthService { - s.columns = columns - return s -} - -// Sort is a list of fields to sort by. -func (s *CatHealthService) Sort(fields ...string) *CatHealthService { - s.sort = fields - return s -} - -// DisableTimestamping disables timestamping (default: true). -func (s *CatHealthService) DisableTimestamping(disable bool) *CatHealthService { - s.disableTimestamping = &disable - return s -} - -// buildURL builds the URL for the operation. -func (s *CatHealthService) buildURL() (string, url.Values, error) { - // Build URL - path := "/_cat/health" - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - if v := s.disableTimestamping; v != nil { - params.Set("ts", fmt.Sprint(*v)) - } - if len(s.columns) > 0 { - params.Set("h", strings.Join(s.columns, ",")) - } - return path, params, nil -} - -// Do executes the operation. -func (s *CatHealthService) Do(ctx context.Context) (CatHealthResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatHealthResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatHealthResponse is the outcome of CatHealthService.Do. -type CatHealthResponse []CatHealthResponseRow - -// CatHealthResponseRow is a single row in a CatHealthResponse. -// Notice that not all of these fields might be filled; that depends -// on the number of columns chose in the request (see CatHealthService.Columns). -type CatHealthResponseRow struct { - Epoch int64 `json:"epoch,string"` // e.g. 1527077996 - Timestamp string `json:"timestamp"` // e.g. "12:19:56" - Cluster string `json:"cluster"` // cluster name, e.g. "elasticsearch" - Status string `json:"status"` // health status, e.g. "green", "yellow", or "red" - NodeTotal int `json:"node.total,string"` // total number of nodes - NodeData int `json:"node.data,string"` // number of nodes that can store data - Shards int `json:"shards,string"` // total number of shards - Pri int `json:"pri,string"` // number of primary shards - Relo int `json:"relo,string"` // number of relocating nodes - Init int `json:"init,string"` // number of initializing nodes - Unassign int `json:"unassign,string"` // number of unassigned shards - PendingTasks int `json:"pending_tasks,string"` // number of pending tasks - MaxTaskWaitTime string `json:"max_task_wait_time"` // wait time of longest task pending, e.g. "-" or time in millis - ActiveShardsPercent string `json:"active_shards_percent"` // active number of shards in percent, e.g. 
"100%" -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_indices.go b/vendor/github.com/olivere/elastic/v7/cat_indices.go deleted file mode 100644 index 77725c1..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_indices.go +++ /dev/null @@ -1,531 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CatIndicesService returns the list of indices plus some additional -// information about them. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cat-indices.html -// for details. -type CatIndicesService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - - index string - bytes string // b, k, m, or g - local *bool - masterTimeout string - columns []string - health string // green, yellow, or red - primaryOnly *bool // true for primary shards only - sort []string // list of columns for sort order - headers http.Header -} - -// NewCatIndicesService creates a new CatIndicesService. -func NewCatIndicesService(client *Client) *CatIndicesService { - return &CatIndicesService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatIndicesService) Pretty(pretty bool) *CatIndicesService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CatIndicesService) Human(human bool) *CatIndicesService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatIndicesService) ErrorTrace(errorTrace bool) *CatIndicesService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatIndicesService) FilterPath(filterPath ...string) *CatIndicesService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatIndicesService) Header(name string, value string) *CatIndicesService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatIndicesService) Headers(headers http.Header) *CatIndicesService { - s.headers = headers - return s -} - -// Index is the name of the index to list (by default all indices are returned). -func (s *CatIndicesService) Index(index string) *CatIndicesService { - s.index = index - return s -} - -// Bytes represents the unit in which to display byte values. -// Valid values are: "b", "k", "m", or "g". -func (s *CatIndicesService) Bytes(bytes string) *CatIndicesService { - s.bytes = bytes - return s -} - -// Local indicates to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *CatIndicesService) Local(local bool) *CatIndicesService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. 
-func (s *CatIndicesService) MasterTimeout(masterTimeout string) *CatIndicesService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/indices?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. -func (s *CatIndicesService) Columns(columns ...string) *CatIndicesService { - s.columns = columns - return s -} - -// Health filters indices by their health status. -// Valid values are: "green", "yellow", or "red". -func (s *CatIndicesService) Health(healthState string) *CatIndicesService { - s.health = healthState - return s -} - -// PrimaryOnly when set to true returns stats only for primary shards (default: false). -func (s *CatIndicesService) PrimaryOnly(primaryOnly bool) *CatIndicesService { - s.primaryOnly = &primaryOnly - return s -} - -// Sort is a list of fields to sort by. -func (s *CatIndicesService) Sort(fields ...string) *CatIndicesService { - s.sort = fields - return s -} - -// buildURL builds the URL for the operation. -func (s *CatIndicesService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - - if s.index != "" { - path, err = uritemplates.Expand("/_cat/indices/{index}", map[string]string{ - "index": s.index, - }) - } else { - path = "/_cat/indices" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.bytes != "" { - params.Set("bytes", s.bytes) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.columns) > 0 { - // loop through all columns and apply alias if needed - for i, column := range s.columns { - if fullValueRaw, isAliased := catIndicesResponseRowAliasesMap[column]; isAliased { - // alias can be translated to multiple fields, - // so if translated value contains a comma, than replace the first value - // and append the others - if strings.Contains(fullValueRaw, ",") { - fullValues := strings.Split(fullValueRaw, ",") - s.columns[i] = fullValues[0] - s.columns = append(s.columns, fullValues[1:]...) - } else { - s.columns[i] = fullValueRaw - } - } - } - - params.Set("h", strings.Join(s.columns, ",")) - } - if s.health != "" { - params.Set("health", s.health) - } - if v := s.primaryOnly; v != nil { - params.Set("pri", fmt.Sprint(*v)) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - return path, params, nil -} - -// Do executes the operation. 
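Note that buildURL above translates column aliases through catIndicesResponseRowAliasesMap before setting the h parameter, so short names may be passed to Columns. A usage sketch, with client and context assumed (Do and the response row fields appear just below):

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func printIndices(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewCatIndicesService(client).
		Index("logs-*").            // Index takes a single (possibly wildcard) name here
		Columns("idx", "dc", "ss"). // aliases: idx -> index, dc -> docs.count, ss -> store.size
		Health("yellow").           // only indices currently at yellow health
		Do(ctx)
	if err != nil {
		return err
	}
	for _, row := range res {
		fmt.Printf("%s: %d docs, %s\n", row.Index, row.DocsCount, row.StoreSize)
	}
	return nil
}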
-func (s *CatIndicesService) Do(ctx context.Context) (CatIndicesResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatIndicesResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatIndicesResponse is the outcome of CatIndicesService.Do. -type CatIndicesResponse []CatIndicesResponseRow - -// CatIndicesResponseRow specifies the data returned for one index -// of a CatIndicesResponse. Notice that not all of these fields might -// be filled; that depends on the number of columns chose in the -// request (see CatIndicesService.Columns). -type CatIndicesResponseRow struct { - Health string `json:"health"` // "green", "yellow", or "red" - Status string `json:"status"` // "open" or "closed" - Index string `json:"index"` // index name - UUID string `json:"uuid"` // index uuid - Pri int `json:"pri,string"` // number of primary shards - Rep int `json:"rep,string"` // number of replica shards - DocsCount int `json:"docs.count,string"` // number of available documents - DocsDeleted int `json:"docs.deleted,string"` // number of deleted documents - CreationDate int64 `json:"creation.date,string"` // index creation date (millisecond value), e.g. 1527077221644 - CreationDateString string `json:"creation.date.string"` // index creation date (as string), e.g. "2018-05-23T12:07:01.644Z" - StoreSize string `json:"store.size"` // store size of primaries & replicas, e.g. "4.6kb" - PriStoreSize string `json:"pri.store.size"` // store size of primaries, e.g. 
"230b" - CompletionSize string `json:"completion.size"` // size of completion on primaries & replicas - PriCompletionSize string `json:"pri.completion.size"` // size of completion on primaries - FielddataMemorySize string `json:"fielddata.memory_size"` // used fielddata cache on primaries & replicas - PriFielddataMemorySize string `json:"pri.fielddata.memory_size"` // used fielddata cache on primaries - FielddataEvictions int `json:"fielddata.evictions,string"` // fielddata evictions on primaries & replicas - PriFielddataEvictions int `json:"pri.fielddata.evictions,string"` // fielddata evictions on primaries - QueryCacheMemorySize string `json:"query_cache.memory_size"` // used query cache on primaries & replicas - PriQueryCacheMemorySize string `json:"pri.query_cache.memory_size"` // used query cache on primaries - QueryCacheEvictions int `json:"query_cache.evictions,string"` // query cache evictions on primaries & replicas - PriQueryCacheEvictions int `json:"pri.query_cache.evictions,string"` // query cache evictions on primaries - RequestCacheMemorySize string `json:"request_cache.memory_size"` // used request cache on primaries & replicas - PriRequestCacheMemorySize string `json:"pri.request_cache.memory_size"` // used request cache on primaries - RequestCacheEvictions int `json:"request_cache.evictions,string"` // request cache evictions on primaries & replicas - PriRequestCacheEvictions int `json:"pri.request_cache.evictions,string"` // request cache evictions on primaries - RequestCacheHitCount int `json:"request_cache.hit_count,string"` // request cache hit count on primaries & replicas - PriRequestCacheHitCount int `json:"pri.request_cache.hit_count,string"` // request cache hit count on primaries - RequestCacheMissCount int `json:"request_cache.miss_count,string"` // request cache miss count on primaries & replicas - PriRequestCacheMissCount int `json:"pri.request_cache.miss_count,string"` // request cache miss count on primaries - FlushTotal int `json:"flush.total,string"` // number of flushes on primaries & replicas - PriFlushTotal int `json:"pri.flush.total,string"` // number of flushes on primaries - FlushTotalTime string `json:"flush.total_time"` // time spent in flush on primaries & replicas - PriFlushTotalTime string `json:"pri.flush.total_time"` // time spent in flush on primaries - GetCurrent int `json:"get.current,string"` // number of current get ops on primaries & replicas - PriGetCurrent int `json:"pri.get.current,string"` // number of current get ops on primaries - GetTime string `json:"get.time"` // time spent in get on primaries & replicas - PriGetTime string `json:"pri.get.time"` // time spent in get on primaries - GetTotal int `json:"get.total,string"` // number of get ops on primaries & replicas - PriGetTotal int `json:"pri.get.total,string"` // number of get ops on primaries - GetExistsTime string `json:"get.exists_time"` // time spent in successful gets on primaries & replicas - PriGetExistsTime string `json:"pri.get.exists_time"` // time spent in successful gets on primaries - GetExistsTotal int `json:"get.exists_total,string"` // number of successful gets on primaries & replicas - PriGetExistsTotal int `json:"pri.get.exists_total,string"` // number of successful gets on primaries - GetMissingTime string `json:"get.missing_time"` // time spent in failed gets on primaries & replicas - PriGetMissingTime string `json:"pri.get.missing_time"` // time spent in failed gets on primaries - GetMissingTotal int `json:"get.missing_total,string"` // number of failed 
gets on primaries & replicas - PriGetMissingTotal int `json:"pri.get.missing_total,string"` // number of failed gets on primaries - IndexingDeleteCurrent int `json:"indexing.delete_current,string"` // number of current deletions on primaries & replicas - PriIndexingDeleteCurrent int `json:"pri.indexing.delete_current,string"` // number of current deletions on primaries - IndexingDeleteTime string `json:"indexing.delete_time"` // time spent in deletions on primaries & replicas - PriIndexingDeleteTime string `json:"pri.indexing.delete_time"` // time spent in deletions on primaries - IndexingDeleteTotal int `json:"indexing.delete_total,string"` // number of delete ops on primaries & replicas - PriIndexingDeleteTotal int `json:"pri.indexing.delete_total,string"` // number of delete ops on primaries - IndexingIndexCurrent int `json:"indexing.index_current,string"` // number of current indexing on primaries & replicas - PriIndexingIndexCurrent int `json:"pri.indexing.index_current,string"` // number of current indexing on primaries - IndexingIndexTime string `json:"indexing.index_time"` // time spent in indexing on primaries & replicas - PriIndexingIndexTime string `json:"pri.indexing.index_time"` // time spent in indexing on primaries - IndexingIndexTotal int `json:"indexing.index_total,string"` // number of index ops on primaries & replicas - PriIndexingIndexTotal int `json:"pri.indexing.index_total,string"` // number of index ops on primaries - IndexingIndexFailed int `json:"indexing.index_failed,string"` // number of failed indexing ops on primaries & replicas - PriIndexingIndexFailed int `json:"pri.indexing.index_failed,string"` // number of failed indexing ops on primaries - MergesCurrent int `json:"merges.current,string"` // number of current merges on primaries & replicas - PriMergesCurrent int `json:"pri.merges.current,string"` // number of current merges on primaries - MergesCurrentDocs int `json:"merges.current_docs,string"` // number of current merging docs on primaries & replicas - PriMergesCurrentDocs int `json:"pri.merges.current_docs,string"` // number of current merging docs on primaries - MergesCurrentSize string `json:"merges.current_size"` // size of current merges on primaries & replicas - PriMergesCurrentSize string `json:"pri.merges.current_size"` // size of current merges on primaries - MergesTotal int `json:"merges.total,string"` // number of completed merge ops on primaries & replicas - PriMergesTotal int `json:"pri.merges.total,string"` // number of completed merge ops on primaries - MergesTotalDocs int `json:"merges.total_docs,string"` // docs merged on primaries & replicas - PriMergesTotalDocs int `json:"pri.merges.total_docs,string"` // docs merged on primaries - MergesTotalSize string `json:"merges.total_size"` // size merged on primaries & replicas - PriMergesTotalSize string `json:"pri.merges.total_size"` // size merged on primaries - MergesTotalTime string `json:"merges.total_time"` // time spent in merges on primaries & replicas - PriMergesTotalTime string `json:"pri.merges.total_time"` // time spent in merges on primaries - RefreshTotal int `json:"refresh.total,string"` // total refreshes on primaries & replicas - PriRefreshTotal int `json:"pri.refresh.total,string"` // total refreshes on primaries - RefreshExternalTotal int `json:"refresh.external_total,string"` // total external refreshes on primaries & replicas - PriRefreshExternalTotal int `json:"pri.refresh.external_total,string"` // total external refreshes on primaries - RefreshTime string 
`json:"refresh.time"` // time spent in refreshes on primaries & replicas - PriRefreshTime string `json:"pri.refresh.time"` // time spent in refreshes on primaries - RefreshExternalTime string `json:"refresh.external_time"` // external time spent in refreshes on primaries & replicas - PriRefreshExternalTime string `json:"pri.refresh.external_time"` // external time spent in refreshes on primaries - RefreshListeners int `json:"refresh.listeners,string"` // number of pending refresh listeners on primaries & replicas - PriRefreshListeners int `json:"pri.refresh.listeners,string"` // number of pending refresh listeners on primaries - SearchFetchCurrent int `json:"search.fetch_current,string"` // current fetch phase ops on primaries & replicas - PriSearchFetchCurrent int `json:"pri.search.fetch_current,string"` // current fetch phase ops on primaries - SearchFetchTime string `json:"search.fetch_time"` // time spent in fetch phase on primaries & replicas - PriSearchFetchTime string `json:"pri.search.fetch_time"` // time spent in fetch phase on primaries - SearchFetchTotal int `json:"search.fetch_total,string"` // total fetch ops on primaries & replicas - PriSearchFetchTotal int `json:"pri.search.fetch_total,string"` // total fetch ops on primaries - SearchOpenContexts int `json:"search.open_contexts,string"` // open search contexts on primaries & replicas - PriSearchOpenContexts int `json:"pri.search.open_contexts,string"` // open search contexts on primaries - SearchQueryCurrent int `json:"search.query_current,string"` // current query phase ops on primaries & replicas - PriSearchQueryCurrent int `json:"pri.search.query_current,string"` // current query phase ops on primaries - SearchQueryTime string `json:"search.query_time"` // time spent in query phase on primaries & replicas, e.g. "0s" - PriSearchQueryTime string `json:"pri.search.query_time"` // time spent in query phase on primaries, e.g. "0s" - SearchQueryTotal int `json:"search.query_total,string"` // total query phase ops on primaries & replicas - PriSearchQueryTotal int `json:"pri.search.query_total,string"` // total query phase ops on primaries - SearchScrollCurrent int `json:"search.scroll_current,string"` // open scroll contexts on primaries & replicas - PriSearchScrollCurrent int `json:"pri.search.scroll_current,string"` // open scroll contexts on primaries - SearchScrollTime string `json:"search.scroll_time"` // time scroll contexts held open on primaries & replicas, e.g. "0s" - PriSearchScrollTime string `json:"pri.search.scroll_time"` // time scroll contexts held open on primaries, e.g. "0s" - SearchScrollTotal int `json:"search.scroll_total,string"` // completed scroll contexts on primaries & replicas - PriSearchScrollTotal int `json:"pri.search.scroll_total,string"` // completed scroll contexts on primaries - SearchThrottled bool `json:"search.throttled,string"` // indicates if the index is search throttled - SegmentsCount int `json:"segments.count,string"` // number of segments on primaries & replicas - PriSegmentsCount int `json:"pri.segments.count,string"` // number of segments on primaries - SegmentsMemory string `json:"segments.memory"` // memory used by segments on primaries & replicas, e.g. "1.3kb" - PriSegmentsMemory string `json:"pri.segments.memory"` // memory used by segments on primaries, e.g. "1.3kb" - SegmentsIndexWriterMemory string `json:"segments.index_writer_memory"` // memory used by index writer on primaries & replicas, e.g. 
"0b" - PriSegmentsIndexWriterMemory string `json:"pri.segments.index_writer_memory"` // memory used by index writer on primaries, e.g. "0b" - SegmentsVersionMapMemory string `json:"segments.version_map_memory"` // memory used by version map on primaries & replicas, e.g. "0b" - PriSegmentsVersionMapMemory string `json:"pri.segments.version_map_memory"` // memory used by version map on primaries, e.g. "0b" - SegmentsFixedBitsetMemory string `json:"segments.fixed_bitset_memory"` // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries & replicas, e.g. "0b" - PriSegmentsFixedBitsetMemory string `json:"pri.segments.fixed_bitset_memory"` // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries, e.g. "0b" - WarmerCurrent int `json:"warmer.current,string"` // current warmer ops on primaries & replicas - PriWarmerCurrent int `json:"pri.warmer.current,string"` // current warmer ops on primaries - WarmerTotal int `json:"warmer.total,string"` // total warmer ops on primaries & replicas - PriWarmerTotal int `json:"pri.warmer.total,string"` // total warmer ops on primaries - WarmerTotalTime string `json:"warmer.total_time"` // time spent in warmers on primaries & replicas, e.g. "47s" - PriWarmerTotalTime string `json:"pri.warmer.total_time"` // time spent in warmers on primaries, e.g. "47s" - SuggestCurrent int `json:"suggest.current,string"` // number of current suggest ops on primaries & replicas - PriSuggestCurrent int `json:"pri.suggest.current,string"` // number of current suggest ops on primaries - SuggestTime string `json:"suggest.time"` // time spend in suggest on primaries & replicas, "31s" - PriSuggestTime string `json:"pri.suggest.time"` // time spend in suggest on primaries, e.g. "31s" - SuggestTotal int `json:"suggest.total,string"` // number of suggest ops on primaries & replicas - PriSuggestTotal int `json:"pri.suggest.total,string"` // number of suggest ops on primaries - MemoryTotal string `json:"memory.total"` // total user memory on primaries & replicas, e.g. "1.5kb" - PriMemoryTotal string `json:"pri.memory.total"` // total user memory on primaries, e.g. 
"1.5kb" -} - -// catIndicesResponseRowAliasesMap holds the global map for columns aliases -// the map is used by CatIndicesService.buildURL -// for backwards compatibility some fields are able to have the same aliases -// that means that one alias can be translated to different columns (from different elastic versions) -// example for understanding: rto -> RefreshTotal, RefreshExternalTotal -var catIndicesResponseRowAliasesMap = map[string]string{ - "qce": "query_cache.evictions", - "searchFetchTime": "search.fetch_time", - "memoryTotal": "memory.total", - "requestCacheEvictions": "request_cache.evictions", - "ftt": "flush.total_time", - "iic": "indexing.index_current", - "mtt": "merges.total_time", - "scti": "search.scroll_time", - "searchScrollTime": "search.scroll_time", - "segmentsCount": "segments.count", - "getTotal": "get.total", - "sfti": "search.fetch_time", - "searchScrollCurrent": "search.scroll_current", - "svmm": "segments.version_map_memory", - "warmerTotalTime": "warmer.total_time", - "r": "rep", - "indexingIndexTime": "indexing.index_time", - "refreshTotal": "refresh.total,refresh.external_total", - "scc": "search.scroll_current", - "suggestTime": "suggest.time", - "idc": "indexing.delete_current", - "rti": "refresh.time,refresh.external_time", - "sfto": "search.fetch_total", - "completionSize": "completion.size", - "mt": "merges.total", - "segmentsVersionMapMemory": "segments.version_map_memory", - "rto": "refresh.total,refresh.external_total", - "id": "uuid", - "dd": "docs.deleted", - "docsDeleted": "docs.deleted", - "fielddataMemory": "fielddata.memory_size", - "getTime": "get.time", - "getExistsTime": "get.exists_time", - "mtd": "merges.total_docs", - "rli": "refresh.listeners", - "h": "health", - "cds": "creation.date.string", - "rcmc": "request_cache.miss_count", - "iif": "indexing.index_failed", - "warmerCurrent": "warmer.current", - "gti": "get.time", - "indexingIndexFailed": "indexing.index_failed", - "mts": "merges.total_size", - "sqti": "search.query_time", - "segmentsIndexWriterMemory": "segments.index_writer_memory", - "iiti": "indexing.index_time", - "iito": "indexing.index_total", - "cd": "creation.date", - "gc": "get.current", - "searchFetchTotal": "search.fetch_total", - "sqc": "search.query_current", - "segmentsMemory": "segments.memory", - "dc": "docs.count", - "qcm": "query_cache.memory_size", - "queryCacheMemory": "query_cache.memory_size", - "mergesTotalDocs": "merges.total_docs", - "searchOpenContexts": "search.open_contexts", - "shards.primary": "pri", - "cs": "completion.size", - "mergesTotalTIme": "merges.total_time", - "wtt": "warmer.total_time", - "mergesCurrentSize": "merges.current_size", - "mergesTotal": "merges.total", - "refreshTime": "refresh.time,refresh.external_time", - "wc": "warmer.current", - "p": "pri", - "idti": "indexing.delete_time", - "searchQueryCurrent": "search.query_current", - "warmerTotal": "warmer.total", - "suggestTotal": "suggest.total", - "tm": "memory.total", - "ss": "store.size", - "ft": "flush.total", - "getExistsTotal": "get.exists_total", - "scto": "search.scroll_total", - "s": "status", - "queryCacheEvictions": "query_cache.evictions", - "rce": "request_cache.evictions", - "geto": "get.exists_total", - "refreshListeners": "refresh.listeners", - "suto": "suggest.total", - "storeSize": "store.size", - "gmti": "get.missing_time", - "indexingIdexCurrent": "indexing.index_current", - "searchFetchCurrent": "search.fetch_current", - "idx": "index", - "fm": "fielddata.memory_size", - "geti": "get.exists_time", - 
"indexingDeleteCurrent": "indexing.delete_current", - "mergesCurrentDocs": "merges.current_docs", - "sth": "search.throttled", - "flushTotal": "flush.total", - "sfc": "search.fetch_current", - "wto": "warmer.total", - "suti": "suggest.time", - "shardsReplica": "rep", - "mergesCurrent": "merges.current", - "mcs": "merges.current_size", - "so": "search.open_contexts", - "i": "index", - "siwm": "segments.index_writer_memory", - "sfbm": "segments.fixed_bitset_memory", - "fe": "fielddata.evictions", - "requestCacheMissCount": "request_cache.miss_count", - "idto": "indexing.delete_total", - "mergesTotalSize": "merges.total_size", - "suc": "suggest.current", - "suggestCurrent": "suggest.current", - "flushTotalTime": "flush.total_time", - "getMissingTotal": "get.missing_total", - "sqto": "search.query_total", - "searchScrollTotal": "search.scroll_total", - "fixedBitsetMemory": "segments.fixed_bitset_memory", - "getMissingTime": "get.missing_time", - "indexingDeleteTotal": "indexing.delete_total", - "mcd": "merges.current_docs", - "docsCount": "docs.count", - "gto": "get.total", - "mc": "merges.current", - "fielddataEvictions": "fielddata.evictions", - "rcm": "request_cache.memory_size", - "requestCacheHitCount": "request_cache.hit_count", - "gmto": "get.missing_total", - "searchQueryTime": "search.query_time", - "shards.replica": "rep", - "requestCacheMemory": "request_cache.memory_size", - "rchc": "request_cache.hit_count", - "getCurrent": "get.current", - "indexingIndexTotal": "indexing.index_total", - "sc": "segments.count,segments.memory", - "shardsPrimary": "pri", - "indexingDeleteTime": "indexing.delete_time", - "searchQueryTotal": "search.query_total", -} diff --git a/vendor/github.com/olivere/elastic/v7/cat_shards.go b/vendor/github.com/olivere/elastic/v7/cat_shards.go deleted file mode 100644 index c2ccfc5..0000000 --- a/vendor/github.com/olivere/elastic/v7/cat_shards.go +++ /dev/null @@ -1,387 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CatShardsService returns the list of shards plus some additional -// information about them. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/cat-shards.html -// for details. -type CatShardsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - - index []string - bytes string // b, k, kb, m, mb, g, gb, t, tb, p, or pb - local *bool - masterTimeout string - columns []string - time string // d, h, m, s, ms, micros, or nanos - sort []string // list of columns for sort order - headers http.Header -} - -// NewCatShardsService creates a new CatShardsService. -func NewCatShardsService(client *Client) *CatShardsService { - return &CatShardsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CatShardsService) Pretty(pretty bool) *CatShardsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". 
-func (s *CatShardsService) Human(human bool) *CatShardsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CatShardsService) ErrorTrace(errorTrace bool) *CatShardsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CatShardsService) FilterPath(filterPath ...string) *CatShardsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CatShardsService) Header(name string, value string) *CatShardsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CatShardsService) Headers(headers http.Header) *CatShardsService { - s.headers = headers - return s -} - -// Index is the name of the index to list (by default all indices are returned). -func (s *CatShardsService) Index(index ...string) *CatShardsService { - s.index = index - return s -} - -// Bytes represents the unit in which to display byte values. -// Valid values are: "b", "k", "kb", "m", "mb", "g", "gb", "t", "tb", "p" or "pb". -func (s *CatShardsService) Bytes(bytes string) *CatShardsService { - s.bytes = bytes - return s -} - -// Local indicates whether to return local information, i.e. not to retrieve -// the state from the master node (default: false). -func (s *CatShardsService) Local(local bool) *CatShardsService { - s.local = &local - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *CatShardsService) MasterTimeout(masterTimeout string) *CatShardsService { - s.masterTimeout = masterTimeout - return s -} - -// Columns to return in the response. -// -// To get a list of all possible columns to return, run the following command -// in your terminal: -// -// Example: -// curl 'http://localhost:9200/_cat/shards?help' -// -// You can use Columns("*") to return all possible columns. That might take -// a little longer than the default set of columns. -func (s *CatShardsService) Columns(columns ...string) *CatShardsService { - s.columns = columns - return s -} - -// Sort is a list of fields to sort by. -func (s *CatShardsService) Sort(fields ...string) *CatShardsService { - s.sort = fields - return s -} - -// Time specifies the unit in which time values are returned, e.g. "d", "h", "m", "s", "ms", "micros", or "nanos". -func (s *CatShardsService) Time(time string) *CatShardsService { - s.time = time - return s -} - -// buildURL builds the URL for the operation.
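A minimal usage sketch of the builder above, assuming an initialized *elastic.Client named client and a context ctx (both placeholders):

rows, err := elastic.NewCatShardsService(client).
	Index("logs-*").
	Columns("index", "shard", "prirep", "state", "dc"). // "dc" is an alias for "docs.count"
	Sort("index").
	Do(ctx)
if err != nil {
	// handle error
}
for _, row := range rows {
	fmt.Printf("%s shard %d (%s): %s, %d docs\n", row.Index, row.Shard, row.Prirep, row.State, row.Docs)
}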
-func (s *CatShardsService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/_cat/shards/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_cat/shards" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{ - "format": []string{"json"}, // always returns as JSON - } - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.bytes != "" { - params.Set("bytes", s.bytes) - } - if s.time != "" { - params.Set("time", s.time) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if len(s.columns) > 0 { - // loop through all columns and apply alias if needed - for i, column := range s.columns { - if fullValueRaw, isAliased := catShardsResponseRowAliasesMap[column]; isAliased { - // alias can be translated to multiple fields, - // so if the translated value contains a comma, then replace the first value - // and append the others - if strings.Contains(fullValueRaw, ",") { - fullValues := strings.Split(fullValueRaw, ",") - s.columns[i] = fullValues[0] - s.columns = append(s.columns, fullValues[1:]...) - } else { - s.columns[i] = fullValueRaw - } - } - } - params.Set("h", strings.Join(s.columns, ",")) - } - if len(s.sort) > 0 { - params.Set("s", strings.Join(s.sort, ",")) - } - return path, params, nil -} - -// Do executes the operation. -func (s *CatShardsService) Do(ctx context.Context) (CatShardsResponse, error) { - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret CatShardsResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// CatShardsResponse is the outcome of CatShardsService.Do. -type CatShardsResponse []CatShardsResponseRow - -// CatShardsResponseRow specifies the data returned for one index -// of a CatShardsResponse. Notice that not all of these fields might -// be filled; that depends on the number of columns chosen in the -// request (see CatShardsService.Columns). -type CatShardsResponseRow struct { - Index string `json:"index"` // index name - UUID string `json:"uuid"` // index uuid - Shard int `json:"shard,string"` // shard number, e.g. 1 - Prirep string `json:"prirep"` // "r" for replica, "p" for primary - State string `json:"state"` // STARTED, INITIALIZING, RELOCATING, or UNASSIGNED - Docs int64 `json:"docs,string"` // number of documents, e.g. 142847 - Store string `json:"store"` // size, e.g.
"40mb" - IP string `json:"ip"` // IP address - ID string `json:"id"` - Node string `json:"node"` // Node name - SyncID string `json:"sync_id"` - UnassignedReason string `json:"unassigned.reason"` - UnassignedAt string `json:"unassigned.at"` - UnassignedFor string `json:"unassigned.for"` - UnassignedDetails string `json:"unassigned.details"` - RecoverysourceType string `json:"recoverysource.type"` - CompletionSize string `json:"completion.size"` // size of completion on primaries & replicas - FielddataMemorySize string `json:"fielddata.memory_size"` // used fielddata cache on primaries & replicas - FielddataEvictions int `json:"fielddata.evictions,string"` // fielddata evictions on primaries & replicas - QueryCacheMemorySize string `json:"query_cache.memory_size"` // used query cache on primaries & replicas - QueryCacheEvictions int `json:"query_cache.evictions,string"` // query cache evictions on primaries & replicas - FlushTotal int `json:"flush.total,string"` // number of flushes on primaries & replicas - FlushTotalTime string `json:"flush.total_time"` // time spent in flush on primaries & replicas - GetCurrent int `json:"get.current,string"` // number of current get ops on primaries & replicas - GetTime string `json:"get.time"` // time spent in get on primaries & replicas - GetTotal int `json:"get.total,string"` // number of get ops on primaries & replicas - GetExistsTime string `json:"get.exists_time"` // time spent in successful gets on primaries & replicas - GetExistsTotal int `json:"get.exists_total,string"` // number of successful gets on primaries & replicas - GetMissingTime string `json:"get.missing_time"` // time spent in failed gets on primaries & replicas - GetMissingTotal int `json:"get.missing_total,string"` // number of failed gets on primaries & replicas - IndexingDeleteCurrent int `json:"indexing.delete_current,string"` // number of current deletions on primaries & replicas - IndexingDeleteTime string `json:"indexing.delete_time"` // time spent in deletions on primaries & replicas - IndexingDeleteTotal int `json:"indexing.delete_total,string"` // number of delete ops on primaries & replicas - IndexingIndexCurrent int `json:"indexing.index_current,string"` // number of current indexing on primaries & replicas - IndexingIndexTime string `json:"indexing.index_time"` // time spent in indexing on primaries & replicas - IndexingIndexTotal int `json:"indexing.index_total,string"` // number of index ops on primaries & replicas - IndexingIndexFailed int `json:"indexing.index_failed,string"` // number of failed indexing ops on primaries & replicas - MergesCurrent int `json:"merges.current,string"` // number of current merges on primaries & replicas - MergesCurrentDocs int `json:"merges.current_docs,string"` // number of current merging docs on primaries & replicas - MergesCurrentSize string `json:"merges.current_size"` // size of current merges on primaries & replicas - MergesTotal int `json:"merges.total,string"` // number of completed merge ops on primaries & replicas - MergesTotalDocs int `json:"merges.total_docs,string"` // docs merged on primaries & replicas - MergesTotalSize string `json:"merges.total_size"` // size merged on primaries & replicas - MergesTotalTime string `json:"merges.total_time"` // time spent in merges on primaries & replicas - RefreshTotal int `json:"refresh.total,string"` // total refreshes on primaries & replicas - RefreshExternalTotal int `json:"refresh.external_total,string"` // total external refreshes on primaries & replicas - RefreshTime string 
`json:"refresh.time"` // time spent in refreshes on primaries & replicas - RefreshExternalTime string `json:"refresh.external_time"` // external time spent in refreshes on primaries & replicas - RefreshListeners int `json:"refresh.listeners,string"` // number of pending refresh listeners on primaries & replicas - SearchFetchCurrent int `json:"search.fetch_current,string"` // current fetch phase ops on primaries & replicas - SearchFetchTime string `json:"search.fetch_time"` // time spent in fetch phase on primaries & replicas - SearchFetchTotal int `json:"search.fetch_total,string"` // total fetch ops on primaries & replicas - SearchOpenContexts int `json:"search.open_contexts,string"` // open search contexts on primaries & replicas - SearchQueryCurrent int `json:"search.query_current,string"` // current query phase ops on primaries & replicas - SearchQueryTime string `json:"search.query_time"` // time spent in query phase on primaries & replicas, e.g. "0s" - SearchQueryTotal int `json:"search.query_total,string"` // total query phase ops on primaries & replicas - SearchScrollCurrent int `json:"search.scroll_current,string"` // open scroll contexts on primaries & replicas - SearchScrollTime string `json:"search.scroll_time"` // time scroll contexts held open on primaries & replicas, e.g. "0s" - SearchScrollTotal int `json:"search.scroll_total,string"` // completed scroll contexts on primaries & replicas - SearchThrottled bool `json:"search.throttled,string"` // indicates if the index is search throttled - SegmentsCount int `json:"segments.count,string"` // number of segments on primaries & replicas - SegmentsMemory string `json:"segments.memory"` // memory used by segments on primaries & replicas, e.g. "1.3kb" - SegmentsIndexWriterMemory string `json:"segments.index_writer_memory"` // memory used by index writer on primaries & replicas, e.g. "0b" - SegmentsVersionMapMemory string `json:"segments.version_map_memory"` // memory used by version map on primaries & replicas, e.g. "0b" - SegmentsFixedBitsetMemory string `json:"segments.fixed_bitset_memory"` // memory used by fixed bit sets for nested object field types and type filters for types referred in _parent fields on primaries & replicas, e.g. "0b" - SeqNoMax int `json:"seq_no.max,string"` - SeqNoLocalCheckpoint int `json:"seq_no.local_checkpoint,string"` - SeqNoGlobalCheckpoint int `json:"seq_no.global_checkpoint,string"` - WarmerCurrent int `json:"warmer.current,string"` // current warmer ops on primaries & replicas - WarmerTotal int `json:"warmer.total,string"` // total warmer ops on primaries & replicas - WarmerTotalTime string `json:"warmer.total_time"` // time spent in warmers on primaries & replicas, e.g. "47s" -} - -// catShardsResponseRowAliasesMap holds the global map for columns aliases -// the map is used by CatShardsService.buildURL. 
-// For backwards compatibility some fields are able to have the same aliases -// that means that one alias can be translated to different columns (from different elastic versions) -// example for understanding: rto -> RefreshTotal, RefreshExternalTotal -var catShardsResponseRowAliasesMap = map[string]string{ - "sync_id": "sync_id", - "ur": "unassigned.reason", - "ua": "unassigned.at", - "uf": "unassigned.for", - "ud": "unassigned.details", - "rs": "recoverysource.type", - "cs": "completion.size", - "fm": "fielddata.memory_size", - "fe": "fielddata.evictions", - "qcm": "query_cache.memory_size", - "qce": "query_cache.evictions", - "ft": "flush.total", - "ftt": "flush.total_time", - "gc": "get.current", - "gti": "get.time", - "gto": "get.total", - "geti": "get.exists_time", - "geto": "get.exists_total", - "gmti": "get.missing_time", - "gmto": "get.missing_total", - "idc": "indexing.delete_current", - "idti": "indexing.delete_time", - "idto": "indexing.delete_total", - "iic": "indexing.index_current", - "iiti": "indexing.index_time", - "iito": "indexing.index_total", - "iif": "indexing.index_failed", - "mc": "merges.current", - "mcd": "merges.current_docs", - "mcs": "merges.current_size", - "mt": "merges.total", - "mtd": "merges.total_docs", - "mts": "merges.total_size", - "mtt": "merges.total_time", - "rto": "refresh.total", - "rti": "refresh.time", - // "rto": "refresh.external_total", - // "rti": "refresh.external_time", - "rli": "refresh.listeners", - "sfc": "search.fetch_current", - "sfti": "search.fetch_time", - "sfto": "search.fetch_total", - "so": "search.open_contexts", - "sqc": "search.query_current", - "sqti": "search.query_time", - "sqto": "search.query_total", - "scc": "search.scroll_current", - "scti": "search.scroll_time", - "scto": "search.scroll_total", - "sc": "segments.count", - "sm": "segments.memory", - "siwm": "segments.index_writer_memory", - "svmm": "segments.version_map_memory", - "sfbm": "segments.fixed_bitset_memory", - "sqm": "seq_no.max", - "sql": "seq_no.local_checkpoint", - "sqg": "seq_no.global_checkpoint", - "wc": "warmer.current", - "wto": "warmer.total", - "wtt": "warmer.total_time", -} diff --git a/vendor/github.com/olivere/elastic/v7/clear_scroll.go b/vendor/github.com/olivere/elastic/v7/clear_scroll.go deleted file mode 100644 index a71cad6..0000000 --- a/vendor/github.com/olivere/elastic/v7/clear_scroll.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" -) - -// ClearScrollService clears one or more scroll contexts by their ids. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#_clear_scroll_api -// for details. -type ClearScrollService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - scrollId []string -} - -// NewClearScrollService creates a new ClearScrollService. 
-func NewClearScrollService(client *Client) *ClearScrollService { - return &ClearScrollService{ - client: client, - scrollId: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ClearScrollService) Pretty(pretty bool) *ClearScrollService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ClearScrollService) Human(human bool) *ClearScrollService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ClearScrollService) ErrorTrace(errorTrace bool) *ClearScrollService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ClearScrollService) FilterPath(filterPath ...string) *ClearScrollService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ClearScrollService) Header(name string, value string) *ClearScrollService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ClearScrollService) Headers(headers http.Header) *ClearScrollService { - s.headers = headers - return s -} - -// ScrollId is a list of scroll IDs to clear. -// Use _all to clear all search contexts. -func (s *ClearScrollService) ScrollId(scrollIds ...string) *ClearScrollService { - s.scrollId = append(s.scrollId, scrollIds...) - return s -} - -// buildURL builds the URL for the operation. -func (s *ClearScrollService) buildURL() (string, url.Values, error) { - // Build URL - path := "/_search/scroll/" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClearScrollService) Validate() error { - var invalid []string - if len(s.scrollId) == 0 { - invalid = append(invalid, "ScrollId") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *ClearScrollService) Do(ctx context.Context) (*ClearScrollResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body := map[string][]string{ - "scroll_id": s.scrollId, - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClearScrollResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClearScrollResponse is the response of ClearScrollService.Do. 
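A usage sketch for the service above, assuming client, ctx, and a scrollID obtained from an earlier scroll request (the response type is defined just below):

resp, err := elastic.NewClearScrollService(client).
	ScrollId(scrollID). // or ScrollId("_all") to drop every scroll context
	Do(ctx)
if err != nil {
	// handle error
}
fmt.Printf("succeeded=%v freed=%d\n", resp.Succeeded, resp.NumFreed)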
-type ClearScrollResponse struct { - Succeeded bool `json:"succeeded,omitempty"` - NumFreed int `json:"num_freed,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/client.go b/vendor/github.com/olivere/elastic/v7/client.go deleted file mode 100644 index ca50487..0000000 --- a/vendor/github.com/olivere/elastic/v7/client.go +++ /dev/null @@ -1,2108 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "net/http" - "net/http/httputil" - "net/url" - "os" - "strings" - "sync" - "time" - - "github.com/pkg/errors" - - "github.com/olivere/elastic/v7/config" -) - -const ( - // Version is the current version of Elastic. - Version = "7.0.14" - - // DefaultURL is the default endpoint of Elasticsearch on the local machine. - // It is used e.g. when initializing a new Client without a specific URL. - DefaultURL = "http://127.0.0.1:9200" - - // DefaultScheme is the default protocol scheme to use when sniffing - // the Elasticsearch cluster. - DefaultScheme = "http" - - // DefaultHealthcheckEnabled specifies if healthchecks are enabled by default. - DefaultHealthcheckEnabled = true - - // DefaultHealthcheckTimeoutStartup is the time the healthcheck waits - // for a response from Elasticsearch on startup, i.e. when creating a - // client. After the client is started, a shorter timeout is commonly used - // (its default is specified in DefaultHealthcheckTimeout). - DefaultHealthcheckTimeoutStartup = 5 * time.Second - - // DefaultHealthcheckTimeout specifies the time a running client waits for - // a response from Elasticsearch. Notice that the healthcheck timeout - // when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup). - DefaultHealthcheckTimeout = 1 * time.Second - - // DefaultHealthcheckInterval is the default interval between - // two health checks of the nodes in the cluster. - DefaultHealthcheckInterval = 60 * time.Second - - // DefaultSnifferEnabled specifies if the sniffer is enabled by default. - DefaultSnifferEnabled = true - - // DefaultSnifferInterval is the interval between two sniffing procedures, - // i.e. the lookup of all nodes in the cluster and their addition/removal - // from the list of actual connections. - DefaultSnifferInterval = 15 * time.Minute - - // DefaultSnifferTimeoutStartup is the default timeout for the sniffing - // process that is initiated while creating a new client. For subsequent - // sniffing processes, DefaultSnifferTimeout is used (by default). - DefaultSnifferTimeoutStartup = 5 * time.Second - - // DefaultSnifferTimeout is the default timeout after which the - // sniffing process times out. Notice that for the initial sniffing - // process, DefaultSnifferTimeoutStartup is used. - DefaultSnifferTimeout = 2 * time.Second - - // DefaultSendGetBodyAs is the HTTP method to use when elastic is sending - // a GET request with a body. - DefaultSendGetBodyAs = "GET" - - // DefaultGzipEnabled specifies if gzip compression is enabled by default. - DefaultGzipEnabled = false - - // off is used to disable timeouts. - off = -1 * time.Second -) - -var ( - // ErrNoClient is raised when no Elasticsearch node is available. - ErrNoClient = errors.New("no Elasticsearch node available") - - // ErrRetry is raised when a request cannot be executed after the configured - // number of retries. 
- ErrRetry = errors.New("cannot connect after several retries") - - // ErrTimeout is raised when a request timed out, e.g. when WaitForStatus - // didn't return in time. - ErrTimeout = errors.New("timeout") - - // noRetries is a retrier that does not retry. - noRetries = NewStopRetrier() - - // noDeprecationLog is a no-op for logging deprecations. - noDeprecationLog = func(*http.Request, *http.Response) {} -) - -// Doer is an interface to perform HTTP requests. -// It can be used for mocking. -type Doer interface { - Do(*http.Request) (*http.Response, error) -} - -// ClientOptionFunc is a function that configures a Client. -// It is used in NewClient. -type ClientOptionFunc func(*Client) error - -// Client is an Elasticsearch client. Create one by calling NewClient. -type Client struct { - c Doer // e.g. a net/*http.Client to use for requests - - connsMu sync.RWMutex // connsMu guards the next block - conns []*conn // all connections - cindex int // index into conns - - mu sync.RWMutex // guards the next block - urls []string // set of URLs passed initially to the client - running bool // true if the client's background processes are running - errorlog Logger // error log for critical messages - infolog Logger // information log for e.g. response times - tracelog Logger // trace log for debugging - deprecationlog func(*http.Request, *http.Response) - scheme string // http or https - healthcheckEnabled bool // healthchecks enabled or disabled - healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup - healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch - healthcheckInterval time.Duration // interval between healthchecks - healthcheckStop chan bool // notify healthchecker to stop, and notify back - snifferEnabled bool // sniffer enabled or disabled - snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup - snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API - snifferInterval time.Duration // interval between sniffing - snifferCallback SnifferCallback // callback to modify the sniffing decision - snifferStop chan bool // notify sniffer to stop, and notify back - decoder Decoder // used to decode data sent from Elasticsearch - basicAuth bool // indicates whether to send HTTP Basic Auth credentials - basicAuthUsername string // username for HTTP Basic Auth - basicAuthPassword string // password for HTTP Basic Auth - sendGetBodyAs string // override for when sending a GET with a body - gzipEnabled bool // gzip compression enabled or disabled (default) - requiredPlugins []string // list of required plugins - retrier Retrier // strategy for retries - headers http.Header // a list of default headers to add to each request -} - -// NewClient creates a new client to work with Elasticsearch. -// -// NewClient, by default, is meant to be long-lived and shared across -// your application. If you need a short-lived client, e.g. for request-scope, -// consider using NewSimpleClient instead. -// -// The caller can configure the new client by passing configuration options -// to the func. -// -// Example: -// -// client, err := elastic.NewClient( -// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"), -// elastic.SetBasicAuth("user", "secret")) -// -// If no URL is configured, Elastic uses DefaultURL by default. 
-// -// If the sniffer is enabled (the default), the new client then sniffs -// the cluster via the Nodes Info API -// (see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-nodes-info.html#cluster-nodes-info). -// It uses the URLs specified by the caller. The caller is responsible -// for passing only URLs of nodes that belong to the same cluster. -// This sniffing process is run on startup and periodically. -// Use SnifferInterval to set the interval between two sniffs (default is -// 15 minutes). In other words: By default, the client will find new nodes -// in the cluster and remove those that are no longer available every -// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient. -// -// The list of nodes found in the sniffing process will be used to make -// connections to the REST API of Elasticsearch. These nodes are also -// periodically checked in a shorter time frame. This process is called -// a health check. By default, a health check is done every 60 seconds. -// You can set a shorter or longer interval by SetHealthcheckInterval. -// Disabling health checks is not recommended, but can be done by -// SetHealthcheck(false). -// -// Connections are automatically marked as dead or healthy while -// making requests to Elasticsearch. When a request fails, Elastic will -// call into the Retry strategy which can be specified with SetRetrier. -// The Retry strategy is also responsible for handling backoff, i.e. the time -// to wait before starting the next request. There are various standard -// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff. -// Retries are disabled by default. -// -// If no HttpClient is configured, then http.DefaultClient is used. -// You can use your own http.Client with some http.Transport for -// advanced scenarios. -// -// An error is also returned when some configuration option is invalid or -// the new client cannot sniff the cluster (if enabled). -func NewClient(options ...ClientOptionFunc) (*Client, error) { - return DialContext(context.Background(), options...) -} - -// NewClientFromConfig initializes a client from a configuration. -func NewClientFromConfig(cfg *config.Config) (*Client, error) { - options, err := configToOptions(cfg) - if err != nil { - return nil, err - } - return DialContext(context.Background(), options...) -} - -// NewSimpleClient creates a new short-lived Client that can be used in -// use cases where you need e.g. one client per request. -// -// While NewClient by default sets up e.g. periodic health checks -// and sniffing for new nodes in separate goroutines, NewSimpleClient does -// not and is meant as a simple replacement where you don't need all the -// heavy lifting of NewClient. -// -// NewSimpleClient does the following by default: First, all health checks -// are disabled, including timeouts and periodic checks. Second, sniffing -// is disabled, including timeouts and periodic checks. The number of retries -// is set to 1. NewSimpleClient also does not start any goroutines. -// -// Notice that you can still override settings by passing additional options, -// just like with NewClient.
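A sketch of constructing a long-lived client with the options documented above; the URLs and credentials are placeholders:

client, err := elastic.NewClient(
	elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
	elastic.SetBasicAuth("user", "secret"),
	elastic.SetSniff(false), // e.g. when nodes sit behind a proxy or load balancer
	elastic.SetHealthcheckInterval(30*time.Second),
)
if err != nil {
	// handle error
}
defer client.Stop()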
-func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) { - c := &Client{ - c: http.DefaultClient, - conns: make([]*conn, 0), - cindex: -1, - scheme: DefaultScheme, - decoder: &DefaultDecoder{}, - healthcheckEnabled: false, - healthcheckTimeoutStartup: off, - healthcheckTimeout: off, - healthcheckInterval: off, - healthcheckStop: make(chan bool), - snifferEnabled: false, - snifferTimeoutStartup: off, - snifferTimeout: off, - snifferInterval: off, - snifferCallback: nopSnifferCallback, - snifferStop: make(chan bool), - sendGetBodyAs: DefaultSendGetBodyAs, - gzipEnabled: DefaultGzipEnabled, - retrier: noRetries, // no retries by default - deprecationlog: noDeprecationLog, - } - - // Run the options on it - for _, option := range options { - if err := option(c); err != nil { - return nil, err - } - } - - // Use a default URL and normalize them - if len(c.urls) == 0 { - c.urls = []string{DefaultURL} - } - c.urls = canonicalize(c.urls...) - - // If the URLs have auth info, use them here as an alternative to SetBasicAuth - if !c.basicAuth { - for _, urlStr := range c.urls { - u, err := url.Parse(urlStr) - if err == nil && u.User != nil { - c.basicAuth = true - c.basicAuthUsername = u.User.Username() - c.basicAuthPassword, _ = u.User.Password() - break - } - } - } - - for _, url := range c.urls { - c.conns = append(c.conns, newConn(url, url)) - } - - // Ensure that we have at least one connection available - if err := c.mustActiveConn(); err != nil { - return nil, err - } - - // Check the required plugins - for _, plugin := range c.requiredPlugins { - found, err := c.HasPlugin(plugin) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("elastic: plugin %s not found", plugin) - } - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - return c, nil -} - -// Dial will call DialContext with a background context. -func Dial(options ...ClientOptionFunc) (*Client, error) { - return DialContext(context.Background(), options...) -} - -// DialContext will connect to Elasticsearch, just like NewClient does. -// -// The context is honoured in terms of e.g. cancellation. -func DialContext(ctx context.Context, options ...ClientOptionFunc) (*Client, error) { - // Set up the client - c := &Client{ - c: http.DefaultClient, - conns: make([]*conn, 0), - cindex: -1, - scheme: DefaultScheme, - decoder: &DefaultDecoder{}, - healthcheckEnabled: DefaultHealthcheckEnabled, - healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup, - healthcheckTimeout: DefaultHealthcheckTimeout, - healthcheckInterval: DefaultHealthcheckInterval, - healthcheckStop: make(chan bool), - snifferEnabled: DefaultSnifferEnabled, - snifferTimeoutStartup: DefaultSnifferTimeoutStartup, - snifferTimeout: DefaultSnifferTimeout, - snifferInterval: DefaultSnifferInterval, - snifferCallback: nopSnifferCallback, - snifferStop: make(chan bool), - sendGetBodyAs: DefaultSendGetBodyAs, - gzipEnabled: DefaultGzipEnabled, - retrier: noRetries, // no retries by default - deprecationlog: noDeprecationLog, - } - - // Run the options on it - for _, option := range options { - if err := option(c); err != nil { - return nil, err - } - } - - // Use a default URL and normalize them - if len(c.urls) == 0 { - c.urls = []string{DefaultURL} - } - c.urls = canonicalize(c.urls...) 
- - // If the URLs have auth info, use them here as an alternative to SetBasicAuth - if !c.basicAuth { - for _, urlStr := range c.urls { - u, err := url.Parse(urlStr) - if err == nil && u.User != nil { - c.basicAuth = true - c.basicAuthUsername = u.User.Username() - c.basicAuthPassword, _ = u.User.Password() - break - } - } - } - - // Check if we can make a request to any of the specified URLs - if c.healthcheckEnabled { - if err := c.startupHealthcheck(ctx, c.healthcheckTimeoutStartup); err != nil { - return nil, err - } - } - - if c.snifferEnabled { - // Sniff the cluster initially - if err := c.sniff(ctx, c.snifferTimeoutStartup); err != nil { - return nil, err - } - } else { - // Do not sniff the cluster initially. Use the provided URLs instead. - for _, url := range c.urls { - c.conns = append(c.conns, newConn(url, url)) - } - } - - if c.healthcheckEnabled { - // Perform an initial health check - c.healthcheck(ctx, c.healthcheckTimeoutStartup, true) - } - // Ensure that we have at least one connection available - if err := c.mustActiveConn(); err != nil { - return nil, err - } - - // Check the required plugins - for _, plugin := range c.requiredPlugins { - found, err := c.HasPlugin(plugin) - if err != nil { - return nil, err - } - if !found { - return nil, fmt.Errorf("elastic: plugin %s not found", plugin) - } - } - - if c.snifferEnabled { - go c.sniffer() // periodically update cluster information - } - if c.healthcheckEnabled { - go c.healthchecker() // start goroutine periodically ping all nodes of the cluster - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - return c, nil -} - -// DialWithConfig will use the configuration settings parsed from config package -// to connect to Elasticsearch. -// -// The context is honoured in terms of e.g. cancellation. -func DialWithConfig(ctx context.Context, cfg *config.Config) (*Client, error) { - options, err := configToOptions(cfg) - if err != nil { - return nil, err - } - return DialContext(ctx, options...) -} - -func configToOptions(cfg *config.Config) ([]ClientOptionFunc, error) { - var options []ClientOptionFunc - if cfg != nil { - if cfg.URL != "" { - options = append(options, SetURL(cfg.URL)) - } - if cfg.Errorlog != "" { - f, err := os.OpenFile(cfg.Errorlog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, errors.Wrap(err, "unable to initialize error log") - } - l := log.New(f, "", 0) - options = append(options, SetErrorLog(l)) - } - if cfg.Tracelog != "" { - f, err := os.OpenFile(cfg.Tracelog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, errors.Wrap(err, "unable to initialize trace log") - } - l := log.New(f, "", 0) - options = append(options, SetTraceLog(l)) - } - if cfg.Infolog != "" { - f, err := os.OpenFile(cfg.Infolog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, errors.Wrap(err, "unable to initialize info log") - } - l := log.New(f, "", 0) - options = append(options, SetInfoLog(l)) - } - if cfg.Username != "" || cfg.Password != "" { - options = append(options, SetBasicAuth(cfg.Username, cfg.Password)) - } - if cfg.Sniff != nil { - options = append(options, SetSniff(*cfg.Sniff)) - } - if cfg.Healthcheck != nil { - options = append(options, SetHealthcheck(*cfg.Healthcheck)) - } - } - return options, nil -} - -// SetHttpClient can be used to specify the http.Client to use when making -// HTTP requests to Elasticsearch. 
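A sketch of the config-driven entry point, assuming a config.Config literal with the fields read by configToOptions above (URL and credentials are placeholders):

sniff := false
cfg := &config.Config{
	URL:      "http://127.0.0.1:9200",
	Username: "user",
	Password: "secret",
	Sniff:    &sniff,
}
client, err := elastic.DialWithConfig(ctx, cfg)
if err != nil {
	// handle error
}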
-func SetHttpClient(httpClient Doer) ClientOptionFunc { - return func(c *Client) error { - if httpClient != nil { - c.c = httpClient - } else { - c.c = http.DefaultClient - } - return nil - } -} - -// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to -// use when making HTTP requests to Elasticsearch. -func SetBasicAuth(username, password string) ClientOptionFunc { - return func(c *Client) error { - c.basicAuthUsername = username - c.basicAuthPassword = password - c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != "" - return nil - } -} - -// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that -// when sniffing is enabled, these URLs are used to initially sniff the -// cluster on startup. -func SetURL(urls ...string) ClientOptionFunc { - return func(c *Client) error { - switch len(urls) { - case 0: - c.urls = []string{DefaultURL} - default: - c.urls = urls - } - return nil - } -} - -// SetScheme sets the HTTP scheme to look for when sniffing (http or https). -// This is http by default. -func SetScheme(scheme string) ClientOptionFunc { - return func(c *Client) error { - c.scheme = scheme - return nil - } -} - -// SetSniff enables or disables the sniffer (enabled by default). -func SetSniff(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.snifferEnabled = enabled - return nil - } -} - -// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used -// when creating a new client. The default is 5 seconds. Notice that the -// timeout being used for subsequent sniffing processes is set with -// SetSnifferTimeout. -func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferTimeoutStartup = timeout - return nil - } -} - -// SetSnifferTimeout sets the timeout for the sniffer that finds the -// nodes in a cluster. The default is 2 seconds. Notice that the timeout -// used when creating a new client on startup is usually greater and can -// be set with SetSnifferTimeoutStartup. -func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferTimeout = timeout - return nil - } -} - -// SetSnifferInterval sets the interval between two sniffing processes. -// The default interval is 15 minutes. -func SetSnifferInterval(interval time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.snifferInterval = interval - return nil - } -} - -// SnifferCallback defines the protocol for sniffing decisions. -type SnifferCallback func(*NodesInfoNode) bool - -// nopSnifferCallback is the default sniffer callback: It accepts -// all nodes the sniffer finds. -var nopSnifferCallback = func(*NodesInfoNode) bool { return true } - -// SetSnifferCallback allows the caller to modify sniffer decisions. -// When setting the callback, the given SnifferCallback is called for -// each (healthy) node found during the sniffing process. -// If the callback returns false, the node is ignored: No requests -// are routed to it. -func SetSnifferCallback(f SnifferCallback) ClientOptionFunc { - return func(c *Client) error { - if f != nil { - c.snifferCallback = f - } - return nil - } -} - -// SetHealthcheck enables or disables healthchecks (enabled by default). -func SetHealthcheck(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckEnabled = enabled - return nil - } -} - -// SetHealthcheckTimeoutStartup sets the timeout for the initial health check. 
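A sketch of tuning the sniffer with the options above; the callback filters out nodes that do not advertise an HTTP publish address (client name, URL, and interval are placeholders):

client, err := elastic.DialContext(ctx,
	elastic.SetURL("http://127.0.0.1:9200"),
	elastic.SetSnifferInterval(5*time.Minute),
	elastic.SetSnifferCallback(func(node *elastic.NodesInfoNode) bool {
		// Only route requests to nodes with a usable publish address.
		return node.HTTP != nil && len(node.HTTP.PublishAddress) > 0
	}),
)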
-// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup). -// Notice that timeouts for subsequent health checks can be modified with -// SetHealthcheckTimeout. -func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckTimeoutStartup = timeout - return nil - } -} - -// SetHealthcheckTimeout sets the timeout for periodic health checks. -// The default timeout is 1 second (see DefaultHealthcheckTimeout). -// Notice that a different (usually larger) timeout is used for the initial -// healthcheck, which is initiated while creating a new client. -// The startup timeout can be modified with SetHealthcheckTimeoutStartup. -func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckTimeout = timeout - return nil - } -} - -// SetHealthcheckInterval sets the interval between two health checks. -// The default interval is 60 seconds. -func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc { - return func(c *Client) error { - c.healthcheckInterval = interval - return nil - } -} - -// SetMaxRetries sets the maximum number of retries before giving up when -// performing a HTTP request to Elasticsearch. -// -// Deprecated: Replace with a Retry implementation. -func SetMaxRetries(maxRetries int) ClientOptionFunc { - return func(c *Client) error { - if maxRetries < 0 { - return errors.New("MaxRetries must be greater than or equal to 0") - } else if maxRetries == 0 { - c.retrier = noRetries - } else { - // Create a Retrier that will wait for 100ms (+/- jitter) between requests. - // This resembles the old behavior with maxRetries. - ticks := make([]int, maxRetries) - for i := 0; i < len(ticks); i++ { - ticks[i] = 100 - } - backoff := NewSimpleBackoff(ticks...) - c.retrier = NewBackoffRetrier(backoff) - } - return nil - } -} - -// SetGzip enables or disables gzip compression (disabled by default). -func SetGzip(enabled bool) ClientOptionFunc { - return func(c *Client) error { - c.gzipEnabled = enabled - return nil - } -} - -// SetDecoder sets the Decoder to use when decoding data from Elasticsearch. -// DefaultDecoder is used by default. -func SetDecoder(decoder Decoder) ClientOptionFunc { - return func(c *Client) error { - if decoder != nil { - c.decoder = decoder - } else { - c.decoder = &DefaultDecoder{} - } - return nil - } -} - -// SetRequiredPlugins can be used to indicate that some plugins are required -// before a Client will be created. -func SetRequiredPlugins(plugins ...string) ClientOptionFunc { - return func(c *Client) error { - if c.requiredPlugins == nil { - c.requiredPlugins = make([]string, 0) - } - c.requiredPlugins = append(c.requiredPlugins, plugins...) - return nil - } -} - -// SetErrorLog sets the logger for critical messages like nodes joining -// or leaving the cluster or failing requests. It is nil by default. -func SetErrorLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.errorlog = logger - return nil - } -} - -// SetInfoLog sets the logger for informational messages, e.g. requests -// and their response times. It is nil by default. -func SetInfoLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.infolog = logger - return nil - } -} - -// SetTraceLog specifies the log.Logger to use for output of HTTP requests -// and responses which is helpful during debugging. It is nil by default. 
-func SetTraceLog(logger Logger) ClientOptionFunc { - return func(c *Client) error { - c.tracelog = logger - return nil - } -} - -// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request -// with a body. It is GET by default. -func SetSendGetBodyAs(httpMethod string) ClientOptionFunc { - return func(c *Client) error { - c.sendGetBodyAs = httpMethod - return nil - } -} - -// SetRetrier specifies the retry strategy that handles errors during -// HTTP request/response with Elasticsearch. -func SetRetrier(retrier Retrier) ClientOptionFunc { - return func(c *Client) error { - if retrier == nil { - retrier = noRetries // no retries by default - } - c.retrier = retrier - return nil - } -} - -// SetHeaders adds a list of default HTTP headers that will be added to -// each request executed by PerformRequest. -func SetHeaders(headers http.Header) ClientOptionFunc { - return func(c *Client) error { - c.headers = headers - return nil - } -} - -// String returns a string representation of the client status. -func (c *Client) String() string { - c.connsMu.Lock() - conns := c.conns - c.connsMu.Unlock() - - var buf bytes.Buffer - for i, conn := range conns { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(conn.String()) - } - return buf.String() -} - -// IsRunning returns true if the background processes of the client are -// running, false otherwise. -func (c *Client) IsRunning() bool { - c.mu.RLock() - defer c.mu.RUnlock() - return c.running -} - -// Start starts the background processes like sniffing the cluster and -// periodic health checks. You don't need to run Start when creating a -// client with NewClient; the background processes are run by default. -// -// If the background processes are already running, this is a no-op. -func (c *Client) Start() { - c.mu.RLock() - if c.running { - c.mu.RUnlock() - return - } - c.mu.RUnlock() - - if c.snifferEnabled { - go c.sniffer() - } - if c.healthcheckEnabled { - go c.healthchecker() - } - - c.mu.Lock() - c.running = true - c.mu.Unlock() - - c.infof("elastic: client started") -} - -// Stop stops the background processes that the client is running, -// i.e. sniffing the cluster periodically and running health checks -// on the nodes. -// -// If the background processes are not running, this is a no-op. -func (c *Client) Stop() { - c.mu.RLock() - if !c.running { - c.mu.RUnlock() - return - } - c.mu.RUnlock() - - if c.healthcheckEnabled { - c.healthcheckStop <- true - <-c.healthcheckStop - } - - if c.snifferEnabled { - c.snifferStop <- true - <-c.snifferStop - } - - c.mu.Lock() - c.running = false - c.mu.Unlock() - - c.infof("elastic: client stopped") -} - -// errorf logs to the error log. -func (c *Client) errorf(format string, args ...interface{}) { - if c.errorlog != nil { - c.errorlog.Printf(format, args...) - } -} - -// infof logs informational messages. -func (c *Client) infof(format string, args ...interface{}) { - if c.infolog != nil { - c.infolog.Printf(format, args...) - } -} - -// tracef logs to the trace log. -func (c *Client) tracef(format string, args ...interface{}) { - if c.tracelog != nil { - c.tracelog.Printf(format, args...) - } -} - -// dumpRequest dumps the given HTTP request to the trace log. -func (c *Client) dumpRequest(r *http.Request) { - if c.tracelog != nil { - out, err := httputil.DumpRequestOut(r, true) - if err == nil { - c.tracef("%s\n", string(out)) - } - } -} - -// dumpResponse dumps the given HTTP response to the trace log.
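A sketch of plugging a retry strategy into SetRetrier above, using the same fixed backoff that the deprecated SetMaxRetries constructs internally (client construction details omitted):

client, err := elastic.NewClient(
	// Retry up to three times, waiting roughly 100ms between attempts.
	elastic.SetRetrier(elastic.NewBackoffRetrier(elastic.NewSimpleBackoff(100, 100, 100))),
)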
-func (c *Client) dumpResponse(resp *http.Response) { - if c.tracelog != nil { - out, err := httputil.DumpResponse(resp, true) - if err == nil { - c.tracef("%s\n", string(out)) - } - } -} - -// sniffer periodically runs sniff. -func (c *Client) sniffer() { - c.mu.RLock() - timeout := c.snifferTimeout - interval := c.snifferInterval - c.mu.RUnlock() - - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-c.snifferStop: - // we are asked to stop, so we signal back that we're stopping now - c.snifferStop <- true - return - case <-ticker.C: - c.sniff(context.Background(), timeout) - } - } -} - -// sniff uses the Node Info API to return the list of nodes in the cluster. -// It uses the list of URLs passed on startup plus the list of URLs found -// by the preceding sniffing process (if sniffing is enabled). -// -// If sniffing is disabled, this is a no-op. -func (c *Client) sniff(parentCtx context.Context, timeout time.Duration) error { - c.mu.RLock() - if !c.snifferEnabled { - c.mu.RUnlock() - return nil - } - - // Use all available URLs provided to sniff the cluster. - var urls []string - urlsMap := make(map[string]bool) - - // Add all URLs provided on startup - for _, url := range c.urls { - urlsMap[url] = true - urls = append(urls, url) - } - c.mu.RUnlock() - - // Add all URLs found by sniffing - c.connsMu.RLock() - for _, conn := range c.conns { - if !conn.IsDead() { - url := conn.URL() - if _, found := urlsMap[url]; !found { - urls = append(urls, url) - } - } - } - c.connsMu.RUnlock() - - if len(urls) == 0 { - return errors.Wrap(ErrNoClient, "no URLs found") - } - - // Start sniffing on all found URLs - ch := make(chan []*conn, len(urls)) - - ctx, cancel := context.WithTimeout(parentCtx, timeout) - defer cancel() - - for _, url := range urls { - go func(url string) { ch <- c.sniffNode(ctx, url) }(url) - } - - // Wait for the results to come back, or the process times out. - for { - select { - case conns := <-ch: - if len(conns) > 0 { - c.updateConns(conns) - return nil - } - case <-ctx.Done(): - if err := ctx.Err(); err != nil { - switch { - case IsContextErr(err): - return err - } - return errors.Wrapf(ErrNoClient, "sniff timeout: %v", err) - } - // We get here if no cluster responds in time - return errors.Wrap(ErrNoClient, "sniff timeout") - } - } -} - -// sniffNode sniffs a single node. This method is run as a goroutine -// in sniff. If successful, it returns the list of node URLs extracted -// from the result of calling Nodes Info API. Otherwise, an empty array -// is returned. -func (c *Client) sniffNode(ctx context.Context, url string) []*conn { - var nodes []*conn - - // Call the Nodes Info API at /_nodes/http - req, err := NewRequest("GET", url+"/_nodes/http") - if err != nil { - return nodes - } - - c.mu.RLock() - if c.basicAuth { - req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword) - } - c.mu.RUnlock() - - res, err := c.c.Do((*http.Request)(req).WithContext(ctx)) - if err != nil { - return nodes - } - defer res.Body.Close() - - var info NodesInfoResponse - if err := json.NewDecoder(res.Body).Decode(&info); err == nil { - if len(info.Nodes) > 0 { - for nodeID, node := range info.Nodes { - if c.snifferCallback(node) { - if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 { - url := c.extractHostname(c.scheme, node.HTTP.PublishAddress) - if url != "" { - nodes = append(nodes, newConn(nodeID, url)) - } - } - } - } - } - } - return nodes -} - -// extractHostname returns the URL from the http.publish_address setting. 
-func (c *Client) extractHostname(scheme, address string) string { - var ( - host string - port string - - addrs = strings.Split(address, "/") - ports = strings.Split(address, ":") - ) - - if len(addrs) > 1 { - host = addrs[0] - } else { - host = strings.Split(addrs[0], ":")[0] - } - port = ports[len(ports)-1] - - return fmt.Sprintf("%s://%s:%s", scheme, host, port) -} - -// updateConns updates the client's connections with new information -// gathered by a sniff operation. -func (c *Client) updateConns(conns []*conn) { - c.connsMu.Lock() - - // Build up new connections: - // If we find an existing connection, use that (including no. of failures etc.). - // If we find a new connection, add it. - var newConns []*conn - for _, conn := range conns { - var found bool - for _, oldConn := range c.conns { - // Notice that e.g. in a Kubernetes cluster the NodeID might be - // stable while the URL has changed. - if oldConn.NodeID() == conn.NodeID() && oldConn.URL() == conn.URL() { - // Take over the old connection - newConns = append(newConns, oldConn) - found = true - break - } - } - if !found { - // New connection didn't exist, so add it to our list of new conns. - c.infof("elastic: %s joined the cluster", conn.URL()) - newConns = append(newConns, conn) - } - } - - c.conns = newConns - c.cindex = -1 - c.connsMu.Unlock() -} - -// healthchecker periodically runs healthcheck. -func (c *Client) healthchecker() { - c.mu.RLock() - timeout := c.healthcheckTimeout - interval := c.healthcheckInterval - c.mu.RUnlock() - - ticker := time.NewTicker(interval) - defer ticker.Stop() - - for { - select { - case <-c.healthcheckStop: - // we are asked to stop, so we signal back that we're stopping now - c.healthcheckStop <- true - return - case <-ticker.C: - c.healthcheck(context.Background(), timeout, false) - } - } -} - -// healthcheck does a health check on all nodes in the cluster. Depending on -// the node state, it marks connections as dead, sets them alive etc. -// If healthchecks are disabled and force is false, this is a no-op. -// The timeout specifies how long to wait for a response from Elasticsearch.
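For reference, the publish-address parsing in extractHostname above handles both forms Elasticsearch reports, "ip:port" and "hostname/ip:port" (the hostname wins in the latter). A standalone copy to illustrate:

package main

import (
	"fmt"
	"strings"
)

// extractHostname reproduces the deleted helper: split on "/" to find an
// optional hostname prefix, and take whatever follows the last ":" as port.
func extractHostname(scheme, address string) string {
	var host string
	addrs := strings.Split(address, "/")
	ports := strings.Split(address, ":")
	if len(addrs) > 1 {
		host = addrs[0]
	} else {
		host = strings.Split(addrs[0], ":")[0]
	}
	port := ports[len(ports)-1]
	return fmt.Sprintf("%s://%s:%s", scheme, host, port)
}

func main() {
	fmt.Println(extractHostname("http", "10.0.0.5:9200"))           // http://10.0.0.5:9200
	fmt.Println(extractHostname("http", "es-node-1/10.0.0.5:9200")) // http://es-node-1:9200
}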
-func (c *Client) healthcheck(parentCtx context.Context, timeout time.Duration, force bool) { - c.mu.RLock() - if !c.healthcheckEnabled && !force { - c.mu.RUnlock() - return - } - basicAuth := c.basicAuth - basicAuthUsername := c.basicAuthUsername - basicAuthPassword := c.basicAuthPassword - c.mu.RUnlock() - - c.connsMu.RLock() - conns := c.conns - c.connsMu.RUnlock() - - for _, conn := range conns { - // Run the HEAD request against ES with a timeout - ctx, cancel := context.WithTimeout(parentCtx, timeout) - defer cancel() - - // Goroutine executes the HTTP request, returns an error and sets status - var status int - errc := make(chan error, 1) - go func(url string) { - req, err := NewRequest("HEAD", url) - if err != nil { - errc <- err - return - } - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - res, err := c.c.Do((*http.Request)(req).WithContext(ctx)) - if res != nil { - status = res.StatusCode - if res.Body != nil { - res.Body.Close() - } - } - errc <- err - }(conn.URL()) - - // Wait for the Goroutine (or its timeout) - select { - case <-ctx.Done(): // timeout - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - case err := <-errc: - if err != nil { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - break - } - if status >= 200 && status < 300 { - conn.MarkAsAlive() - } else { - conn.MarkAsDead() - c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status) - } - } - } -} - -// startupHealthcheck is used at startup to check if the server is available -// at all. -func (c *Client) startupHealthcheck(parentCtx context.Context, timeout time.Duration) error { - c.mu.Lock() - urls := c.urls - basicAuth := c.basicAuth - basicAuthUsername := c.basicAuthUsername - basicAuthPassword := c.basicAuthPassword - c.mu.Unlock() - - // If we don't get a connection after "timeout", we bail. - var lastErr error - start := time.Now() - done := false - for !done { - for _, url := range urls { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return err - } - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - ctx, cancel := context.WithTimeout(parentCtx, timeout) - defer cancel() - req = req.WithContext(ctx) - res, err := c.c.Do(req) - if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 { - return nil - } else if err != nil { - lastErr = err - } - } - select { - case <-parentCtx.Done(): - lastErr = parentCtx.Err() - done = true - case <-time.After(1 * time.Second): - if time.Since(start) > timeout { - done = true - } - } - } - if lastErr != nil { - if IsContextErr(lastErr) { - return lastErr - } - return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr) - } - return errors.Wrap(ErrNoClient, "health check timeout") -} - -// next returns the next available connection, or ErrNoClient. -func (c *Client) next() (*conn, error) { - // We do round-robin here. - // TODO(oe) This should be a pluggable strategy, like the Selector in the official clients. - c.connsMu.Lock() - defer c.connsMu.Unlock() - - i := 0 - numConns := len(c.conns) - for { - i++ - if i > numConns { - break // we visited all conns: they all seem to be dead - } - c.cindex++ - if c.cindex >= numConns { - c.cindex = 0 - } - conn := c.conns[c.cindex] - if !conn.IsDead() { - return conn, nil - } - } - - // We have a deadlock here: All nodes are marked as dead. - // If sniffing is disabled, connections will never be marked alive again. - // So we are marking them as alive--if sniffing is disabled. 
-	// They'll then be picked up in the next call to PerformRequest.
-	if !c.snifferEnabled {
-		c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
-		for _, conn := range c.conns {
-			conn.MarkAsAlive()
-		}
-	}
-
-	// We tried hard, but there is no node available
-	return nil, errors.Wrap(ErrNoClient, "no available connection")
-}
-
-// mustActiveConn returns nil if there is an active connection,
-// otherwise ErrNoClient is returned.
-func (c *Client) mustActiveConn() error {
-	c.connsMu.Lock()
-	defer c.connsMu.Unlock()
-
-	for _, c := range c.conns {
-		if !c.IsDead() {
-			return nil
-		}
-	}
-	return errors.Wrap(ErrNoClient, "no active connection found")
-}
-
-// -- PerformRequest --
-
-// PerformRequestOptions must be passed into PerformRequest.
-type PerformRequestOptions struct {
-	Method          string
-	Path            string
-	Params          url.Values
-	Body            interface{}
-	ContentType     string
-	IgnoreErrors    []int
-	Retrier         Retrier
-	Headers         http.Header
-	MaxResponseSize int64
-}
-
-// PerformRequest does an HTTP request to Elasticsearch.
-// It returns a response (which might be nil) and an error on failure.
-//
-// Optionally, a list of HTTP error codes to ignore can be passed.
-// This is necessary for services that expect e.g. HTTP status 404 as a
-// valid outcome (Exists, IndicesExists, IndicesTypeExists).
-func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions) (*Response, error) {
-	start := time.Now().UTC()
-
-	c.mu.RLock()
-	timeout := c.healthcheckTimeout
-	basicAuth := c.basicAuth
-	basicAuthUsername := c.basicAuthUsername
-	basicAuthPassword := c.basicAuthPassword
-	sendGetBodyAs := c.sendGetBodyAs
-	gzipEnabled := c.gzipEnabled
-	healthcheckEnabled := c.healthcheckEnabled
-	retrier := c.retrier
-	if opt.Retrier != nil {
-		retrier = opt.Retrier
-	}
-	defaultHeaders := c.headers
-	c.mu.RUnlock()
-
-	var err error
-	var conn *conn
-	var req *Request
-	var resp *Response
-	var retried bool
-	var n int
-
-	// Change method if sendGetBodyAs is specified.
-	if opt.Method == "GET" && opt.Body != nil && sendGetBodyAs != "GET" {
-		opt.Method = sendGetBodyAs
-	}
-
-	for {
-		pathWithParams := opt.Path
-		if len(opt.Params) > 0 {
-			pathWithParams += "?" + opt.Params.Encode()
-		}
-
-		// Get a connection
-		conn, err = c.next()
-		if errors.Cause(err) == ErrNoClient {
-			n++
-			if !retried {
-				// Force a healthcheck as all connections seem to be dead.
- c.healthcheck(ctx, timeout, false) - if healthcheckEnabled { - retried = true - continue - } - } - wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err) - if rerr != nil { - return nil, rerr - } - if !ok { - return nil, err - } - retried = true - time.Sleep(wait) - continue // try again - } - if err != nil { - c.errorf("elastic: cannot get connection from pool") - return nil, err - } - - req, err = NewRequest(opt.Method, conn.URL()+pathWithParams) - if err != nil { - c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(opt.Method), conn.URL()+pathWithParams, err) - return nil, err - } - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - if opt.ContentType != "" { - req.Header.Set("Content-Type", opt.ContentType) - } - if len(opt.Headers) > 0 { - for key, value := range opt.Headers { - for _, v := range value { - req.Header.Add(key, v) - } - } - } - if len(defaultHeaders) > 0 { - for key, value := range defaultHeaders { - for _, v := range value { - req.Header.Add(key, v) - } - } - } - - // Set body - if opt.Body != nil { - err = req.SetBody(opt.Body, gzipEnabled) - if err != nil { - c.errorf("elastic: couldn't set body %+v for request: %v", opt.Body, err) - return nil, err - } - } - - // Tracing - c.dumpRequest((*http.Request)(req)) - - // Get response - res, err := c.c.Do((*http.Request)(req).WithContext(ctx)) - if IsContextErr(err) { - // Proceed, but don't mark the node as dead - return nil, err - } - if err != nil { - n++ - wait, ok, rerr := retrier.Retry(ctx, n, (*http.Request)(req), res, err) - if rerr != nil { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - return nil, rerr - } - if !ok { - c.errorf("elastic: %s is dead", conn.URL()) - conn.MarkAsDead() - return nil, err - } - retried = true - time.Sleep(wait) - continue // try again - } - defer res.Body.Close() - - // Tracing - c.dumpResponse(res) - - // Log deprecation warnings as errors - if len(res.Header["Warning"]) > 0 { - c.deprecationlog((*http.Request)(req), res) - for _, warning := range res.Header["Warning"] { - c.errorf("Deprecation warning: %s", warning) - } - } - - // Check for errors - if err := checkResponse((*http.Request)(req), res, opt.IgnoreErrors...); err != nil { - // No retry if request succeeded - // We still try to return a response. - resp, _ = c.newResponse(res, opt.MaxResponseSize) - return resp, err - } - - // We successfully made a request with this connection - conn.MarkAsHealthy() - - resp, err = c.newResponse(res, opt.MaxResponseSize) - if err != nil { - return nil, err - } - - break - } - - duration := time.Now().UTC().Sub(start) - c.infof("%s %s [status:%d, request:%.3fs]", - strings.ToUpper(opt.Method), - req.URL, - resp.StatusCode, - float64(int64(duration/time.Millisecond))/1000) - - return resp, nil -} - -// -- Document APIs -- - -// Index a document. -func (c *Client) Index() *IndexService { - return NewIndexService(c) -} - -// Get a document. -func (c *Client) Get() *GetService { - return NewGetService(c) -} - -// MultiGet retrieves multiple documents in one roundtrip. -func (c *Client) MultiGet() *MgetService { - return NewMgetService(c) -} - -// Mget retrieves multiple documents in one roundtrip. -func (c *Client) Mget() *MgetService { - return NewMgetService(c) -} - -// Delete a document. -func (c *Client) Delete() *DeleteService { - return NewDeleteService(c) -} - -// DeleteByQuery deletes documents as found by a query. 
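PerformRequest is the low-level entry point that every service wrapper below delegates to. A minimal sketch of calling it directly, assuming a single-node cluster at http://127.0.0.1:9200 and an index named my-index (both placeholders):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Sniffing is disabled so the sketch works against a single node.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	res, err := client.PerformRequest(context.Background(), elastic.PerformRequestOptions{
		Method:       "GET",
		Path:         "/my-index/_count",
		IgnoreErrors: []int{404}, // treat a missing index as a valid outcome
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("status=%d body=%s\n", res.StatusCode, res.Body)
}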
-func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService { - return NewDeleteByQueryService(c).Index(indices...) -} - -// Update a document. -func (c *Client) Update() *UpdateService { - return NewUpdateService(c) -} - -// UpdateByQuery performs an update on a set of documents. -func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService { - return NewUpdateByQueryService(c).Index(indices...) -} - -// Bulk is the entry point to mass insert/update/delete documents. -func (c *Client) Bulk() *BulkService { - return NewBulkService(c) -} - -// BulkProcessor allows setting up a concurrent processor of bulk requests. -func (c *Client) BulkProcessor() *BulkProcessorService { - return NewBulkProcessorService(c) -} - -// Reindex copies data from a source index into a destination index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html -// for details on the Reindex API. -func (c *Client) Reindex() *ReindexService { - return NewReindexService(c) -} - -// TermVectors returns information and statistics on terms in the fields -// of a particular document. -func (c *Client) TermVectors(index string) *TermvectorsService { - builder := NewTermvectorsService(c) - builder = builder.Index(index) - return builder -} - -// MultiTermVectors returns information and statistics on terms in the fields -// of multiple documents. -func (c *Client) MultiTermVectors() *MultiTermvectorService { - return NewMultiTermvectorService(c) -} - -// -- Search APIs -- - -// Search is the entry point for searches. -func (c *Client) Search(indices ...string) *SearchService { - return NewSearchService(c).Index(indices...) -} - -// MultiSearch is the entry point for multi searches. -func (c *Client) MultiSearch() *MultiSearchService { - return NewMultiSearchService(c) -} - -// Count documents. -func (c *Client) Count(indices ...string) *CountService { - return NewCountService(c).Index(indices...) -} - -// Explain computes a score explanation for a query and a specific document. -func (c *Client) Explain(index, typ, id string) *ExplainService { - return NewExplainService(c).Index(index).Type(typ).Id(id) -} - -// TODO Search Template -// TODO Search Exists API - -// Validate allows a user to validate a potentially expensive query without executing it. -func (c *Client) Validate(indices ...string) *ValidateService { - return NewValidateService(c).Index(indices...) -} - -// SearchShards returns statistical information about nodes and shards. -func (c *Client) SearchShards(indices ...string) *SearchShardsService { - return NewSearchShardsService(c).Index(indices...) -} - -// FieldCaps returns statistical information about fields in indices. -func (c *Client) FieldCaps(indices ...string) *FieldCapsService { - return NewFieldCapsService(c).Index(indices...) -} - -// Exists checks if a document exists. -func (c *Client) Exists() *ExistsService { - return NewExistsService(c) -} - -// Scroll through documents. Use this to efficiently scroll through results -// while returning the results to a client. -func (c *Client) Scroll(indices ...string) *ScrollService { - return NewScrollService(c).Index(indices...) -} - -// ClearScroll can be used to clear search contexts manually. -func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService { - return NewClearScrollService(c).ScrollId(scrollIds...) -} - -// -- Indices APIs -- - -// CreateIndex returns a service to create a new index. 
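Each of the search entry points above returns a builder that is executed with Do(ctx). A sketch of a simple term search; the cluster URL, index, field, and value are all hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	res, err := client.Search("my-index").
		Query(elastic.NewTermQuery("user.keyword", "alice")).
		Size(10).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d matching documents\n", res.TotalHits())
}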
-func (c *Client) CreateIndex(name string) *IndicesCreateService {
-	return NewIndicesCreateService(c).Index(name)
-}
-
-// DeleteIndex returns a service to delete an index.
-func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
-	return NewIndicesDeleteService(c).Index(indices)
-}
-
-// IndexExists checks whether an index exists.
-func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
-	return NewIndicesExistsService(c).Index(indices)
-}
-
-// ShrinkIndex returns a service to shrink one index into another.
-func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
-	return NewIndicesShrinkService(c).Source(source).Target(target)
-}
-
-// RolloverIndex rolls an alias over to a new index when the existing index
-// is considered to be too large or too old.
-func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
-	return NewIndicesRolloverService(c).Alias(alias)
-}
-
-// IndexStats provides statistics on different operations happening
-// in one or more indices.
-func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
-	return NewIndicesStatsService(c).Index(indices...)
-}
-
-// OpenIndex opens an index.
-func (c *Client) OpenIndex(name string) *IndicesOpenService {
-	return NewIndicesOpenService(c).Index(name)
-}
-
-// CloseIndex closes an index.
-func (c *Client) CloseIndex(name string) *IndicesCloseService {
-	return NewIndicesCloseService(c).Index(name)
-}
-
-// FreezeIndex freezes an index.
-func (c *Client) FreezeIndex(name string) *IndicesFreezeService {
-	return NewIndicesFreezeService(c).Index(name)
-}
-
-// UnfreezeIndex unfreezes an index.
-func (c *Client) UnfreezeIndex(name string) *IndicesUnfreezeService {
-	return NewIndicesUnfreezeService(c).Index(name)
-}
-
-// IndexGet retrieves information about one or more indices.
-// IndexGet is only available for Elasticsearch 1.4 or later.
-func (c *Client) IndexGet(indices ...string) *IndicesGetService {
-	return NewIndicesGetService(c).Index(indices...)
-}
-
-// IndexGetSettings retrieves settings of all, one or more indices.
-func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
-	return NewIndicesGetSettingsService(c).Index(indices...)
-}
-
-// IndexPutSettings sets settings for all, one or more indices.
-func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
-	return NewIndicesPutSettingsService(c).Index(indices...)
-}
-
-// IndexSegments retrieves low level segment information for all, one or more indices.
-func (c *Client) IndexSegments(indices ...string) *IndicesSegmentsService {
-	return NewIndicesSegmentsService(c).Index(indices...)
-}
-
-// IndexAnalyze performs the analysis process on a text and returns the
-// token breakdown of the text.
-func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
-	return NewIndicesAnalyzeService(c)
-}
-
-// Forcemerge optimizes one or more indices.
-// It replaces the deprecated Optimize API.
-func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
-	return NewIndicesForcemergeService(c).Index(indices...)
-}
-
-// Refresh asks Elasticsearch to refresh one or more indices.
-func (c *Client) Refresh(indices ...string) *RefreshService {
-	return NewRefreshService(c).Index(indices...)
-}
-
-// Flush asks Elasticsearch to free memory from the index and
-// flush data to disk.
-func (c *Client) Flush(indices ...string) *IndicesFlushService {
-	return NewIndicesFlushService(c).Index(indices...)
-}
-
-// SyncedFlush performs a synced flush.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-synced-flush.html
-// for more details on synced flushes and how they differ from a normal
-// Flush.
-func (c *Client) SyncedFlush(indices ...string) *IndicesSyncedFlushService {
-	return NewIndicesSyncedFlushService(c).Index(indices...)
-}
-
-// ClearCache clears caches for one or more indices.
-func (c *Client) ClearCache(indices ...string) *IndicesClearCacheService {
-	return NewIndicesClearCacheService(c).Index(indices...)
-}
-
-// Alias enables the caller to add and/or remove aliases.
-func (c *Client) Alias() *AliasService {
-	return NewAliasService(c)
-}
-
-// Aliases returns aliases by index name(s).
-func (c *Client) Aliases() *AliasesService {
-	return NewAliasesService(c)
-}
-
-// IndexGetTemplate gets an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
-	return NewIndicesGetTemplateService(c).Name(names...)
-}
-
-// IndexTemplateExists checks whether an index template exists.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
-	return NewIndicesExistsTemplateService(c).Name(name)
-}
-
-// IndexPutTemplate creates or updates an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
-	return NewIndicesPutTemplateService(c).Name(name)
-}
-
-// IndexDeleteTemplate deletes an index template.
-// Use XXXTemplate funcs to manage search templates.
-func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
-	return NewIndicesDeleteTemplateService(c).Name(name)
-}
-
-// GetMapping gets a mapping.
-func (c *Client) GetMapping() *IndicesGetMappingService {
-	return NewIndicesGetMappingService(c)
-}
-
-// PutMapping registers a mapping.
-func (c *Client) PutMapping() *IndicesPutMappingService {
-	return NewIndicesPutMappingService(c)
-}
-
-// GetFieldMapping gets mapping for fields.
-func (c *Client) GetFieldMapping() *IndicesGetFieldMappingService {
-	return NewIndicesGetFieldMappingService(c)
-}
-
-// -- cat APIs --
-
-// TODO cat fielddata
-// TODO cat master
-// TODO cat nodes
-// TODO cat pending tasks
-// TODO cat plugins
-// TODO cat recovery
-// TODO cat thread pool
-// TODO cat shards
-// TODO cat segments
-
-// CatAliases returns information about aliases.
-func (c *Client) CatAliases() *CatAliasesService {
-	return NewCatAliasesService(c)
-}
-
-// CatAllocation returns information about the allocation across nodes.
-func (c *Client) CatAllocation() *CatAllocationService {
-	return NewCatAllocationService(c)
-}
-
-// CatCount returns document counts for indices.
-func (c *Client) CatCount() *CatCountService {
-	return NewCatCountService(c)
-}
-
-// CatHealth returns information about cluster health.
-func (c *Client) CatHealth() *CatHealthService {
-	return NewCatHealthService(c)
-}
-
-// CatIndices returns information about indices.
-func (c *Client) CatIndices() *CatIndicesService {
-	return NewCatIndicesService(c)
-}
-
-// CatShards returns information about shards.
-func (c *Client) CatShards() *CatShardsService {
-	return NewCatShardsService(c)
-}
-
-// -- Ingest APIs --
-
-// IngestPutPipeline adds pipelines and updates existing pipelines in
-// the cluster.
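The Alias service combines Add and Remove actions into a single request, which Elasticsearch applies atomically; that is the usual way to repoint a "current" alias from an old index to a new one. A sketch with placeholder index and alias names (client setup as in the earlier sketches):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Both actions travel in one request, so readers of the alias never
	// observe an intermediate state.
	_, err = client.Alias().
		Remove("logs-2020.04", "logs-current").
		Add("logs-2020.05", "logs-current").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logs-current now points at logs-2020.05")
}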
-func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService { - return NewIngestPutPipelineService(c).Id(id) -} - -// IngestGetPipeline returns pipelines based on ID. -func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService { - return NewIngestGetPipelineService(c).Id(ids...) -} - -// IngestDeletePipeline deletes a pipeline by ID. -func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService { - return NewIngestDeletePipelineService(c).Id(id) -} - -// IngestSimulatePipeline executes a specific pipeline against the set of -// documents provided in the body of the request. -func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService { - return NewIngestSimulatePipelineService(c) -} - -// -- Cluster APIs -- - -// ClusterHealth retrieves the health of the cluster. -func (c *Client) ClusterHealth() *ClusterHealthService { - return NewClusterHealthService(c) -} - -// ClusterReroute allows for manual changes to the allocation of -// individual shards in the cluster. -func (c *Client) ClusterReroute() *ClusterRerouteService { - return NewClusterRerouteService(c) -} - -// ClusterState retrieves the state of the cluster. -func (c *Client) ClusterState() *ClusterStateService { - return NewClusterStateService(c) -} - -// ClusterStats retrieves cluster statistics. -func (c *Client) ClusterStats() *ClusterStatsService { - return NewClusterStatsService(c) -} - -// NodesInfo retrieves one or more or all of the cluster nodes information. -func (c *Client) NodesInfo() *NodesInfoService { - return NewNodesInfoService(c) -} - -// NodesStats retrieves one or more or all of the cluster nodes statistics. -func (c *Client) NodesStats() *NodesStatsService { - return NewNodesStatsService(c) -} - -// TasksCancel cancels tasks running on the specified nodes. -func (c *Client) TasksCancel() *TasksCancelService { - return NewTasksCancelService(c) -} - -// TasksList retrieves the list of tasks running on the specified nodes. -func (c *Client) TasksList() *TasksListService { - return NewTasksListService(c) -} - -// TasksGetTask retrieves a task running on the cluster. -func (c *Client) TasksGetTask() *TasksGetTaskService { - return NewTasksGetTaskService(c) -} - -// TODO Pending cluster tasks -// TODO Cluster Reroute -// TODO Cluster Update Settings -// TODO Nodes Stats -// TODO Nodes hot_threads - -// -- Snapshot and Restore -- - -// SnapshotStatus returns information about the status of a snapshot. -func (c *Client) SnapshotStatus() *SnapshotStatusService { - return NewSnapshotStatusService(c) -} - -// SnapshotCreate creates a snapshot. -func (c *Client) SnapshotCreate(repository string, snapshot string) *SnapshotCreateService { - return NewSnapshotCreateService(c).Repository(repository).Snapshot(snapshot) -} - -// SnapshotCreateRepository creates or updates a snapshot repository. -func (c *Client) SnapshotCreateRepository(repository string) *SnapshotCreateRepositoryService { - return NewSnapshotCreateRepositoryService(c).Repository(repository) -} - -// SnapshotDelete deletes a snapshot in a snapshot repository. -func (c *Client) SnapshotDelete(repository string, snapshot string) *SnapshotDeleteService { - return NewSnapshotDeleteService(c).Repository(repository).Snapshot(snapshot) -} - -// SnapshotDeleteRepository deletes a snapshot repository. -func (c *Client) SnapshotDeleteRepository(repositories ...string) *SnapshotDeleteRepositoryService { - return NewSnapshotDeleteRepositoryService(c).Repository(repositories...) 
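A typical snapshot flow registers a repository once, then creates snapshots in it. A sketch assuming a shared-filesystem repository; the repository name, location, and snapshot name are placeholders, the Type/Setting builder methods are assumed from the library, and the location must be whitelisted via path.repo on every node:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Register (or update) the repository definition.
	if _, err := client.SnapshotCreateRepository("my_backup").
		Type("fs").
		Setting("location", "/mnt/backups").
		Do(ctx); err != nil {
		log.Fatal(err)
	}

	// Snapshot the whole cluster and block until it finishes.
	if _, err := client.SnapshotCreate("my_backup", "snap-1").
		WaitForCompletion(true).
		Do(ctx); err != nil {
		log.Fatal(err)
	}
	fmt.Println("snapshot snap-1 created in repository my_backup")
}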
-}
-
-// SnapshotGetRepository gets a snapshot repository.
-func (c *Client) SnapshotGetRepository(repositories ...string) *SnapshotGetRepositoryService {
-	return NewSnapshotGetRepositoryService(c).Repository(repositories...)
-}
-
-// SnapshotGet lists snapshots for a repository.
-func (c *Client) SnapshotGet(repository string) *SnapshotGetService {
-	return NewSnapshotGetService(c).Repository(repository)
-}
-
-// SnapshotVerifyRepository verifies a snapshot repository.
-func (c *Client) SnapshotVerifyRepository(repository string) *SnapshotVerifyRepositoryService {
-	return NewSnapshotVerifyRepositoryService(c).Repository(repository)
-}
-
-// SnapshotRestore restores the specified indices from a given snapshot.
-func (c *Client) SnapshotRestore(repository string, snapshot string) *SnapshotRestoreService {
-	return NewSnapshotRestoreService(c).Repository(repository).Snapshot(snapshot)
-}
-
-// -- Scripting APIs --
-
-// GetScript reads a stored script in Elasticsearch.
-// Use PutScript for storing a script.
-func (c *Client) GetScript() *GetScriptService {
-	return NewGetScriptService(c)
-}
-
-// PutScript allows saving a stored script in Elasticsearch.
-func (c *Client) PutScript() *PutScriptService {
-	return NewPutScriptService(c)
-}
-
-// DeleteScript allows removing a stored script from Elasticsearch.
-func (c *Client) DeleteScript() *DeleteScriptService {
-	return NewDeleteScriptService(c)
-}
-
-// -- X-Pack General --
-
-// XPackInfo gets information on the xpack plugins enabled on the cluster.
-func (c *Client) XPackInfo() *XPackInfoService {
-	return NewXPackInfoService(c)
-}
-
-// -- X-Pack Index Lifecycle Management --
-
-// XPackIlmPutLifecycle adds or modifies an ilm policy.
-func (c *Client) XPackIlmPutLifecycle() *XPackIlmPutLifecycleService {
-	return NewXPackIlmPutLifecycleService(c)
-}
-
-// XPackIlmGetLifecycle gets an ilm policy.
-func (c *Client) XPackIlmGetLifecycle() *XPackIlmGetLifecycleService {
-	return NewXPackIlmGetLifecycleService(c)
-}
-
-// XPackIlmDeleteLifecycle deletes an ilm policy.
-func (c *Client) XPackIlmDeleteLifecycle() *XPackIlmDeleteLifecycleService {
-	return NewXPackIlmDeleteLifecycleService(c)
-}
-
-// -- X-Pack Security --
-
-// XPackSecurityGetRoleMapping gets a role mapping.
-func (c *Client) XPackSecurityGetRoleMapping(roleMappingName string) *XPackSecurityGetRoleMappingService {
-	return NewXPackSecurityGetRoleMappingService(c).Name(roleMappingName)
-}
-
-// XPackSecurityPutRoleMapping adds a role mapping.
-func (c *Client) XPackSecurityPutRoleMapping(roleMappingName string) *XPackSecurityPutRoleMappingService {
-	return NewXPackSecurityPutRoleMappingService(c).Name(roleMappingName)
-}
-
-// XPackSecurityDeleteRoleMapping deletes a role mapping.
-func (c *Client) XPackSecurityDeleteRoleMapping(roleMappingName string) *XPackSecurityDeleteRoleMappingService {
-	return NewXPackSecurityDeleteRoleMappingService(c).Name(roleMappingName)
-}
-
-// XPackSecurityGetRole gets a role.
-func (c *Client) XPackSecurityGetRole(roleName string) *XPackSecurityGetRoleService {
-	return NewXPackSecurityGetRoleService(c).Name(roleName)
-}
-
-// XPackSecurityPutRole adds a role.
-func (c *Client) XPackSecurityPutRole(roleName string) *XPackSecurityPutRoleService {
-	return NewXPackSecurityPutRoleService(c).Name(roleName)
-}
-
-// XPackSecurityDeleteRole deletes a role.
-func (c *Client) XPackSecurityDeleteRole(roleName string) *XPackSecurityDeleteRoleService {
-	return NewXPackSecurityDeleteRoleService(c).Name(roleName)
-}
-
-// TODO: Clear role cache API
-// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-clear-role-cache.html
-
-// XPackSecurityChangePassword changes the password of users in the native realm.
-func (c *Client) XPackSecurityChangePassword(username string) *XPackSecurityChangePasswordService {
-	return NewXPackSecurityChangePasswordService(c).Username(username)
-}
-
-// XPackSecurityGetUser gets details about one or more users.
-func (c *Client) XPackSecurityGetUser(usernames ...string) *XPackSecurityGetUserService {
-	return NewXPackSecurityGetUserService(c).Usernames(usernames...)
-}
-
-// XPackSecurityPutUser adds or updates a user.
-func (c *Client) XPackSecurityPutUser(username string) *XPackSecurityPutUserService {
-	return NewXPackSecurityPutUserService(c).Username(username)
-}
-
-// XPackSecurityEnableUser enables a user.
-func (c *Client) XPackSecurityEnableUser(username string) *XPackSecurityEnableUserService {
-	return NewXPackSecurityEnableUserService(c).Username(username)
-}
-
-// XPackSecurityDisableUser disables a user.
-func (c *Client) XPackSecurityDisableUser(username string) *XPackSecurityDisableUserService {
-	return NewXPackSecurityDisableUserService(c).Username(username)
-}
-
-// XPackSecurityDeleteUser deletes a user.
-func (c *Client) XPackSecurityDeleteUser(username string) *XPackSecurityDeleteUserService {
-	return NewXPackSecurityDeleteUserService(c).Username(username)
-}
-
-// -- X-Pack Watcher --
-
-// XPackWatchPut adds a watch.
-func (c *Client) XPackWatchPut(watchId string) *XPackWatcherPutWatchService {
-	return NewXPackWatcherPutWatchService(c).Id(watchId)
-}
-
-// XPackWatchGet gets a watch.
-func (c *Client) XPackWatchGet(watchId string) *XPackWatcherGetWatchService {
-	return NewXPackWatcherGetWatchService(c).Id(watchId)
-}
-
-// XPackWatchDelete deletes a watch.
-func (c *Client) XPackWatchDelete(watchId string) *XPackWatcherDeleteWatchService {
-	return NewXPackWatcherDeleteWatchService(c).Id(watchId)
-}
-
-// XPackWatchExecute executes a watch.
-func (c *Client) XPackWatchExecute() *XPackWatcherExecuteWatchService {
-	return NewXPackWatcherExecuteWatchService(c)
-}
-
-// XPackWatchAck acknowledges a watch.
-func (c *Client) XPackWatchAck(watchId string) *XPackWatcherAckWatchService {
-	return NewXPackWatcherAckWatchService(c).WatchId(watchId)
-}
-
-// XPackWatchActivate activates a watch.
-func (c *Client) XPackWatchActivate(watchId string) *XPackWatcherActivateWatchService {
-	return NewXPackWatcherActivateWatchService(c).WatchId(watchId)
-}
-
-// XPackWatchDeactivate deactivates a watch.
-func (c *Client) XPackWatchDeactivate(watchId string) *XPackWatcherDeactivateWatchService {
-	return NewXPackWatcherDeactivateWatchService(c).WatchId(watchId)
-}
-
-// XPackWatchStats returns the current Watcher metrics.
-func (c *Client) XPackWatchStats() *XPackWatcherStatsService {
-	return NewXPackWatcherStatsService(c)
-}
-
-// XPackWatchStart starts the Watcher service.
-func (c *Client) XPackWatchStart() *XPackWatcherStartService {
-	return NewXPackWatcherStartService(c)
-}
-
-// XPackWatchStop stops the Watcher service.
-func (c *Client) XPackWatchStop() *XPackWatcherStopService {
-	return NewXPackWatcherStopService(c)
-}
-
-// -- Helpers and shortcuts --
-
-// ElasticsearchVersion returns the version number of Elasticsearch
-// running on the given URL.
-func (c *Client) ElasticsearchVersion(url string) (string, error) {
-	res, _, err := c.Ping(url).Do(context.Background())
-	if err != nil {
-		return "", err
-	}
-	return res.Version.Number, nil
-}
-
-// IndexNames returns the names of all indices in the cluster.
-func (c *Client) IndexNames() ([]string, error) {
-	res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
-	if err != nil {
-		return nil, err
-	}
-	var names []string
-	for name := range res {
-		names = append(names, name)
-	}
-	return names, nil
-}
-
-// Ping checks if a given node in a cluster exists and (optionally)
-// returns some basic information about the Elasticsearch server,
-// e.g. the Elasticsearch version number.
-//
-// Notice that you need to specify a URL here explicitly.
-func (c *Client) Ping(url string) *PingService {
-	return NewPingService(c).URL(url)
-}
-
-// WaitForStatus waits for the cluster to have the given status.
-// This is a shortcut method for the ClusterHealth service.
-//
-// WaitForStatus waits for the specified timeout, e.g. "10s".
-// If the cluster reaches the given state within the timeout, nil is returned.
-// If the request timed out, ErrTimeout is returned.
-func (c *Client) WaitForStatus(status string, timeout string) error {
-	health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
-	if err != nil {
-		return err
-	}
-	if health.TimedOut {
-		return ErrTimeout
-	}
-	return nil
-}
-
-// WaitForGreenStatus waits for the cluster to have the "green" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForGreenStatus(timeout string) error {
-	return c.WaitForStatus("green", timeout)
-}
-
-// WaitForYellowStatus waits for the cluster to have the "yellow" status.
-// See WaitForStatus for more details.
-func (c *Client) WaitForYellowStatus(timeout string) error {
-	return c.WaitForStatus("yellow", timeout)
-}
diff --git a/vendor/github.com/olivere/elastic/v7/cluster_health.go b/vendor/github.com/olivere/elastic/v7/cluster_health.go
deleted file mode 100644
index eba5e22..0000000
--- a/vendor/github.com/olivere/elastic/v7/cluster_health.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/olivere/elastic/v7/uritemplates"
-)
-
-// ClusterHealthService retrieves a very simple status on the health of the cluster.
-//
-// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-health.html
-// for details.
-type ClusterHealthService struct {
-	client *Client
-
-	pretty     *bool       // pretty format the returned JSON response
-	human      *bool       // return human readable values for statistics
-	errorTrace *bool       // include the stack trace of returned errors
-	filterPath []string    // list of filters used to reduce the response
-	headers    http.Header // custom request-level HTTP headers
-
-	indices                   []string
-	level                     string
-	local                     *bool
-	masterTimeout             string
-	timeout                   string
-	waitForActiveShards       *int
-	waitForNodes              string
-	waitForNoRelocatingShards *bool
-	waitForStatus             string
-}
-
-// NewClusterHealthService creates a new ClusterHealthService.
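These helpers are handy at process startup: block until the cluster is usable, then record which server version you are talking to. A sketch with a placeholder URL:

package main

import (
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Wait up to 30s for at least yellow health before doing real work.
	if err := client.WaitForYellowStatus("30s"); err != nil {
		log.Fatal(err)
	}
	version, err := client.ElasticsearchVersion("http://127.0.0.1:9200")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to Elasticsearch", version)
}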
-func NewClusterHealthService(client *Client) *ClusterHealthService { - return &ClusterHealthService{ - client: client, - indices: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ClusterHealthService) Pretty(pretty bool) *ClusterHealthService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ClusterHealthService) Human(human bool) *ClusterHealthService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ClusterHealthService) ErrorTrace(errorTrace bool) *ClusterHealthService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ClusterHealthService) FilterPath(filterPath ...string) *ClusterHealthService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ClusterHealthService) Header(name string, value string) *ClusterHealthService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ClusterHealthService) Headers(headers http.Header) *ClusterHealthService { - s.headers = headers - return s -} - -// Index limits the information returned to specific indices. -func (s *ClusterHealthService) Index(indices ...string) *ClusterHealthService { - s.indices = append(s.indices, indices...) - return s -} - -// Level specifies the level of detail for returned information. -func (s *ClusterHealthService) Level(level string) *ClusterHealthService { - s.level = level - return s -} - -// Local indicates whether to return local information. If it is true, -// we do not retrieve the state from master node (default: false). -func (s *ClusterHealthService) Local(local bool) *ClusterHealthService { - s.local = &local - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *ClusterHealthService) MasterTimeout(masterTimeout string) *ClusterHealthService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout specifies an explicit operation timeout. -func (s *ClusterHealthService) Timeout(timeout string) *ClusterHealthService { - s.timeout = timeout - return s -} - -// WaitForActiveShards can be used to wait until the specified number of shards are active. -func (s *ClusterHealthService) WaitForActiveShards(waitForActiveShards int) *ClusterHealthService { - s.waitForActiveShards = &waitForActiveShards - return s -} - -// WaitForNodes can be used to wait until the specified number of nodes are available. -// Example: "12" to wait for exact values, ">12" and "<12" for ranges. -func (s *ClusterHealthService) WaitForNodes(waitForNodes string) *ClusterHealthService { - s.waitForNodes = waitForNodes - return s -} - -// WaitForNoRelocatingShards can be used to wait until all shard relocations are finished. -func (s *ClusterHealthService) WaitForNoRelocatingShards(waitForNoRelocatingShards bool) *ClusterHealthService { - s.waitForNoRelocatingShards = &waitForNoRelocatingShards - return s -} - -// WaitForStatus can be used to wait until the cluster is in a specific state. -// Valid values are: green, yellow, or red. 
-func (s *ClusterHealthService) WaitForStatus(waitForStatus string) *ClusterHealthService {
-	s.waitForStatus = waitForStatus
-	return s
-}
-
-// WaitForGreenStatus will wait for the "green" state.
-func (s *ClusterHealthService) WaitForGreenStatus() *ClusterHealthService {
-	return s.WaitForStatus("green")
-}
-
-// WaitForYellowStatus will wait for the "yellow" state.
-func (s *ClusterHealthService) WaitForYellowStatus() *ClusterHealthService {
-	return s.WaitForStatus("yellow")
-}
-
-// buildURL builds the URL for the operation.
-func (s *ClusterHealthService) buildURL() (string, url.Values, error) {
-	// Build URL
-	var err error
-	var path string
-	if len(s.indices) > 0 {
-		path, err = uritemplates.Expand("/_cluster/health/{index}", map[string]string{
-			"index": strings.Join(s.indices, ","),
-		})
-	} else {
-		path = "/_cluster/health"
-	}
-	if err != nil {
-		return "", url.Values{}, err
-	}
-
-	// Add query string parameters
-	params := url.Values{}
-	if v := s.pretty; v != nil {
-		params.Set("pretty", fmt.Sprint(*v))
-	}
-	if v := s.human; v != nil {
-		params.Set("human", fmt.Sprint(*v))
-	}
-	if v := s.errorTrace; v != nil {
-		params.Set("error_trace", fmt.Sprint(*v))
-	}
-	if len(s.filterPath) > 0 {
-		params.Set("filter_path", strings.Join(s.filterPath, ","))
-	}
-	if s.level != "" {
-		params.Set("level", s.level)
-	}
-	if s.local != nil {
-		params.Set("local", fmt.Sprintf("%v", *s.local))
-	}
-	if s.masterTimeout != "" {
-		params.Set("master_timeout", s.masterTimeout)
-	}
-	if s.timeout != "" {
-		params.Set("timeout", s.timeout)
-	}
-	if s.waitForActiveShards != nil {
-		params.Set("wait_for_active_shards", fmt.Sprintf("%v", *s.waitForActiveShards))
-	}
-	if s.waitForNodes != "" {
-		params.Set("wait_for_nodes", s.waitForNodes)
-	}
-	if s.waitForNoRelocatingShards != nil {
-		params.Set("wait_for_no_relocating_shards", fmt.Sprintf("%v", *s.waitForNoRelocatingShards))
-	}
-	if s.waitForStatus != "" {
-		params.Set("wait_for_status", s.waitForStatus)
-	}
-	return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *ClusterHealthService) Validate() error {
-	return nil
-}
-
-// Do executes the operation.
-func (s *ClusterHealthService) Do(ctx context.Context) (*ClusterHealthResponse, error) {
-	// Check pre-conditions
-	if err := s.Validate(); err != nil {
-		return nil, err
-	}
-
-	// Get URL for request
-	path, params, err := s.buildURL()
-	if err != nil {
-		return nil, err
-	}
-
-	// Get HTTP response
-	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-		Method:  "GET",
-		Path:    path,
-		Params:  params,
-		Headers: s.headers,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	// Return operation response
-	ret := new(ClusterHealthResponse)
-	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
-		return nil, err
-	}
-	return ret, nil
-}
-
-// ClusterHealthResponse is the response of ClusterHealthService.Do.
-type ClusterHealthResponse struct { - ClusterName string `json:"cluster_name"` - Status string `json:"status"` - TimedOut bool `json:"timed_out"` - NumberOfNodes int `json:"number_of_nodes"` - NumberOfDataNodes int `json:"number_of_data_nodes"` - ActivePrimaryShards int `json:"active_primary_shards"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - DelayedUnassignedShards int `json:"delayed_unassigned_shards"` - NumberOfPendingTasks int `json:"number_of_pending_tasks"` - NumberOfInFlightFetch int `json:"number_of_in_flight_fetch"` - TaskMaxWaitTimeInQueue string `json:"task_max_waiting_in_queue"` // "0s" - TaskMaxWaitTimeInQueueInMillis int `json:"task_max_waiting_in_queue_millis"` // 0 - ActiveShardsPercent string `json:"active_shards_percent"` // "100.0%" - ActiveShardsPercentAsNumber float64 `json:"active_shards_percent_as_number"` // 100.0 - - // Index name -> index health - Indices map[string]*ClusterIndexHealth `json:"indices"` -} - -// ClusterIndexHealth will be returned as part of ClusterHealthResponse. -type ClusterIndexHealth struct { - Status string `json:"status"` - NumberOfShards int `json:"number_of_shards"` - NumberOfReplicas int `json:"number_of_replicas"` - ActivePrimaryShards int `json:"active_primary_shards"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` - // Shards by id, e.g. "0" or "1" - Shards map[string]*ClusterShardHealth `json:"shards"` -} - -// ClusterShardHealth will be returned as part of ClusterHealthResponse. -type ClusterShardHealth struct { - Status string `json:"status"` - PrimaryActive bool `json:"primary_active"` - ActiveShards int `json:"active_shards"` - RelocatingShards int `json:"relocating_shards"` - InitializingShards int `json:"initializing_shards"` - UnassignedShards int `json:"unassigned_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/cluster_reroute.go b/vendor/github.com/olivere/elastic/v7/cluster_reroute.go deleted file mode 100644 index 92a9625..0000000 --- a/vendor/github.com/olivere/elastic/v7/cluster_reroute.go +++ /dev/null @@ -1,438 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// ClusterRerouteService allows for manual changes to the allocation of -// individual shards in the cluster. For example, a shard can be moved from -// one node to another explicitly, an allocation can be cancelled, and -// an unassigned shard can be explicitly allocated to a specific node. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-reroute.html -// for details. 
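Putting ClusterHealthService and its response type together: a sketch that asks for per-index detail and prints a few of the fields defined above (cluster URL is a placeholder, client setup as before):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	health, err := client.ClusterHealth().
		Level("indices"). // populate the Indices map in the response
		Timeout("10s").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s is %s (%d nodes, %d unassigned shards)\n",
		health.ClusterName, health.Status,
		health.NumberOfNodes, health.UnassignedShards)
	for name, idx := range health.Indices {
		fmt.Printf("  index %s: %s\n", name, idx.Status)
	}
}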
-type ClusterRerouteService struct {
-	client *Client
-
-	pretty     *bool       // pretty format the returned JSON response
-	human      *bool       // return human readable values for statistics
-	errorTrace *bool       // include the stack trace of returned errors
-	filterPath []string    // list of filters used to reduce the response
-	headers    http.Header // custom request-level HTTP headers
-
-	metrics       []string
-	dryRun        *bool
-	explain       *bool
-	retryFailed   *bool
-	masterTimeout string
-	timeout       string
-	commands      []AllocationCommand
-	body          interface{}
-}
-
-// NewClusterRerouteService creates a new ClusterRerouteService.
-func NewClusterRerouteService(client *Client) *ClusterRerouteService {
-	return &ClusterRerouteService{
-		client: client,
-	}
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *ClusterRerouteService) Pretty(pretty bool) *ClusterRerouteService {
-	s.pretty = &pretty
-	return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *ClusterRerouteService) Human(human bool) *ClusterRerouteService {
-	s.human = &human
-	return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *ClusterRerouteService) ErrorTrace(errorTrace bool) *ClusterRerouteService {
-	s.errorTrace = &errorTrace
-	return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *ClusterRerouteService) FilterPath(filterPath ...string) *ClusterRerouteService {
-	s.filterPath = filterPath
-	return s
-}
-
-// Header adds a header to the request.
-func (s *ClusterRerouteService) Header(name string, value string) *ClusterRerouteService {
-	if s.headers == nil {
-		s.headers = http.Header{}
-	}
-	s.headers.Add(name, value)
-	return s
-}
-
-// Headers specifies the headers of the request.
-func (s *ClusterRerouteService) Headers(headers http.Header) *ClusterRerouteService {
-	s.headers = headers
-	return s
-}
-
-// Metric limits the information returned to the specified metric.
-// It can be one of: "_all", "blocks", "metadata", "nodes", "routing_table", "master_node", "version".
-// Defaults to all but metadata.
-func (s *ClusterRerouteService) Metric(metrics ...string) *ClusterRerouteService {
-	s.metrics = append(s.metrics, metrics...)
-	return s
-}
-
-// DryRun indicates whether to simulate the operation only and return the
-// resulting state.
-func (s *ClusterRerouteService) DryRun(dryRun bool) *ClusterRerouteService {
-	s.dryRun = &dryRun
-	return s
-}
-
-// Explain, when set to true, returns an explanation of why the commands
-// can or cannot be executed.
-func (s *ClusterRerouteService) Explain(explain bool) *ClusterRerouteService {
-	s.explain = &explain
-	return s
-}
-
-// RetryFailed indicates whether to retry allocation of shards that are blocked
-// due to too many subsequent allocation failures.
-func (s *ClusterRerouteService) RetryFailed(retryFailed bool) *ClusterRerouteService {
-	s.retryFailed = &retryFailed
-	return s
-}
-
-// MasterTimeout specifies an explicit timeout for connection to master.
-func (s *ClusterRerouteService) MasterTimeout(masterTimeout string) *ClusterRerouteService {
-	s.masterTimeout = masterTimeout
-	return s
-}
-
-// Timeout specifies an explicit operation timeout.
-func (s *ClusterRerouteService) Timeout(timeout string) *ClusterRerouteService {
-	s.timeout = timeout
-	return s
-}
-
-// Add adds one or more commands to be executed.
-func (s *ClusterRerouteService) Add(commands ...AllocationCommand) *ClusterRerouteService { - s.commands = append(s.commands, commands...) - return s -} - -// Body specifies the body to be sent. -// If you specify Body, the commands passed via Add are ignored. -// In other words: Body takes precedence over Add. -func (s *ClusterRerouteService) Body(body interface{}) *ClusterRerouteService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *ClusterRerouteService) buildURL() (string, url.Values, error) { - // Build URL - path := "/_cluster/reroute" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.dryRun; v != nil { - params.Set("dry_run", fmt.Sprint(*v)) - } - if v := s.explain; v != nil { - params.Set("explain", fmt.Sprint(*v)) - } - if v := s.retryFailed; v != nil { - params.Set("retry_failed", fmt.Sprint(*v)) - } - if len(s.metrics) > 0 { - params.Set("metric", strings.Join(s.metrics, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClusterRerouteService) Validate() error { - if s.body == nil && len(s.commands) == 0 { - return errors.New("missing allocate commands or raw body") - } - return nil -} - -// Do executes the operation. -func (s *ClusterRerouteService) Do(ctx context.Context) (*ClusterRerouteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.body != nil { - body = s.body - } else { - commands := make([]interface{}, len(s.commands)) - for i, cmd := range s.commands { - src, err := cmd.Source() - if err != nil { - return nil, err - } - commands[i] = map[string]interface{}{ - cmd.Name(): src, - } - } - query := make(map[string]interface{}) - query["commands"] = commands - body = query - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClusterRerouteResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClusterRerouteResponse is the response of ClusterRerouteService.Do. -type ClusterRerouteResponse struct { - State *ClusterStateResponse `json:"state"` - Explanations []RerouteExplanation `json:"explanations,omitempty"` -} - -// RerouteExplanation is returned in ClusterRerouteResponse if -// the "explain" parameter is set to "true". -type RerouteExplanation struct { - Command string `json:"command"` - Parameters map[string]interface{} `json:"parameters"` - Decisions []RerouteDecision `json:"decisions"` -} - -// RerouteDecision is a decision the decider made while rerouting. 
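Because reroute commands can thrash shards or, in the allocate_*_primary cases, lose data, combining DryRun with Explain is a prudent first step. A sketch that simulates moving one shard of a placeholder index between two placeholder node names:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	// Simulate only: nothing is applied while DryRun is set.
	cmd := elastic.NewMoveAllocationCommand("my-index", 0, "node-1", "node-2")
	res, err := client.ClusterReroute().
		Add(cmd).
		DryRun(true).
		Explain(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, ex := range res.Explanations {
		fmt.Printf("command %s: %d decisions\n", ex.Command, len(ex.Decisions))
	}
}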
-type RerouteDecision interface{} - -// -- Allocation commands -- - -// AllocationCommand is a command to be executed in a call -// to Cluster Reroute API. -type AllocationCommand interface { - Name() string - Source() (interface{}, error) -} - -var _ AllocationCommand = (*MoveAllocationCommand)(nil) - -// MoveAllocationCommand moves a shard from a specific node to -// another node. -type MoveAllocationCommand struct { - index string - shardId int - fromNode string - toNode string -} - -// NewMoveAllocationCommand creates a new MoveAllocationCommand. -func NewMoveAllocationCommand(index string, shardId int, fromNode, toNode string) *MoveAllocationCommand { - return &MoveAllocationCommand{ - index: index, - shardId: shardId, - fromNode: fromNode, - toNode: toNode, - } -} - -// Name of the command in a request to the Cluster Reroute API. -func (cmd *MoveAllocationCommand) Name() string { return "move" } - -// Source generates the (inner) JSON to be used when serializing the command. -func (cmd *MoveAllocationCommand) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["index"] = cmd.index - source["shard"] = cmd.shardId - source["from_node"] = cmd.fromNode - source["to_node"] = cmd.toNode - return source, nil -} - -var _ AllocationCommand = (*CancelAllocationCommand)(nil) - -// CancelAllocationCommand cancels relocation, or recovery of a given shard on a node. -type CancelAllocationCommand struct { - index string - shardId int - node string - allowPrimary bool -} - -// NewCancelAllocationCommand creates a new CancelAllocationCommand. -func NewCancelAllocationCommand(index string, shardId int, node string, allowPrimary bool) *CancelAllocationCommand { - return &CancelAllocationCommand{ - index: index, - shardId: shardId, - node: node, - allowPrimary: allowPrimary, - } -} - -// Name of the command in a request to the Cluster Reroute API. -func (cmd *CancelAllocationCommand) Name() string { return "cancel" } - -// Source generates the (inner) JSON to be used when serializing the command. -func (cmd *CancelAllocationCommand) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["index"] = cmd.index - source["shard"] = cmd.shardId - source["node"] = cmd.node - source["allow_primary"] = cmd.allowPrimary - return source, nil -} - -var _ AllocationCommand = (*AllocateStalePrimaryAllocationCommand)(nil) - -// AllocateStalePrimaryAllocationCommand allocates an unassigned stale -// primary shard to a specific node. Use with extreme care as this will -// result in data loss. Allocation deciders are ignored. -type AllocateStalePrimaryAllocationCommand struct { - index string - shardId int - node string - acceptDataLoss bool -} - -// NewAllocateStalePrimaryAllocationCommand creates a new -// AllocateStalePrimaryAllocationCommand. -func NewAllocateStalePrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateStalePrimaryAllocationCommand { - return &AllocateStalePrimaryAllocationCommand{ - index: index, - shardId: shardId, - node: node, - acceptDataLoss: acceptDataLoss, - } -} - -// Name of the command in a request to the Cluster Reroute API. -func (cmd *AllocateStalePrimaryAllocationCommand) Name() string { return "allocate_stale_primary" } - -// Source generates the (inner) JSON to be used when serializing the command. 
-func (cmd *AllocateStalePrimaryAllocationCommand) Source() (interface{}, error) {
-	source := make(map[string]interface{})
-	source["index"] = cmd.index
-	source["shard"] = cmd.shardId
-	source["node"] = cmd.node
-	source["accept_data_loss"] = cmd.acceptDataLoss
-	return source, nil
-}
-
-var _ AllocationCommand = (*AllocateReplicaAllocationCommand)(nil)
-
-// AllocateReplicaAllocationCommand allocates an unassigned replica shard
-// to a specific node. Checks if allocation deciders allow allocation.
-type AllocateReplicaAllocationCommand struct {
-	index   string
-	shardId int
-	node    string
-}
-
-// NewAllocateReplicaAllocationCommand creates a new
-// AllocateReplicaAllocationCommand.
-func NewAllocateReplicaAllocationCommand(index string, shardId int, node string) *AllocateReplicaAllocationCommand {
-	return &AllocateReplicaAllocationCommand{
-		index:   index,
-		shardId: shardId,
-		node:    node,
-	}
-}
-
-// Name of the command in a request to the Cluster Reroute API.
-func (cmd *AllocateReplicaAllocationCommand) Name() string { return "allocate_replica" }
-
-// Source generates the (inner) JSON to be used when serializing the command.
-func (cmd *AllocateReplicaAllocationCommand) Source() (interface{}, error) {
-	source := make(map[string]interface{})
-	source["index"] = cmd.index
-	source["shard"] = cmd.shardId
-	source["node"] = cmd.node
-	return source, nil
-}
-
-// AllocateEmptyPrimaryAllocationCommand allocates an unassigned empty
-// primary shard to a specific node. Use with extreme care as this will
-// result in data loss. Allocation deciders are ignored.
-type AllocateEmptyPrimaryAllocationCommand struct {
-	index          string
-	shardId        int
-	node           string
-	acceptDataLoss bool
-}
-
-// NewAllocateEmptyPrimaryAllocationCommand creates a new
-// AllocateEmptyPrimaryAllocationCommand.
-func NewAllocateEmptyPrimaryAllocationCommand(index string, shardId int, node string, acceptDataLoss bool) *AllocateEmptyPrimaryAllocationCommand {
-	return &AllocateEmptyPrimaryAllocationCommand{
-		index:          index,
-		shardId:        shardId,
-		node:           node,
-		acceptDataLoss: acceptDataLoss,
-	}
-}
-
-// Name of the command in a request to the Cluster Reroute API.
-func (cmd *AllocateEmptyPrimaryAllocationCommand) Name() string { return "allocate_empty_primary" }
-
-// Source generates the (inner) JSON to be used when serializing the command.
-func (cmd *AllocateEmptyPrimaryAllocationCommand) Source() (interface{}, error) {
-	source := make(map[string]interface{})
-	source["index"] = cmd.index
-	source["shard"] = cmd.shardId
-	source["node"] = cmd.node
-	source["accept_data_loss"] = cmd.acceptDataLoss
-	return source, nil
-}
diff --git a/vendor/github.com/olivere/elastic/v7/cluster_state.go b/vendor/github.com/olivere/elastic/v7/cluster_state.go
deleted file mode 100644
index 7d30086..0000000
--- a/vendor/github.com/olivere/elastic/v7/cluster_state.go
+++ /dev/null
@@ -1,355 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/olivere/elastic/v7/uritemplates"
-)
-
-// ClusterStateService retrieves comprehensive state information of the whole cluster.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-state.html
-// for details.
-type ClusterStateService struct {
-	client *Client
-
-	pretty     *bool       // pretty format the returned JSON response
-	human      *bool       // return human readable values for statistics
-	errorTrace *bool       // include the stack trace of returned errors
-	filterPath []string    // list of filters used to reduce the response
-	headers    http.Header // custom request-level HTTP headers
-
-	indices           []string
-	metrics           []string
-	allowNoIndices    *bool
-	expandWildcards   string
-	flatSettings      *bool
-	ignoreUnavailable *bool
-	local             *bool
-	masterTimeout     string
-}
-
-// NewClusterStateService creates a new ClusterStateService.
-func NewClusterStateService(client *Client) *ClusterStateService {
-	return &ClusterStateService{
-		client:  client,
-		indices: make([]string, 0),
-		metrics: make([]string, 0),
-	}
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *ClusterStateService) Pretty(pretty bool) *ClusterStateService {
-	s.pretty = &pretty
-	return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *ClusterStateService) Human(human bool) *ClusterStateService {
-	s.human = &human
-	return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *ClusterStateService) ErrorTrace(errorTrace bool) *ClusterStateService {
-	s.errorTrace = &errorTrace
-	return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *ClusterStateService) FilterPath(filterPath ...string) *ClusterStateService {
-	s.filterPath = filterPath
-	return s
-}
-
-// Header adds a header to the request.
-func (s *ClusterStateService) Header(name string, value string) *ClusterStateService {
-	if s.headers == nil {
-		s.headers = http.Header{}
-	}
-	s.headers.Add(name, value)
-	return s
-}
-
-// Headers specifies the headers of the request.
-func (s *ClusterStateService) Headers(headers http.Header) *ClusterStateService {
-	s.headers = headers
-	return s
-}
-
-// Index is a list of index names. Use _all or an empty string to
-// perform the operation on all indices.
-func (s *ClusterStateService) Index(indices ...string) *ClusterStateService {
-	s.indices = append(s.indices, indices...)
-	return s
-}
-
-// Metric limits the information returned to the specified metric.
-// It can be one of: version, master_node, nodes, routing_table, metadata,
-// blocks, or customs.
-func (s *ClusterStateService) Metric(metrics ...string) *ClusterStateService {
-	s.metrics = append(s.metrics, metrics...)
-	return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *ClusterStateService) AllowNoIndices(allowNoIndices bool) *ClusterStateService {
-	s.allowNoIndices = &allowNoIndices
-	return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expressions to
-// concrete indices that are open, closed, or both.
-func (s *ClusterStateService) ExpandWildcards(expandWildcards string) *ClusterStateService {
-	s.expandWildcards = expandWildcards
-	return s
-}
-
-// FlatSettings, when set, returns settings in flat format (default: false).
-func (s *ClusterStateService) FlatSettings(flatSettings bool) *ClusterStateService {
-	s.flatSettings = &flatSettings
-	return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *ClusterStateService) IgnoreUnavailable(ignoreUnavailable bool) *ClusterStateService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Local indicates whether to return local information. When set, it does not -// retrieve the state from master node (default: false). -func (s *ClusterStateService) Local(local bool) *ClusterStateService { - s.local = &local - return s -} - -// MasterTimeout specifies timeout for connection to master. -func (s *ClusterStateService) MasterTimeout(masterTimeout string) *ClusterStateService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *ClusterStateService) buildURL() (string, url.Values, error) { - // Build URL - metrics := strings.Join(s.metrics, ",") - if metrics == "" { - metrics = "_all" - } - indices := strings.Join(s.indices, ",") - if indices == "" { - indices = "_all" - } - path, err := uritemplates.Expand("/_cluster/state/{metrics}/{indices}", map[string]string{ - "metrics": metrics, - "indices": indices, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClusterStateService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *ClusterStateService) Do(ctx context.Context) (*ClusterStateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClusterStateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClusterStateResponse is the response of ClusterStateService.Do. 
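The full cluster state can be very large, so narrowing the metrics and indices is the usual pattern. A sketch that fetches only the metadata metric for one placeholder index and prints top-level fields from the response defined below:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"), // placeholder
		elastic.SetSniff(false),
	)
	if err != nil {
		log.Fatal(err)
	}

	state, err := client.ClusterState().
		Metric("metadata"). // skip routing tables, blocks, etc.
		Index("my-index").  // placeholder index name
		FlatSettings(true).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster %s, state version %d, master node %s\n",
		state.ClusterName, state.Version, state.MasterNode)
}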
-type ClusterStateResponse struct { - ClusterName string `json:"cluster_name"` - ClusterUUID string `json:"cluster_uuid"` - Version int64 `json:"version"` - StateUUID string `json:"state_uuid"` - MasterNode string `json:"master_node"` - Blocks map[string]*clusterBlocks `json:"blocks"` - Nodes map[string]*discoveryNode `json:"nodes"` - Metadata *clusterStateMetadata `json:"metadata"` - RoutingTable *clusterStateRoutingTable `json:"routing_table"` - RoutingNodes *clusterStateRoutingNode `json:"routing_nodes"` - Customs map[string]interface{} `json:"customs"` -} - -type clusterBlocks struct { - Global map[string]*clusterBlock `json:"global"` // id -> cluster block - Indices map[string]*clusterBlock `json:"indices"` // index name -> cluster block -} - -type clusterBlock struct { - Description string `json:"description"` - Retryable bool `json:"retryable"` - DisableStatePersistence bool `json:"disable_state_persistence"` - Levels []string `json:"levels"` -} - -type clusterStateMetadata struct { - ClusterUUID string `json:"cluster_uuid"` - ClusterUUIDCommitted string `json:"cluster_uuid_committed"` - ClusterCoordination *clusterCoordinationMetaData `json:"cluster_coordination"` - Templates map[string]*indexTemplateMetaData `json:"templates"` // template name -> index template metadata - Indices map[string]*indexMetaData `json:"indices"` // index name _> meta data - RoutingTable struct { - Indices map[string]*indexRoutingTable `json:"indices"` // index name -> routing table - } `json:"routing_table"` - RoutingNodes struct { - Unassigned []*shardRouting `json:"unassigned"` - Nodes []*shardRouting `json:"nodes"` - } `json:"routing_nodes"` - Customs map[string]interface{} `json:"customs"` - Ingest map[string]interface{} `json:"ingest"` - StoredScripts map[string]interface{} `json:"stored_scripts"` - IndexGraveyard map[string]interface{} `json:"index-graveyard"` -} - -type clusterCoordinationMetaData struct { - Term int64 `json:"term"` - LastCommittedConfig interface{} `json:"last_committed_config,omitempty"` - LastAcceptedConfig interface{} `json:"last_accepted_config,omitempty"` - VotingConfigExclusions []interface{} `json:"voting_config_exclusions,omitempty"` -} - -type discoveryNode struct { - Name string `json:"name"` // server name, e.g. "es1" - EphemeralID string `json:"ephemeral_id"` // e.g. "paHSLpn6QyuVy_n-GM1JAQ" - TransportAddress string `json:"transport_address"` // e.g. inet[/1.2.3.4:9300] - Attributes map[string]interface{} `json:"attributes"` // e.g. { "data": true, "master": true } -} - -type clusterStateRoutingTable struct { - Indices map[string]interface{} `json:"indices"` -} - -type clusterStateRoutingNode struct { - Unassigned []*shardRouting `json:"unassigned"` - // Node Id -> shardRouting - Nodes map[string][]*shardRouting `json:"nodes"` -} - -type indexTemplateMetaData struct { - IndexPatterns []string `json:"index_patterns"` // e.g. ["store-*"] - Order int `json:"order"` - Settings map[string]interface{} `json:"settings"` // index settings - Mappings map[string]interface{} `json:"mappings"` // type name -> mapping -} - -type indexMetaData struct { - State string `json:"state"` - Settings map[string]interface{} `json:"settings"` - Mappings map[string]interface{} `json:"mappings"` - Aliases []string `json:"aliases"` // e.g. 
[ "alias1", "alias2" ] - PrimaryTerms map[string]interface{} `json:"primary_terms"` - InSyncAllocations map[string]interface{} `json:"in_sync_allocations"` -} - -type indexRoutingTable struct { - Shards map[string]*shardRouting `json:"shards"` -} - -type shardRouting struct { - State string `json:"state"` - Primary bool `json:"primary"` - Node string `json:"node"` - RelocatingNode string `json:"relocating_node"` - Shard int `json:"shard"` - Index string `json:"index"` - Version int64 `json:"version"` - RestoreSource *RestoreSource `json:"restore_source"` - AllocationId *allocationId `json:"allocation_id"` - UnassignedInfo *unassignedInfo `json:"unassigned_info"` -} - -type RestoreSource struct { - Repository string `json:"repository"` - Snapshot string `json:"snapshot"` - Version string `json:"version"` - Index string `json:"index"` -} - -type allocationId struct { - Id string `json:"id"` - RelocationId string `json:"relocation_id"` -} - -type unassignedInfo struct { - Reason string `json:"reason"` - At string `json:"at"` - Details string `json:"details"` -} diff --git a/vendor/github.com/olivere/elastic/v7/cluster_stats.go b/vendor/github.com/olivere/elastic/v7/cluster_stats.go deleted file mode 100644 index 711c2a7..0000000 --- a/vendor/github.com/olivere/elastic/v7/cluster_stats.go +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// ClusterStatsService is documented at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-stats.html. -type ClusterStatsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - nodeId []string - flatSettings *bool -} - -// NewClusterStatsService creates a new ClusterStatsService. -func NewClusterStatsService(client *Client) *ClusterStatsService { - return &ClusterStatsService{ - client: client, - nodeId: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ClusterStatsService) Pretty(pretty bool) *ClusterStatsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ClusterStatsService) Human(human bool) *ClusterStatsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ClusterStatsService) ErrorTrace(errorTrace bool) *ClusterStatsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ClusterStatsService) FilterPath(filterPath ...string) *ClusterStatsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ClusterStatsService) Header(name string, value string) *ClusterStatsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. 
-func (s *ClusterStatsService) Headers(headers http.Header) *ClusterStatsService { - s.headers = headers - return s -} - -// NodeId is documented as: A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes. -func (s *ClusterStatsService) NodeId(nodeId []string) *ClusterStatsService { - s.nodeId = nodeId - return s -} - -// FlatSettings is documented as: Return settings in flat format (default: false). -func (s *ClusterStatsService) FlatSettings(flatSettings bool) *ClusterStatsService { - s.flatSettings = &flatSettings - return s -} - -// buildURL builds the URL for the operation. -func (s *ClusterStatsService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.nodeId) > 0 { - path, err = uritemplates.Expand("/_cluster/stats/nodes/{node_id}", map[string]string{ - "node_id": strings.Join(s.nodeId, ","), - }) - if err != nil { - return "", url.Values{}, err - } - } else { - path, err = uritemplates.Expand("/_cluster/stats", map[string]string{}) - if err != nil { - return "", url.Values{}, err - } - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ClusterStatsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *ClusterStatsService) Do(ctx context.Context) (*ClusterStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ClusterStatsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ClusterStatsResponse is the response of ClusterStatsService.Do. -type ClusterStatsResponse struct { - NodesStats *ShardsInfo `json:"_nodes,omitempty"` - Timestamp int64 `json:"timestamp"` - ClusterName string `json:"cluster_name"` - ClusterUUID string `json:"cluster_uuid"` - Status string `json:"status,omitempty"` // e.g. 
green - Indices *ClusterStatsIndices `json:"indices"` - Nodes *ClusterStatsNodes `json:"nodes"` -} - -type ClusterStatsIndices struct { - Count int `json:"count"` // number of indices - Shards *ClusterStatsIndicesShards `json:"shards"` - Docs *ClusterStatsIndicesDocs `json:"docs"` - Store *ClusterStatsIndicesStore `json:"store"` - FieldData *ClusterStatsIndicesFieldData `json:"fielddata"` - QueryCache *ClusterStatsIndicesQueryCache `json:"query_cache"` - Completion *ClusterStatsIndicesCompletion `json:"completion"` - Segments *IndexStatsSegments `json:"segments"` -} - -type ClusterStatsIndicesShards struct { - Total int `json:"total"` - Primaries int `json:"primaries"` - Replication float64 `json:"replication"` - Index *ClusterStatsIndicesShardsIndex `json:"index"` -} - -type ClusterStatsIndicesShardsIndex struct { - Shards *ClusterStatsIndicesShardsIndexIntMinMax `json:"shards"` - Primaries *ClusterStatsIndicesShardsIndexIntMinMax `json:"primaries"` - Replication *ClusterStatsIndicesShardsIndexFloat64MinMax `json:"replication"` -} - -type ClusterStatsIndicesShardsIndexIntMinMax struct { - Min int `json:"min"` - Max int `json:"max"` - Avg float64 `json:"avg"` -} - -type ClusterStatsIndicesShardsIndexFloat64MinMax struct { - Min float64 `json:"min"` - Max float64 `json:"max"` - Avg float64 `json:"avg"` -} - -type ClusterStatsIndicesDocs struct { - Count int `json:"count"` - Deleted int `json:"deleted"` -} - -type ClusterStatsIndicesStore struct { - Size string `json:"size"` // e.g. "5.3gb" - SizeInBytes int64 `json:"size_in_bytes"` -} - -type ClusterStatsIndicesFieldData struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` - Fields map[string]struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - } `json:"fields,omitempty"` -} - -type ClusterStatsIndicesQueryCache struct { - MemorySize string `json:"memory_size"` // e.g. "61.3kb" - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - TotalCount int64 `json:"total_count"` - HitCount int64 `json:"hit_count"` - MissCount int64 `json:"miss_count"` - CacheSize int64 `json:"cache_size"` - CacheCount int64 `json:"cache_count"` - Evictions int64 `json:"evictions"` -} - -type ClusterStatsIndicesCompletion struct { - Size string `json:"size"` // e.g. "61.3kb" - SizeInBytes int64 `json:"size_in_bytes"` - Fields map[string]struct { - Size string `json:"size"` // e.g. "61.3kb" - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"fields,omitempty"` -} - -type ClusterStatsIndicesSegmentsFile struct { - Size string `json:"size"` // e.g. 
"61.3kb" - SizeInBytes int64 `json:"size_in_bytes"` - Description string `json:"description,omitempty"` -} - -// --- - -type ClusterStatsNodes struct { - Count *ClusterStatsNodesCount `json:"count"` - Versions []string `json:"versions"` - OS *ClusterStatsNodesOsStats `json:"os"` - Process *ClusterStatsNodesProcessStats `json:"process"` - JVM *ClusterStatsNodesJvmStats `json:"jvm"` - FS *ClusterStatsNodesFsStats `json:"fs"` - Plugins []*ClusterStatsNodesPlugin `json:"plugins"` - - NetworkTypes *ClusterStatsNodesNetworkTypes `json:"network_types"` - DiscoveryTypes *ClusterStatsNodesDiscoveryTypes `json:"discovery_types"` - PackagingTypes *ClusterStatsNodesPackagingTypes `json:"packaging_types"` -} - -type ClusterStatsNodesCount struct { - Total int `json:"total"` - Data int `json:"data"` - CoordinatingOnly int `json:"coordinating_only"` - Master int `json:"master"` - Ingest int `json:"ingest"` -} - -type ClusterStatsNodesOsStats struct { - AvailableProcessors int `json:"available_processors"` - AllocatedProcessors int `json:"allocated_processors"` - Names []struct { - Name string `json:"name"` - Value int `json:"count"` - } `json:"names"` - PrettyNames []struct { - PrettyName string `json:"pretty_name"` - Value int `json:"count"` - } `json:"pretty_names"` - Mem *ClusterStatsNodesOsStatsMem `json:"mem"` - // CPU []*ClusterStatsNodesOsStatsCPU `json:"cpu"` -} - -type ClusterStatsNodesOsStatsMem struct { - Total string `json:"total"` // e.g. "16gb" - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` // e.g. "12gb" - FreeInBytes int64 `json:"free_in_bytes"` - Used string `json:"used"` // e.g. "4gb" - UsedInBytes int64 `json:"used_in_bytes"` - FreePercent int `json:"free_percent"` - UsedPercent int `json:"used_percent"` -} - -type ClusterStatsNodesOsStatsCPU struct { - Vendor string `json:"vendor"` - Model string `json:"model"` - MHz int `json:"mhz"` - TotalCores int `json:"total_cores"` - TotalSockets int `json:"total_sockets"` - CoresPerSocket int `json:"cores_per_socket"` - CacheSize string `json:"cache_size"` // e.g. "256b" - CacheSizeInBytes int64 `json:"cache_size_in_bytes"` - Count int `json:"count"` -} - -type ClusterStatsNodesProcessStats struct { - CPU *ClusterStatsNodesProcessStatsCPU `json:"cpu"` - OpenFileDescriptors *ClusterStatsNodesProcessStatsOpenFileDescriptors `json:"open_file_descriptors"` -} - -type ClusterStatsNodesProcessStatsCPU struct { - Percent float64 `json:"percent"` -} - -type ClusterStatsNodesProcessStatsOpenFileDescriptors struct { - Min int64 `json:"min"` - Max int64 `json:"max"` - Avg int64 `json:"avg"` -} - -type ClusterStatsNodesJvmStats struct { - MaxUptime string `json:"max_uptime"` // e.g. "5h" - MaxUptimeInMillis int64 `json:"max_uptime_in_millis"` - Versions []*ClusterStatsNodesJvmStatsVersion `json:"versions"` - Mem *ClusterStatsNodesJvmStatsMem `json:"mem"` - Threads int64 `json:"threads"` -} - -type ClusterStatsNodesJvmStatsVersion struct { - Version string `json:"version"` // e.g. "1.8.0_45" - VMName string `json:"vm_name"` // e.g. "Java HotSpot(TM) 64-Bit Server VM" - VMVersion string `json:"vm_version"` // e.g. "25.45-b02" - VMVendor string `json:"vm_vendor"` // e.g. 
"Oracle Corporation" - BundledJDK bool `json:"bundled_jdk"` - UsingBundledJDK bool `json:"using_bundled_jdk"` - Count int `json:"count"` -} - -type ClusterStatsNodesJvmStatsMem struct { - HeapUsed string `json:"heap_used"` - HeapUsedInBytes int64 `json:"heap_used_in_bytes"` - HeapMax string `json:"heap_max"` - HeapMaxInBytes int64 `json:"heap_max_in_bytes"` -} - -type ClusterStatsNodesFsStats struct { - Path string `json:"path"` - Mount string `json:"mount"` - Dev string `json:"dev"` - Total string `json:"total"` // e.g. "930.7gb"` - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` // e.g. "930.7gb"` - FreeInBytes int64 `json:"free_in_bytes"` - Available string `json:"available"` // e.g. "930.7gb"` - AvailableInBytes int64 `json:"available_in_bytes"` - DiskReads int64 `json:"disk_reads"` - DiskWrites int64 `json:"disk_writes"` - DiskIOOp int64 `json:"disk_io_op"` - DiskReadSize string `json:"disk_read_size"` // e.g. "0b"` - DiskReadSizeInBytes int64 `json:"disk_read_size_in_bytes"` - DiskWriteSize string `json:"disk_write_size"` // e.g. "0b"` - DiskWriteSizeInBytes int64 `json:"disk_write_size_in_bytes"` - DiskIOSize string `json:"disk_io_size"` // e.g. "0b"` - DiskIOSizeInBytes int64 `json:"disk_io_size_in_bytes"` - DiskQueue string `json:"disk_queue"` - DiskServiceTime string `json:"disk_service_time"` -} - -type ClusterStatsNodesPlugin struct { - Name string `json:"name"` - Version string `json:"version"` - Description string `json:"description"` - URL string `json:"url"` - JVM bool `json:"jvm"` - Site bool `json:"site"` -} - -type ClusterStatsNodesNetworkTypes struct { - TransportTypes map[string]interface{} `json:"transport_types"` // e.g. "netty4": 1 - HTTPTypes map[string]interface{} `json:"http_types"` // e.g. "netty4": 1 -} - -type ClusterStatsNodesDiscoveryTypes interface{} - -type ClusterStatsNodesPackagingTypes []*ClusterStatsNodesPackagingType - -type ClusterStatsNodesPackagingType struct { - Flavor string `json:"flavor"` // e.g. "oss" - Type string `json:"type"` // e.g. "docker" - Count int `json:"count"` // e.g. 1 -} diff --git a/vendor/github.com/olivere/elastic/v7/config/config.go b/vendor/github.com/olivere/elastic/v7/config/config.go deleted file mode 100644 index 208eb2b..0000000 --- a/vendor/github.com/olivere/elastic/v7/config/config.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package config - -import ( - "fmt" - "net/url" - "strconv" - "strings" -) - -// Config represents an Elasticsearch configuration. -type Config struct { - URL string - Index string - Username string - Password string - Shards int - Replicas int - Sniff *bool - Healthcheck *bool - Infolog string - Errorlog string - Tracelog string -} - -// Parse returns the Elasticsearch configuration by extracting it -// from the URL, its path, and its query string. -// -// Example: -// http://127.0.0.1:9200/store-blobs?shards=1&replicas=0&sniff=false&tracelog=elastic.trace.log -// -// The code above will return a URL of http://127.0.0.1:9200, an index name -// of store-blobs, and the related settings from the query string. 
-func Parse(elasticURL string) (*Config, error) { - cfg := &Config{ - Shards: 1, - Replicas: 0, - Sniff: nil, - } - - uri, err := url.Parse(elasticURL) - if err != nil { - return nil, fmt.Errorf("error parsing elastic parameter %q: %v", elasticURL, err) - } - index := strings.TrimSuffix(strings.TrimPrefix(uri.Path, "/"), "/") - if uri.User != nil { - cfg.Username = uri.User.Username() - cfg.Password, _ = uri.User.Password() - } - uri.User = nil - - if i, err := strconv.Atoi(uri.Query().Get("shards")); err == nil { - cfg.Shards = i - } - if i, err := strconv.Atoi(uri.Query().Get("replicas")); err == nil { - cfg.Replicas = i - } - if s := uri.Query().Get("sniff"); s != "" { - if b, err := strconv.ParseBool(s); err == nil { - cfg.Sniff = &b - } - } - if s := uri.Query().Get("healthcheck"); s != "" { - if b, err := strconv.ParseBool(s); err == nil { - cfg.Healthcheck = &b - } - } - if s := uri.Query().Get("infolog"); s != "" { - cfg.Infolog = s - } - if s := uri.Query().Get("errorlog"); s != "" { - cfg.Errorlog = s - } - if s := uri.Query().Get("tracelog"); s != "" { - cfg.Tracelog = s - } - - uri.Path = "" - uri.RawQuery = "" - cfg.URL = uri.String() - cfg.Index = index - - return cfg, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/config/doc.go b/vendor/github.com/olivere/elastic/v7/config/doc.go deleted file mode 100644 index c9acd5f..0000000 --- a/vendor/github.com/olivere/elastic/v7/config/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -/* -Package config allows parsing a configuration for Elasticsearch -from a URL. -*/ -package config diff --git a/vendor/github.com/olivere/elastic/v7/connection.go b/vendor/github.com/olivere/elastic/v7/connection.go deleted file mode 100644 index 0f27a87..0000000 --- a/vendor/github.com/olivere/elastic/v7/connection.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "sync" - "time" -) - -// conn represents a single connection to a node in a cluster. -type conn struct { - sync.RWMutex - nodeID string // node ID - url string - failures int - dead bool - deadSince *time.Time -} - -// newConn creates a new connection to the given URL. -func newConn(nodeID, url string) *conn { - c := &conn{ - nodeID: nodeID, - url: url, - } - return c -} - -// String returns a representation of the connection status. -func (c *conn) String() string { - c.RLock() - defer c.RUnlock() - return fmt.Sprintf("%s [dead=%v,failures=%d,deadSince=%v]", c.url, c.dead, c.failures, c.deadSince) -} - -// NodeID returns the ID of the node of this connection. -func (c *conn) NodeID() string { - c.RLock() - defer c.RUnlock() - return c.nodeID -} - -// URL returns the URL of this connection. -func (c *conn) URL() string { - c.RLock() - defer c.RUnlock() - return c.url -} - -// IsDead returns true if this connection is marked as dead, i.e. a previous -// request to the URL has been unsuccessful. -func (c *conn) IsDead() bool { - c.RLock() - defer c.RUnlock() - return c.dead -} - -// MarkAsDead marks this connection as dead, increments the failures -// counter and stores the current time in dead since. 
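conn is unexported, so its lifecycle cannot be exercised from outside the package. The standalone sketch below mirrors the bookkeeping that the three Mark* methods here implement, to make the guarantees explicit; healthConn and its methods are illustrative names, not library API:

package main

import (
	"fmt"
	"sync"
	"time"
)

// healthConn mirrors the vendored conn: a mutex-guarded dead flag,
// a failure counter, and the timestamp of the first failure only.
type healthConn struct {
	mu        sync.Mutex
	failures  int
	dead      bool
	deadSince *time.Time
}

func (c *healthConn) markAsDead() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.dead = true
	if c.deadSince == nil { // repeated failures keep the first timestamp
		now := time.Now().UTC()
		c.deadSince = &now
	}
	c.failures++
}

func (c *healthConn) markAsHealthy() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.dead, c.deadSince, c.failures = false, nil, 0
}

func main() {
	c := &healthConn{}
	c.markAsDead()
	c.markAsDead()
	fmt.Println(c.dead, c.failures) // true 2
	c.markAsHealthy()
	fmt.Println(c.dead, c.failures) // false 0
}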
-func (c *conn) MarkAsDead() { - c.Lock() - c.dead = true - if c.deadSince == nil { - utcNow := time.Now().UTC() - c.deadSince = &utcNow - } - c.failures += 1 - c.Unlock() -} - -// MarkAsAlive marks this connection as eligible to be returned from the -// pool of connections by the selector. -func (c *conn) MarkAsAlive() { - c.Lock() - c.dead = false - c.Unlock() -} - -// MarkAsHealthy marks this connection as healthy, i.e. a request has been -// successfully performed with it. -func (c *conn) MarkAsHealthy() { - c.Lock() - c.dead = false - c.deadSince = nil - c.failures = 0 - c.Unlock() -} diff --git a/vendor/github.com/olivere/elastic/v7/count.go b/vendor/github.com/olivere/elastic/v7/count.go deleted file mode 100644 index d1c75be..0000000 --- a/vendor/github.com/olivere/elastic/v7/count.go +++ /dev/null @@ -1,381 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// CountService is a convenient service for determining the -// number of documents in an index. Use SearchService with -// a SearchType of count for counting with queries etc. -type CountService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - typ []string - allowNoIndices *bool - analyzeWildcard *bool - analyzer string - defaultOperator string - df string - expandWildcards string - ignoreUnavailable *bool - lenient *bool - lowercaseExpandedTerms *bool - minScore interface{} - preference string - q string - query Query - routing string - terminateAfter *int - bodyJson interface{} - bodyString string -} - -// NewCountService creates a new CountService. -func NewCountService(client *Client) *CountService { - return &CountService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *CountService) Pretty(pretty bool) *CountService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *CountService) Human(human bool) *CountService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *CountService) ErrorTrace(errorTrace bool) *CountService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *CountService) FilterPath(filterPath ...string) *CountService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *CountService) Header(name string, value string) *CountService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *CountService) Headers(headers http.Header) *CountService { - s.headers = headers - return s -} - -// Index sets the names of the indices to restrict the results. 
-func (s *CountService) Index(index ...string) *CountService { - if s.index == nil { - s.index = make([]string, 0) - } - s.index = append(s.index, index...) - return s -} - -// Type sets the types to use to restrict the results. -// -// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to -// filter on a field on the document. -func (s *CountService) Type(typ ...string) *CountService { - if s.typ == nil { - s.typ = make([]string, 0) - } - s.typ = append(s.typ, typ...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes "_all" string -// or when no indices have been specified). -func (s *CountService) AllowNoIndices(allowNoIndices bool) *CountService { - s.allowNoIndices = &allowNoIndices - return s -} - -// AnalyzeWildcard specifies whether wildcard and prefix queries should be -// analyzed (default: false). -func (s *CountService) AnalyzeWildcard(analyzeWildcard bool) *CountService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer specifies the analyzer to use for the query string. -func (s *CountService) Analyzer(analyzer string) *CountService { - s.analyzer = analyzer - return s -} - -// DefaultOperator specifies the default operator for query string query (AND or OR). -func (s *CountService) DefaultOperator(defaultOperator string) *CountService { - s.defaultOperator = defaultOperator - return s -} - -// Df specifies the field to use as default where no field prefix is given -// in the query string. -func (s *CountService) Df(df string) *CountService { - s.df = df - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *CountService) ExpandWildcards(expandWildcards string) *CountService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *CountService) IgnoreUnavailable(ignoreUnavailable bool) *CountService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Lenient specifies whether format-based query failures (such as -// providing text to a numeric field) should be ignored. -func (s *CountService) Lenient(lenient bool) *CountService { - s.lenient = &lenient - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *CountService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *CountService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// MinScore indicates to include only documents with a specific `_score` -// value in the result. -func (s *CountService) MinScore(minScore interface{}) *CountService { - s.minScore = minScore - return s -} - -// Preference specifies the node or shard the operation should be -// performed on (default: random). -func (s *CountService) Preference(preference string) *CountService { - s.preference = preference - return s -} - -// Q in the Lucene query string syntax. You can also use Query to pass -// a Query struct. -func (s *CountService) Q(q string) *CountService { - s.q = q - return s -} - -// Query specifies the query to pass. You can also pass a query string with Q. -func (s *CountService) Query(query Query) *CountService { - s.query = query - return s -} - -// Routing specifies the routing value. 
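The setters keep accumulating below (Do and CountResponse close the file out further down), so a typical call is easy to lose sight of. A minimal sketch; the index and field names are placeholders, and a reachable node is assumed:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Count documents matching a term query; Do returns a bare int64,
	// not a full search response.
	n, err := client.Count("file-events").
		Query(elastic.NewTermQuery("event_type", "CREATED")).
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d matching documents\n", n)
}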
-func (s *CountService) Routing(routing string) *CountService { - s.routing = routing - return s -} - -// TerminateAfter indicates the maximum count for each shard, upon reaching -// which the query execution will terminate early. -func (s *CountService) TerminateAfter(terminateAfter int) *CountService { - s.terminateAfter = &terminateAfter - return s -} - -// BodyJson specifies the query to restrict the results specified with the -// Query DSL (optional). The interface{} will be serialized to a JSON document, -// so use a map[string]interface{}. -func (s *CountService) BodyJson(body interface{}) *CountService { - s.bodyJson = body - return s -} - -// Body specifies a query to restrict the results specified with -// the Query DSL (optional). -func (s *CountService) BodyString(body string) *CountService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *CountService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_count", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_count", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_count", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else { - path = "/_all/_count" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.df != "" { - params.Set("df", s.df) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.lenient != nil { - params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) - } - if s.lowercaseExpandedTerms != nil { - params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) - } - if s.minScore != nil { - params.Set("min_score", fmt.Sprintf("%v", s.minScore)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.q != "" { - params.Set("q", s.q) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.terminateAfter != nil { - params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *CountService) Validate() error { - return nil -} - -// Do executes the operation. 
-func (s *CountService) Do(ctx context.Context) (int64, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return 0, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return 0, err - } - - // Setup HTTP request body - var body interface{} - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return 0, err - } - query := make(map[string]interface{}) - query["query"] = src - body = query - } else if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return 0, err - } - - // Return result - ret := new(CountResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return 0, err - } - if ret != nil { - return ret.Count, nil - } - - return int64(0), nil -} - -// CountResponse is the response of using the Count API. -type CountResponse struct { - Count int64 `json:"count"` - TerminatedEarly bool `json:"terminated_early,omitempty"` - Shards *ShardsInfo `json:"_shards,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/decoder.go b/vendor/github.com/olivere/elastic/v7/decoder.go deleted file mode 100644 index 9133ccd..0000000 --- a/vendor/github.com/olivere/elastic/v7/decoder.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "encoding/json" -) - -// Decoder is used to decode responses from Elasticsearch. -// Users of elastic can implement their own marshaler for advanced purposes -// and set them per Client (see SetDecoder). If none is specified, -// DefaultDecoder is used. -type Decoder interface { - Decode(data []byte, v interface{}) error -} - -// DefaultDecoder uses json.Unmarshal from the Go standard library -// to decode JSON data. -type DefaultDecoder struct{} - -// Decode decodes with json.Unmarshal from the Go standard library. -func (u *DefaultDecoder) Decode(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -// NumberDecoder uses json.NewDecoder, with UseNumber() enabled, from -// the Go standard library to decode JSON data. -type NumberDecoder struct{} - -// Decode decodes with json.Unmarshal from the Go standard library. -func (u *NumberDecoder) Decode(data []byte, v interface{}) error { - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - return dec.Decode(v) -} diff --git a/vendor/github.com/olivere/elastic/v7/delete.go b/vendor/github.com/olivere/elastic/v7/delete.go deleted file mode 100644 index f694701..0000000 --- a/vendor/github.com/olivere/elastic/v7/delete.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// DeleteService allows to delete a typed JSON document from a specified -// index based on its id. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete.html -// for details. 
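One note on the decoder.go removal above before the DeleteService listing continues: the Decoder seam is wired into a client via an option, which matters whenever large int64 values (epoch millis, sequence numbers) must not round-trip through float64. A minimal sketch:

package main

import (
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// NumberDecoder runs json.Decoder with UseNumber(), so numeric JSON
	// fields decode as json.Number instead of float64.
	client, err := elastic.NewClient(
		elastic.SetURL("http://127.0.0.1:9200"),
		elastic.SetDecoder(&elastic.NumberDecoder{}),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use as normal; every response now goes through NumberDecoder
}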
-type DeleteService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - index string - typ string - routing string - timeout string - version interface{} - versionType string - waitForActiveShards string - parent string - refresh string - ifSeqNo *int64 - ifPrimaryTerm *int64 -} - -// NewDeleteService creates a new DeleteService. -func NewDeleteService(client *Client) *DeleteService { - return &DeleteService{ - client: client, - typ: "_doc", - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *DeleteService) Pretty(pretty bool) *DeleteService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *DeleteService) Human(human bool) *DeleteService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *DeleteService) ErrorTrace(errorTrace bool) *DeleteService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *DeleteService) FilterPath(filterPath ...string) *DeleteService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *DeleteService) Header(name string, value string) *DeleteService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *DeleteService) Headers(headers http.Header) *DeleteService { - s.headers = headers - return s -} - -// Type is the type of the document. -// -// Deprecated: Types are in the process of being removed. -func (s *DeleteService) Type(typ string) *DeleteService { - s.typ = typ - return s -} - -// Id is the document ID. -func (s *DeleteService) Id(id string) *DeleteService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *DeleteService) Index(index string) *DeleteService { - s.index = index - return s -} - -// Routing is a specific routing value. -func (s *DeleteService) Routing(routing string) *DeleteService { - s.routing = routing - return s -} - -// Timeout is an explicit operation timeout. -func (s *DeleteService) Timeout(timeout string) *DeleteService { - s.timeout = timeout - return s -} - -// Version is an explicit version number for concurrency control. -func (s *DeleteService) Version(version interface{}) *DeleteService { - s.version = version - return s -} - -// VersionType is a specific version type. -func (s *DeleteService) VersionType(versionType string) *DeleteService { - s.versionType = versionType - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active -// before proceeding with the delete operation. Defaults to 1, meaning the -// primary shard only. Set to `all` for all shard copies, otherwise set to -// any non-negative value less than or equal to the total number of copies -// for the shard (number of replicas + 1). -func (s *DeleteService) WaitForActiveShards(waitForActiveShards string) *DeleteService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// Parent is the ID of parent document. 
-func (s *DeleteService) Parent(parent string) *DeleteService { - s.parent = parent - return s -} - -// Refresh the index after performing the operation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *DeleteService) Refresh(refresh string) *DeleteService { - s.refresh = refresh - return s -} - -// IfSeqNo indicates to only perform the delete operation if the last -// operation that has changed the document has the specified sequence number. -func (s *DeleteService) IfSeqNo(seqNo int64) *DeleteService { - s.ifSeqNo = &seqNo - return s -} - -// IfPrimaryTerm indicates to only perform the delete operation if the -// last operation that has changed the document has the specified primary term. -func (s *DeleteService) IfPrimaryTerm(primaryTerm int64) *DeleteService { - s.ifPrimaryTerm = &primaryTerm - return s -} - -// buildURL builds the URL for the operation. -func (s *DeleteService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "index": s.index, - "type": s.typ, - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if v := s.version; v != nil { - params.Set("version", fmt.Sprint(v)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if v := s.ifSeqNo; v != nil { - params.Set("if_seq_no", fmt.Sprintf("%d", *v)) - } - if v := s.ifPrimaryTerm; v != nil { - params.Set("if_primary_term", fmt.Sprintf("%d", *v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *DeleteService) Validate() error { - var invalid []string - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. If the document is not found (404), Elasticsearch will -// still return a response. This response is serialized and returned as well. In other -// words, for HTTP status code 404, both an error and a response might be returned. 
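That 404 contract is the part callers most often get wrong: on a missing document, Do hands back both a decoded response and an error. A sketch of the intended handling; index and id are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}
	res, err := client.Delete().
		Index("file-events").
		Id("no-such-id").
		Do(context.Background())
	if elastic.IsNotFound(err) {
		// Both values are populated on 404; don't treat it as fatal.
		fmt.Printf("already gone (result=%q)\n", res.Result)
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deleted, new version %d\n", res.Version)
}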
-func (s *DeleteService) Do(ctx context.Context) (*DeleteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - IgnoreErrors: []int{http.StatusNotFound}, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(DeleteResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - - // If we have a 404, we return both a result and an error, just like ES does - if res.StatusCode == http.StatusNotFound { - return ret, &Error{Status: http.StatusNotFound} - } - - return ret, nil -} - -// -- Result of a delete request. - -// DeleteResponse is the outcome of running DeleteService.Do. -type DeleteResponse struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Version int64 `json:"_version,omitempty"` - Result string `json:"result,omitempty"` - Shards *ShardsInfo `json:"_shards,omitempty"` - SeqNo int64 `json:"_seq_no,omitempty"` - PrimaryTerm int64 `json:"_primary_term,omitempty"` - Status int `json:"status,omitempty"` - ForcedRefresh bool `json:"forced_refresh,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/delete_by_query.go b/vendor/github.com/olivere/elastic/v7/delete_by_query.go deleted file mode 100644 index 4c98444..0000000 --- a/vendor/github.com/olivere/elastic/v7/delete_by_query.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// DeleteByQueryService deletes documents that match a query. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete-by-query.html. -type DeleteByQueryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - typ []string - query Query - body interface{} - xSource []string - xSourceExclude []string - xSourceInclude []string - analyzer string - analyzeWildcard *bool - allowNoIndices *bool - conflicts string - defaultOperator string - df string - docvalueFields []string - expandWildcards string - explain *bool - from *int - ignoreUnavailable *bool - lenient *bool - lowercaseExpandedTerms *bool - preference string - q string - refresh string - requestCache *bool - requestsPerSecond *int - routing []string - scroll string - scrollSize *int - searchTimeout string - searchType string - size *int - slices interface{} - sort []string - stats []string - storedFields []string - suggestField string - suggestMode string - suggestSize *int - suggestText string - terminateAfter *int - timeout string - trackScores *bool - version *bool - waitForActiveShards string - waitForCompletion *bool -} - -// NewDeleteByQueryService creates a new DeleteByQueryService. 
-// You typically use the client's DeleteByQuery to get a reference to -// the service. -func NewDeleteByQueryService(client *Client) *DeleteByQueryService { - builder := &DeleteByQueryService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *DeleteByQueryService) Pretty(pretty bool) *DeleteByQueryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *DeleteByQueryService) Human(human bool) *DeleteByQueryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *DeleteByQueryService) ErrorTrace(errorTrace bool) *DeleteByQueryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *DeleteByQueryService) FilterPath(filterPath ...string) *DeleteByQueryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *DeleteByQueryService) Header(name string, value string) *DeleteByQueryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *DeleteByQueryService) Headers(headers http.Header) *DeleteByQueryService { - s.headers = headers - return s -} - -// Index sets the indices on which to perform the delete operation. -func (s *DeleteByQueryService) Index(index ...string) *DeleteByQueryService { - s.index = append(s.index, index...) - return s -} - -// Type limits the delete operation to the given types. -// -// Deprecated: Types are in the process of being removed. Instead of -// using a type, prefer to filter on a field of the document. -func (s *DeleteByQueryService) Type(typ ...string) *DeleteByQueryService { - s.typ = append(s.typ, typ...) - return s -} - -// XSource is true or false to return the _source field or not, -// or a list of fields to return. -func (s *DeleteByQueryService) XSource(xSource ...string) *DeleteByQueryService { - s.xSource = append(s.xSource, xSource...) - return s -} - -// XSourceExclude represents a list of fields to exclude from the returned _source field. -func (s *DeleteByQueryService) XSourceExclude(xSourceExclude ...string) *DeleteByQueryService { - s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) - return s -} - -// XSourceInclude represents a list of fields to extract and return from the _source field. -func (s *DeleteByQueryService) XSourceInclude(xSourceInclude ...string) *DeleteByQueryService { - s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) - return s -} - -// Analyzer to use for the query string. -func (s *DeleteByQueryService) Analyzer(analyzer string) *DeleteByQueryService { - s.analyzer = analyzer - return s -} - -// AnalyzeWildcard specifies whether wildcard and prefix queries should be -// analyzed (default: false). -func (s *DeleteByQueryService) AnalyzeWildcard(analyzeWildcard bool) *DeleteByQueryService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices (including the _all string -// or when no indices have been specified). 
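Because the setter list for this service runs very long, here is what a typical end-to-end call looks like before wading through it. Do and its BulkIndexByScrollResponse sit outside this excerpt; index, field, and cutoff are placeholders:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	client, err := elastic.NewClient(elastic.SetURL("http://127.0.0.1:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Purge events older than a cutoff, proceeding past version conflicts
	// instead of aborting the whole request.
	res, err := client.DeleteByQuery("file-events").
		Query(elastic.NewRangeQuery("event_timestamp").Lt("2020-01-01")).
		ProceedOnVersionConflict().
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("deleted %d documents\n", res.Deleted)
}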
-func (s *DeleteByQueryService) AllowNoIndices(allow bool) *DeleteByQueryService { - s.allowNoIndices = &allow - return s -} - -// Conflicts indicates what to do when the process detects version conflicts. -// Possible values are "proceed" and "abort". -func (s *DeleteByQueryService) Conflicts(conflicts string) *DeleteByQueryService { - s.conflicts = conflicts - return s -} - -// AbortOnVersionConflict aborts the request on version conflicts. -// It is an alias to setting Conflicts("abort"). -func (s *DeleteByQueryService) AbortOnVersionConflict() *DeleteByQueryService { - s.conflicts = "abort" - return s -} - -// ProceedOnVersionConflict proceeds with the request in case of version conflicts. -// It is an alias to setting Conflicts("proceed"). -func (s *DeleteByQueryService) ProceedOnVersionConflict() *DeleteByQueryService { - s.conflicts = "proceed" - return s -} - -// DefaultOperator for query string query (AND or OR). -func (s *DeleteByQueryService) DefaultOperator(defaultOperator string) *DeleteByQueryService { - s.defaultOperator = defaultOperator - return s -} - -// DF is the field to use as default where no field prefix is given in the query string. -func (s *DeleteByQueryService) DF(defaultField string) *DeleteByQueryService { - s.df = defaultField - return s -} - -// DefaultField is the field to use as default where no field prefix is given in the query string. -// It is an alias to the DF func. -func (s *DeleteByQueryService) DefaultField(defaultField string) *DeleteByQueryService { - s.df = defaultField - return s -} - -// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. -func (s *DeleteByQueryService) DocvalueFields(docvalueFields ...string) *DeleteByQueryService { - s.docvalueFields = docvalueFields - return s -} - -// ExpandWildcards indicates whether to expand wildcard expressions to -// concrete indices that are open, closed, or both. It can be "open" or "closed". -func (s *DeleteByQueryService) ExpandWildcards(expand string) *DeleteByQueryService { - s.expandWildcards = expand - return s -} - -// Explain specifies whether to return detailed information about score -// computation as part of a hit. -func (s *DeleteByQueryService) Explain(explain bool) *DeleteByQueryService { - s.explain = &explain - return s -} - -// From is the starting offset (default: 0). -func (s *DeleteByQueryService) From(from int) *DeleteByQueryService { - s.from = &from - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *DeleteByQueryService) IgnoreUnavailable(ignore bool) *DeleteByQueryService { - s.ignoreUnavailable = &ignore - return s -} - -// Lenient specifies whether format-based query failures -// (such as providing text to a numeric field) should be ignored. -func (s *DeleteByQueryService) Lenient(lenient bool) *DeleteByQueryService { - s.lenient = &lenient - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *DeleteByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *DeleteByQueryService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// Preference specifies the node or shard the operation should be performed on -// (default: random). -func (s *DeleteByQueryService) Preference(preference string) *DeleteByQueryService { - s.preference = preference - return s -} - -// Q specifies the query in Lucene query string syntax.
You can also use -// Query to programmatically specify the query. -func (s *DeleteByQueryService) Q(query string) *DeleteByQueryService { - s.q = query - return s -} - -// QueryString is an alias to Q. Notice that you can also use Query to -// programmatically set the query. -func (s *DeleteByQueryService) QueryString(query string) *DeleteByQueryService { - s.q = query - return s -} - -// Query sets the query programmatically. -func (s *DeleteByQueryService) Query(query Query) *DeleteByQueryService { - s.query = query - return s -} - -// Refresh indicates whether the effected indexes should be refreshed. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *DeleteByQueryService) Refresh(refresh string) *DeleteByQueryService { - s.refresh = refresh - return s -} - -// RequestCache specifies if request cache should be used for this request -// or not, defaults to index level setting. -func (s *DeleteByQueryService) RequestCache(requestCache bool) *DeleteByQueryService { - s.requestCache = &requestCache - return s -} - -// RequestsPerSecond sets the throttle on this request in sub-requests per second. -// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. -func (s *DeleteByQueryService) RequestsPerSecond(requestsPerSecond int) *DeleteByQueryService { - s.requestsPerSecond = &requestsPerSecond - return s -} - -// Routing is a list of specific routing values. -func (s *DeleteByQueryService) Routing(routing ...string) *DeleteByQueryService { - s.routing = append(s.routing, routing...) - return s -} - -// Scroll specifies how long a consistent view of the index should be maintained -// for scrolled search. -func (s *DeleteByQueryService) Scroll(scroll string) *DeleteByQueryService { - s.scroll = scroll - return s -} - -// ScrollSize is the size on the scroll request powering the update_by_query. -func (s *DeleteByQueryService) ScrollSize(scrollSize int) *DeleteByQueryService { - s.scrollSize = &scrollSize - return s -} - -// SearchTimeout defines an explicit timeout for each search request. -// Defaults to no timeout. -func (s *DeleteByQueryService) SearchTimeout(searchTimeout string) *DeleteByQueryService { - s.searchTimeout = searchTimeout - return s -} - -// SearchType is the search operation type. Possible values are -// "query_then_fetch" and "dfs_query_then_fetch". -func (s *DeleteByQueryService) SearchType(searchType string) *DeleteByQueryService { - s.searchType = searchType - return s -} - -// Size represents the number of hits to return (default: 10). -func (s *DeleteByQueryService) Size(size int) *DeleteByQueryService { - s.size = &size - return s -} - -// Slices represents the number of slices (default: 1). -// It used to be a number, but can be set to "auto" as of 6.7. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-delete-by-query.html#docs-delete-by-query-automatic-slice -// for details. -func (s *DeleteByQueryService) Slices(slices interface{}) *DeleteByQueryService { - s.slices = slices - return s -} - -// Sort is a list of : pairs. -func (s *DeleteByQueryService) Sort(sort ...string) *DeleteByQueryService { - s.sort = append(s.sort, sort...) - return s -} - -// SortByField adds a sort order. 
-func (s *DeleteByQueryService) SortByField(field string, ascending bool) *DeleteByQueryService { - if ascending { - s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) - } else { - s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) - } - return s -} - -// Stats specifies specific tag(s) of the request for logging and statistical purposes. -func (s *DeleteByQueryService) Stats(stats ...string) *DeleteByQueryService { - s.stats = append(s.stats, stats...) - return s -} - -// StoredFields specifies the list of stored fields to return as part of a hit. -func (s *DeleteByQueryService) StoredFields(storedFields ...string) *DeleteByQueryService { - s.storedFields = storedFields - return s -} - -// SuggestField specifies which field to use for suggestions. -func (s *DeleteByQueryService) SuggestField(suggestField string) *DeleteByQueryService { - s.suggestField = suggestField - return s -} - -// SuggestMode specifies the suggest mode. Possible values are -// "missing", "popular", and "always". -func (s *DeleteByQueryService) SuggestMode(suggestMode string) *DeleteByQueryService { - s.suggestMode = suggestMode - return s -} - -// SuggestSize specifies how many suggestions to return in response. -func (s *DeleteByQueryService) SuggestSize(suggestSize int) *DeleteByQueryService { - s.suggestSize = &suggestSize - return s -} - -// SuggestText specifies the source text for which the suggestions should be returned. -func (s *DeleteByQueryService) SuggestText(suggestText string) *DeleteByQueryService { - s.suggestText = suggestText - return s -} - -// TerminateAfter indicates the maximum number of documents to collect -// for each shard, upon reaching which the query execution will terminate early. -func (s *DeleteByQueryService) TerminateAfter(terminateAfter int) *DeleteByQueryService { - s.terminateAfter = &terminateAfter - return s -} - -// Timeout is the time each individual bulk request should wait for shards -// that are unavailable. -func (s *DeleteByQueryService) Timeout(timeout string) *DeleteByQueryService { - s.timeout = timeout - return s -} - -// TimeoutInMillis sets the timeout in milliseconds. -func (s *DeleteByQueryService) TimeoutInMillis(timeoutInMillis int) *DeleteByQueryService { - s.timeout = fmt.Sprintf("%dms", timeoutInMillis) - return s -} - -// TrackScores indicates whether to calculate and return scores even if -// they are not used for sorting. -func (s *DeleteByQueryService) TrackScores(trackScores bool) *DeleteByQueryService { - s.trackScores = &trackScores - return s -} - -// Version specifies whether to return document version as part of a hit. -func (s *DeleteByQueryService) Version(version bool) *DeleteByQueryService { - s.version = &version - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active before proceeding -// with the update by query operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal -// to the total number of copies for the shard (number of replicas + 1). -func (s *DeleteByQueryService) WaitForActiveShards(waitForActiveShards string) *DeleteByQueryService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// WaitForCompletion indicates if the request should block until the reindex is complete. -func (s *DeleteByQueryService) WaitForCompletion(waitForCompletion bool) *DeleteByQueryService { - s.waitForCompletion = &waitForCompletion - return s -} - -// Body specifies the body of the request. 
It overrides data being specified via SearchService. -func (s *DeleteByQueryService) Body(body string) *DeleteByQueryService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *DeleteByQueryService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_delete_by_query", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else { - path, err = uritemplates.Expand("/{index}/_delete_by_query", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.xSource) > 0 { - params.Set("_source", strings.Join(s.xSource, ",")) - } - if len(s.xSourceExclude) > 0 { - params.Set("_source_excludes", strings.Join(s.xSourceExclude, ",")) - } - if len(s.xSourceInclude) > 0 { - params.Set("_source_includes", strings.Join(s.xSourceInclude, ",")) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.df != "" { - params.Set("df", s.df) - } - if s.explain != nil { - params.Set("explain", fmt.Sprintf("%v", *s.explain)) - } - if len(s.storedFields) > 0 { - params.Set("stored_fields", strings.Join(s.storedFields, ",")) - } - if len(s.docvalueFields) > 0 { - params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) - } - if s.from != nil { - params.Set("from", fmt.Sprintf("%d", *s.from)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.conflicts != "" { - params.Set("conflicts", s.conflicts) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.lenient != nil { - params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) - } - if s.lowercaseExpandedTerms != nil { - params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.q != "" { - params.Set("q", s.q) - } - if len(s.routing) > 0 { - params.Set("routing", strings.Join(s.routing, ",")) - } - if s.scroll != "" { - params.Set("scroll", s.scroll) - } - if s.searchType != "" { - params.Set("search_type", s.searchType) - } - if s.searchTimeout != "" { - params.Set("search_timeout", s.searchTimeout) - } - if s.size != nil { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - if s.slices != nil { - params.Set("slices", fmt.Sprintf("%v", s.slices)) - } - if len(s.sort) > 0 { - params.Set("sort", strings.Join(s.sort, ",")) - } - if s.terminateAfter != nil { - params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) - } - if len(s.stats) > 0 { - params.Set("stats", strings.Join(s.stats, ",")) - } - if s.suggestField != "" { - params.Set("suggest_field", s.suggestField) - } - 
if s.suggestMode != "" { - params.Set("suggest_mode", s.suggestMode) - } - if s.suggestSize != nil { - params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) - } - if s.suggestText != "" { - params.Set("suggest_text", s.suggestText) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.trackScores != nil { - params.Set("track_scores", fmt.Sprintf("%v", *s.trackScores)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", *s.version)) - } - if s.requestCache != nil { - params.Set("request_cache", fmt.Sprintf("%v", *s.requestCache)) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if s.scrollSize != nil { - params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) - } - if s.waitForCompletion != nil { - params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) - } - if s.requestsPerSecond != nil { - params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *DeleteByQueryService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the delete-by-query operation. -func (s *DeleteByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Set body if there is a query set - var body interface{} - if s.body != nil { - body = s.body - } else if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - body = map[string]interface{}{ - "query": src, - } - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return result - ret := new(BulkIndexByScrollResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DoAsync executes the delete-by-query operation asynchronously by starting a new task. -// Callers need to use the Task Management API to watch the outcome of the reindexing -// operation. 
-func (s *DeleteByQueryService) DoAsync(ctx context.Context) (*StartTaskResult, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // DoAsync only makes sense with WaitForCompletion set to false - if s.waitForCompletion != nil && *s.waitForCompletion { - return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true") - } - f := false - s.waitForCompletion = &f - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Set body if there is a query set - var body interface{} - if s.body != nil { - body = s.body - } else if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - body = map[string]interface{}{ - "query": src, - } - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(StartTaskResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// BulkIndexByScrollResponse is the outcome of executing Do with -// DeleteByQueryService and UpdateByQueryService. -type BulkIndexByScrollResponse struct { - Header http.Header `json:"-"` - Took int64 `json:"took"` - SliceId *int64 `json:"slice_id,omitempty"` - TimedOut bool `json:"timed_out"` - Total int64 `json:"total"` - Updated int64 `json:"updated,omitempty"` - Created int64 `json:"created,omitempty"` - Deleted int64 `json:"deleted"` - Batches int64 `json:"batches"` - VersionConflicts int64 `json:"version_conflicts"` - Noops int64 `json:"noops"` - Retries struct { - Bulk int64 `json:"bulk"` - Search int64 `json:"search"` - } `json:"retries,omitempty"` - Throttled string `json:"throttled"` - ThrottledMillis int64 `json:"throttled_millis"` - RequestsPerSecond float64 `json:"requests_per_second"` - Canceled string `json:"canceled,omitempty"` - ThrottledUntil string `json:"throttled_until"` - ThrottledUntilMillis int64 `json:"throttled_until_millis"` - Failures []bulkIndexByScrollResponseFailure `json:"failures"` -} - -type bulkIndexByScrollResponseFailure struct { - Index string `json:"index,omitempty"` - Type string `json:"type,omitempty"` - Id string `json:"id,omitempty"` - Status int `json:"status,omitempty"` - Shard int `json:"shard,omitempty"` - Node int `json:"node,omitempty"` - // TODO "cause" contains exception details - // TODO "reason" contains exception details -} diff --git a/vendor/github.com/olivere/elastic/v7/doc.go b/vendor/github.com/olivere/elastic/v7/doc.go deleted file mode 100644 index ea16d66..0000000 --- a/vendor/github.com/olivere/elastic/v7/doc.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -/* -Package elastic provides an interface to the Elasticsearch server -(https://www.elastic.co/products/elasticsearch). - -The first thing you do is to create a Client. If you have Elasticsearch -installed and running with its default settings -(i.e. 
available at http://127.0.0.1:9200), all you need to do is: - - client, err := elastic.NewClient() - if err != nil { - // Handle error - } - -If your Elasticsearch server is running on a different IP and/or port, -just provide a URL to NewClient: - - // Create a client and connect to http://192.168.2.10:9201 - client, err := elastic.NewClient(elastic.SetURL("http://192.168.2.10:9201")) - if err != nil { - // Handle error - } - -You can pass many more configuration parameters to NewClient. Review the -documentation of NewClient for more information. - -If no Elasticsearch server is available, services will fail when creating -a new request and will return ErrNoClient. - -A Client provides services. The services usually come with a variety of -methods to prepare the query and a Do function to execute it against the -Elasticsearch REST interface and return a response. Here is an example -of the IndexExists service that checks if a given index already exists. - - exists, err := client.IndexExists("twitter").Do(context.Background()) - if err != nil { - // Handle error - } - if !exists { - // Index does not exist yet. - } - -Look up the documentation for Client to get an idea of the services provided -and what kinds of responses you get when executing the Do function of a service. -Also see the wiki on Github for more details. - -*/ -package elastic diff --git a/vendor/github.com/olivere/elastic/v7/docker-compose.yml b/vendor/github.com/olivere/elastic/v7/docker-compose.yml deleted file mode 100644 index 2fd0d98..0000000 --- a/vendor/github.com/olivere/elastic/v7/docker-compose.yml +++ /dev/null @@ -1,61 +0,0 @@ -version: '3' - -services: - elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.6.2 - hostname: elasticsearch - environment: - - cluster.name=elasticsearch - - bootstrap.memory_lock=true - - discovery.type=single-node - # - http.publish_host=localhost - # - http.host=0.0.0.0 - # - transport.host=127.0.0.1 - # - network.host=_local_ - - network.publish_host=127.0.0.1 - - logger.org.elasticsearch=warn - - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - ulimits: - nproc: 65536 - nofile: - soft: 65536 - hard: 65536 - memlock: - soft: -1 - hard: -1 - # volumes: - # - ./data/elasticsearch:/usr/share/elasticsearch/data - ports: - - 9200:9200 - platinum: - image: docker.elastic.co/elasticsearch/elasticsearch:7.6.2 - hostname: elasticsearch-platinum - environment: - - cluster.name=platinum - - bootstrap.memory_lock=true - - discovery.type=single-node - - xpack.ilm.enabled=true - - xpack.license.self_generated.type=trial - - xpack.security.enabled=true - - xpack.watcher.enabled=true - # - http.publish_host=localhost - # - http.host=0.0.0.0 - # - transport.host=127.0.0.1 - # - network.host=_local_ - - http.port=9210 - - network.publish_host=127.0.0.1 - - logger.org.elasticsearch=warn - - "ES_JAVA_OPTS=-Xms1g -Xmx1g" - - ELASTIC_PASSWORD=elastic - ulimits: - nproc: 65536 - nofile: - soft: 65536 - hard: 65536 - memlock: - soft: -1 - hard: -1 - # volumes: - # - ./data/elasticsearch-platinum:/usr/share/elasticsearch/data - ports: - - 9210:9210 diff --git a/vendor/github.com/olivere/elastic/v7/docvalue_field.go b/vendor/github.com/olivere/elastic/v7/docvalue_field.go deleted file mode 100644 index e73cd24..0000000 --- a/vendor/github.com/olivere/elastic/v7/docvalue_field.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -// DocvalueField represents a docvalue field, its name and -// its format (optional). -type DocvalueField struct { - Field string - Format string -} - -// Source serializes the DocvalueField into JSON. -func (d DocvalueField) Source() (interface{}, error) { - if d.Format == "" { - return d.Field, nil - } - return map[string]interface{}{ - "field": d.Field, - "format": d.Format, - }, nil -} - -// DocvalueFields is a slice of DocvalueField instances. -type DocvalueFields []DocvalueField - -// Source serializes the DocvalueFields into JSON. -func (d DocvalueFields) Source() (interface{}, error) { - if d == nil { - return nil, nil - } - v := make([]interface{}, 0) - for _, f := range d { - src, err := f.Source() - if err != nil { - return nil, err - } - v = append(v, src) - } - return v, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/errors.go b/vendor/github.com/olivere/elastic/v7/errors.go deleted file mode 100644 index 0b51c5e..0000000 --- a/vendor/github.com/olivere/elastic/v7/errors.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - - "github.com/pkg/errors" -) - -// checkResponse will return an error if the request/response indicates -// an error returned from Elasticsearch. -// -// HTTP status codes in the range [200..299] are considered successful. -// All other status codes are considered errors unless they are specified in -// ignoreErrors. This is necessary because for some services, HTTP status 404 -// is a valid response from Elasticsearch (e.g. the Exists service). -// -// The func tries to parse error details as returned from Elasticsearch -// and encapsulates them in type elastic.Error. -func checkResponse(req *http.Request, res *http.Response, ignoreErrors ...int) error { - // 200-299 are valid status codes - if res.StatusCode >= 200 && res.StatusCode <= 299 { - return nil - } - // Ignore certain errors? - for _, code := range ignoreErrors { - if code == res.StatusCode { - return nil - } - } - return createResponseError(res) -} - -// createResponseError creates an Error structure from the HTTP response, -// its status code and the error information sent by Elasticsearch. -func createResponseError(res *http.Response) error { - if res.Body == nil { - return &Error{Status: res.StatusCode} - } - data, err := ioutil.ReadAll(res.Body) - if err != nil { - return &Error{Status: res.StatusCode} - } - errReply := new(Error) - err = json.Unmarshal(data, errReply) - if err != nil { - return &Error{Status: res.StatusCode} - } - if errReply != nil { - if errReply.Status == 0 { - errReply.Status = res.StatusCode - } - return errReply - } - return &Error{Status: res.StatusCode} -} - -// Error encapsulates error details as returned from Elasticsearch. -type Error struct { - Status int `json:"status"` - Details *ErrorDetails `json:"error,omitempty"` -} - -// ErrorDetails encapsulates error details from Elasticsearch. -// It is used in e.g. elastic.Error and elastic.BulkResponseItem. 
-type ErrorDetails struct { - Type string `json:"type"` - Reason string `json:"reason"` - ResourceType string `json:"resource.type,omitempty"` - ResourceId string `json:"resource.id,omitempty"` - Index string `json:"index,omitempty"` - Phase string `json:"phase,omitempty"` - Grouped bool `json:"grouped,omitempty"` - CausedBy map[string]interface{} `json:"caused_by,omitempty"` - RootCause []*ErrorDetails `json:"root_cause,omitempty"` - FailedShards []map[string]interface{} `json:"failed_shards,omitempty"` - - // ScriptException adds the information in the following block. - - ScriptStack []string `json:"script_stack,omitempty"` // from ScriptException - Script string `json:"script,omitempty"` // from ScriptException - Lang string `json:"lang,omitempty"` // from ScriptException - Position *ScriptErrorPosition `json:"position,omitempty"` // from ScriptException (7.7+) -} - -// ScriptErrorPosition specifies the position of the error -// in a script. It is used in ErrorDetails for scripting errors. -type ScriptErrorPosition struct { - Offset int `json:"offset"` - Start int `json:"start"` - End int `json:"end"` -} - -// Error returns a string representation of the error. -func (e *Error) Error() string { - if e.Details != nil && e.Details.Reason != "" { - return fmt.Sprintf("elastic: Error %d (%s): %s [type=%s]", e.Status, http.StatusText(e.Status), e.Details.Reason, e.Details.Type) - } - return fmt.Sprintf("elastic: Error %d (%s)", e.Status, http.StatusText(e.Status)) -} - -// IsContextErr returns true if the error is from a context that was canceled or deadline exceeded -func IsContextErr(err error) bool { - if err == context.Canceled || err == context.DeadlineExceeded { - return true - } - // This happens e.g. on redirect errors, see https://golang.org/src/net/http/client_test.go#L329 - if ue, ok := err.(*url.Error); ok { - if ue.Temporary() { - return true - } - // Use of an AWS Signing Transport can result in a wrapped url.Error - return IsContextErr(ue.Err) - } - return false -} - -// IsConnErr returns true if the error indicates that Elastic could not -// find an Elasticsearch host to connect to. -func IsConnErr(err error) bool { - return err == ErrNoClient || errors.Cause(err) == ErrNoClient -} - -// IsNotFound returns true if the given error indicates that Elasticsearch -// returned HTTP status 404. The err parameter can be of type *elastic.Error, -// elastic.Error, *http.Response or int (indicating the HTTP status code). -func IsNotFound(err interface{}) bool { - return IsStatusCode(err, http.StatusNotFound) -} - -// IsTimeout returns true if the given error indicates that Elasticsearch -// returned HTTP status 408. The err parameter can be of type *elastic.Error, -// elastic.Error, *http.Response or int (indicating the HTTP status code). -func IsTimeout(err interface{}) bool { - return IsStatusCode(err, http.StatusRequestTimeout) -} - -// IsConflict returns true if the given error indicates that the Elasticsearch -// operation resulted in a version conflict. This can occur in operations like -// `update` or `index` with `op_type=create`. The err parameter can be of -// type *elastic.Error, elastic.Error, *http.Response or int (indicating the -// HTTP status code). -func IsConflict(err interface{}) bool { - return IsStatusCode(err, http.StatusConflict) -} - -// IsForbidden returns true if the given error indicates that Elasticsearch -// returned HTTP status 403. This happens e.g. due to a missing license. 
-// The err parameter can be of type *elastic.Error, elastic.Error, -// *http.Response or int (indicating the HTTP status code). -func IsForbidden(err interface{}) bool { - return IsStatusCode(err, http.StatusForbidden) -} - -// IsStatusCode returns true if the given error indicates that the Elasticsearch -// operation returned the specified HTTP status code. The err parameter can be of -// type *http.Response, *Error, Error, or int (indicating the HTTP status code). -func IsStatusCode(err interface{}, code int) bool { - switch e := err.(type) { - case *http.Response: - return e.StatusCode == code - case *Error: - return e.Status == code - case Error: - return e.Status == code - case int: - return e == code - } - return false -} - -// -- General errors -- - -// ShardsInfo represents information from a shard. -type ShardsInfo struct { - Total int `json:"total"` - Successful int `json:"successful"` - Failed int `json:"failed"` - Failures []*ShardFailure `json:"failures,omitempty"` - Skipped int `json:"skipped,omitempty"` -} - -// ShardFailure represents details about a failure. -type ShardFailure struct { - Index string `json:"_index,omitempty"` - Shard int `json:"_shard,omitempty"` - Node string `json:"_node,omitempty"` - Reason map[string]interface{} `json:"reason,omitempty"` - Status string `json:"status,omitempty"` - Primary bool `json:"primary,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/exists.go b/vendor/github.com/olivere/elastic/v7/exists.go deleted file mode 100644 index dbc353a..0000000 --- a/vendor/github.com/olivere/elastic/v7/exists.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// ExistsService checks for the existence of a document using HEAD. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-get.html -// for details. -type ExistsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - index string - typ string - preference string - realtime *bool - refresh string - routing string - parent string -} - -// NewExistsService creates a new ExistsService. -func NewExistsService(client *Client) *ExistsService { - return &ExistsService{ - client: client, - typ: "_doc", - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ExistsService) Pretty(pretty bool) *ExistsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ExistsService) Human(human bool) *ExistsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ExistsService) ErrorTrace(errorTrace bool) *ExistsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. 
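The helpers above normalize status checks across the shapes of error a caller may hold: *elastic.Error, elastic.Error, *http.Response, or a bare status-code int. A tiny sketch, derivable directly from the IsStatusCode switch shown above:

    err := &elastic.Error{Status: http.StatusConflict}
    fmt.Println(elastic.IsConflict(err))        // true
    fmt.Println(elastic.IsNotFound(err))        // false
    fmt.Println(elastic.IsStatusCode(err, 409)) // true
    fmt.Println(elastic.IsTimeout(408))         // true: bare ints are compared directly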
-func (s *ExistsService) FilterPath(filterPath ...string) *ExistsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ExistsService) Header(name string, value string) *ExistsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ExistsService) Headers(headers http.Header) *ExistsService { - s.headers = headers - return s -} - -// Id is the document ID. -func (s *ExistsService) Id(id string) *ExistsService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *ExistsService) Index(index string) *ExistsService { - s.index = index - return s -} - -// Type is the type of the document (use `_all` to fetch the first document -// matching the ID across all types). -func (s *ExistsService) Type(typ string) *ExistsService { - s.typ = typ - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). -func (s *ExistsService) Preference(preference string) *ExistsService { - s.preference = preference - return s -} - -// Realtime specifies whether to perform the operation in realtime or search mode. -func (s *ExistsService) Realtime(realtime bool) *ExistsService { - s.realtime = &realtime - return s -} - -// Refresh the shard containing the document before performing the operation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *ExistsService) Refresh(refresh string) *ExistsService { - s.refresh = refresh - return s -} - -// Routing is a specific routing value. -func (s *ExistsService) Routing(routing string) *ExistsService { - s.routing = routing - return s -} - -// Parent is the ID of the parent document. -func (s *ExistsService) Parent(parent string) *ExistsService { - s.parent = parent - return s -} - -// buildURL builds the URL for the operation. -func (s *ExistsService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprint(*s.realtime)) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ExistsService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *ExistsService) Do(ctx context.Context) (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "HEAD", - Path: path, - Params: params, - IgnoreErrors: []int{404}, - Headers: s.headers, - }) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/vendor/github.com/olivere/elastic/v7/explain.go b/vendor/github.com/olivere/elastic/v7/explain.go deleted file mode 100644 index dc8795e..0000000 --- a/vendor/github.com/olivere/elastic/v7/explain.go +++ /dev/null @@ -1,390 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// ExplainService computes a score explanation for a query and -// a specific document. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-explain.html. -type ExplainService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - index string - typ string - q string - routing string - lenient *bool - analyzer string - df string - fields []string - lowercaseExpandedTerms *bool - xSourceInclude []string - analyzeWildcard *bool - parent string - preference string - xSource []string - defaultOperator string - xSourceExclude []string - source string - bodyJson interface{} - bodyString string -} - -// NewExplainService creates a new ExplainService. -func NewExplainService(client *Client) *ExplainService { - return &ExplainService{ - client: client, - typ: "_doc", - xSource: make([]string, 0), - xSourceExclude: make([]string, 0), - fields: make([]string, 0), - xSourceInclude: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ExplainService) Pretty(pretty bool) *ExplainService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ExplainService) Human(human bool) *ExplainService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ExplainService) ErrorTrace(errorTrace bool) *ExplainService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ExplainService) FilterPath(filterPath ...string) *ExplainService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
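A short sketch of the Exists service defined above, with client and ctx as in the earlier delete-by-query sketch (index and id are illustrative). Because the request is a HEAD, Do maps 200/404 onto a plain bool rather than treating the 404 as an error:

    exists, err := elastic.NewExistsService(client).
        Index("twitter").
        Id("1").
        Do(ctx)
    if err != nil {
        // Handle transport errors and any status other than 200/404
    }
    if !exists {
        // Document does not exist
    }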
-func (s *ExplainService) Header(name string, value string) *ExplainService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ExplainService) Headers(headers http.Header) *ExplainService { - s.headers = headers - return s -} - -// Id is the document ID. -func (s *ExplainService) Id(id string) *ExplainService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *ExplainService) Index(index string) *ExplainService { - s.index = index - return s -} - -// Type is the type of the document. -// -// Deprecated: Types are in the process of being removed. -func (s *ExplainService) Type(typ string) *ExplainService { - s.typ = typ - return s -} - -// Source is the URL-encoded query definition (instead of using the request body). -func (s *ExplainService) Source(source string) *ExplainService { - s.source = source - return s -} - -// XSourceExclude is a list of fields to exclude from the returned _source field. -func (s *ExplainService) XSourceExclude(xSourceExclude ...string) *ExplainService { - s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) - return s -} - -// Lenient specifies whether format-based query failures -// (such as providing text to a numeric field) should be ignored. -func (s *ExplainService) Lenient(lenient bool) *ExplainService { - s.lenient = &lenient - return s -} - -// Query in the Lucene query string syntax. -func (s *ExplainService) Q(q string) *ExplainService { - s.q = q - return s -} - -// Routing sets a specific routing value. -func (s *ExplainService) Routing(routing string) *ExplainService { - s.routing = routing - return s -} - -// AnalyzeWildcard specifies whether wildcards and prefix queries -// in the query string query should be analyzed (default: false). -func (s *ExplainService) AnalyzeWildcard(analyzeWildcard bool) *ExplainService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer is the analyzer for the query string query. -func (s *ExplainService) Analyzer(analyzer string) *ExplainService { - s.analyzer = analyzer - return s -} - -// Df is the default field for query string query (default: _all). -func (s *ExplainService) Df(df string) *ExplainService { - s.df = df - return s -} - -// Fields is a list of fields to return in the response. -func (s *ExplainService) Fields(fields ...string) *ExplainService { - s.fields = append(s.fields, fields...) - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *ExplainService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *ExplainService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// XSourceInclude is a list of fields to extract and return from the _source field. -func (s *ExplainService) XSourceInclude(xSourceInclude ...string) *ExplainService { - s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) - return s -} - -// DefaultOperator is the default operator for query string query (AND or OR). -func (s *ExplainService) DefaultOperator(defaultOperator string) *ExplainService { - s.defaultOperator = defaultOperator - return s -} - -// Parent is the ID of the parent document. -func (s *ExplainService) Parent(parent string) *ExplainService { - s.parent = parent - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). 
-func (s *ExplainService) Preference(preference string) *ExplainService { - s.preference = preference - return s -} - -// XSource is true or false to return the _source field or not, or a list of fields to return. -func (s *ExplainService) XSource(xSource ...string) *ExplainService { - s.xSource = append(s.xSource, xSource...) - return s -} - -// Query sets a query definition using the Query DSL. -func (s *ExplainService) Query(query Query) *ExplainService { - src, err := query.Source() - if err != nil { - // Do nothing in case of an error - return s - } - body := make(map[string]interface{}) - body["query"] = src - s.bodyJson = body - return s -} - -// BodyJson sets the query definition using the Query DSL. -func (s *ExplainService) BodyJson(body interface{}) *ExplainService { - s.bodyJson = body - return s -} - -// BodyString sets the query definition using the Query DSL as a string. -func (s *ExplainService) BodyString(body string) *ExplainService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *ExplainService) buildURL() (string, url.Values, error) { - // Build URL - var path string - var err error - - if s.typ == "" || s.typ == "_doc" { - path, err = uritemplates.Expand("/{index}/_explain/{id}", map[string]string{ - "id": s.id, - "index": s.index, - }) - } else { - path, err = uritemplates.Expand("/{index}/{type}/{id}/_explain", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.xSource) > 0 { - params.Set("_source", strings.Join(s.xSource, ",")) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.source != "" { - params.Set("source", s.source) - } - if len(s.xSourceExclude) > 0 { - params.Set("_source_excludes", strings.Join(s.xSourceExclude, ",")) - } - if s.lenient != nil { - params.Set("lenient", fmt.Sprintf("%v", *s.lenient)) - } - if s.q != "" { - params.Set("q", s.q) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.lowercaseExpandedTerms != nil { - params.Set("lowercase_expanded_terms", fmt.Sprintf("%v", *s.lowercaseExpandedTerms)) - } - if len(s.xSourceInclude) > 0 { - params.Set("_source_includes", strings.Join(s.xSourceInclude, ",")) - } - if s.analyzeWildcard != nil { - params.Set("analyze_wildcard", fmt.Sprintf("%v", *s.analyzeWildcard)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.df != "" { - params.Set("df", s.df) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
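A sketch of requesting a scoring explanation for one document with the service above (client, ctx, and all names as before; Do and the ExplainResponse type follow just below):

    expl, err := elastic.NewExplainService(client).
        Index("twitter").
        Id("1").
        Query(elastic.NewMatchQuery("message", "golang")).
        Do(ctx)
    if err != nil {
        // Handle error
    }
    if expl.Matched {
        // expl.Explanation carries the scoring breakdown as a generic map
    }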
-func (s *ExplainService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *ExplainService) Do(ctx context.Context) (*ExplainResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ExplainResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ExplainResponse is the response of ExplainService.Do. -type ExplainResponse struct { - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id"` - Matched bool `json:"matched"` - Explanation map[string]interface{} `json:"explanation"` -} diff --git a/vendor/github.com/olivere/elastic/v7/fetch_source_context.go b/vendor/github.com/olivere/elastic/v7/fetch_source_context.go deleted file mode 100644 index 7a4a769..0000000 --- a/vendor/github.com/olivere/elastic/v7/fetch_source_context.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "net/url" - "strings" -) - -// FetchSourceContext enables source filtering, i.e. it allows control -// over how the _source field is returned with every hit. It is used -// with various endpoints, e.g. when searching for documents, retrieving -// individual documents, or even updating documents. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-source-filtering.html -// for details. -type FetchSourceContext struct { - fetchSource bool - includes []string - excludes []string -} - -// NewFetchSourceContext returns a new FetchSourceContext. -func NewFetchSourceContext(fetchSource bool) *FetchSourceContext { - return &FetchSourceContext{ - fetchSource: fetchSource, - includes: make([]string, 0), - excludes: make([]string, 0), - } -} - -// FetchSource indicates whether to return the _source. -func (fsc *FetchSourceContext) FetchSource() bool { - return fsc.fetchSource -} - -// SetFetchSource specifies whether to return the _source. -func (fsc *FetchSourceContext) SetFetchSource(fetchSource bool) { - fsc.fetchSource = fetchSource -} - -// Include indicates to return specific parts of the _source. -// Wildcards are allowed here. -func (fsc *FetchSourceContext) Include(includes ...string) *FetchSourceContext { - fsc.includes = append(fsc.includes, includes...) - return fsc -} - -// Exclude indicates to exclude specific parts of the _source. -// Wildcards are allowed here. -func (fsc *FetchSourceContext) Exclude(excludes ...string) *FetchSourceContext { - fsc.excludes = append(fsc.excludes, excludes...) 
- return fsc -} - -// Source returns the JSON-serializable data to be used in a body. -func (fsc *FetchSourceContext) Source() (interface{}, error) { - if !fsc.fetchSource { - return false, nil - } - if len(fsc.includes) == 0 && len(fsc.excludes) == 0 { - return true, nil - } - src := make(map[string]interface{}) - if len(fsc.includes) > 0 { - src["includes"] = fsc.includes - } - if len(fsc.excludes) > 0 { - src["excludes"] = fsc.excludes - } - return src, nil -} - -// Query returns the parameters in a form suitable for a URL query string. -func (fsc *FetchSourceContext) Query() url.Values { - params := url.Values{} - if fsc.fetchSource { - if len(fsc.includes) > 0 { - params.Add("_source_includes", strings.Join(fsc.includes, ",")) - } - if len(fsc.excludes) > 0 { - params.Add("_source_excludes", strings.Join(fsc.excludes, ",")) - } - } else { - params.Add("_source", "false") - } - return params -} diff --git a/vendor/github.com/olivere/elastic/v7/field_caps.go b/vendor/github.com/olivere/elastic/v7/field_caps.go deleted file mode 100644 index b84a1f1..0000000 --- a/vendor/github.com/olivere/elastic/v7/field_caps.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// FieldCapsService allows retrieving the capabilities of fields among multiple indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-field-caps.html -// for details -type FieldCapsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - allowNoIndices *bool - expandWildcards string - fields []string - ignoreUnavailable *bool - bodyJson interface{} - bodyString string -} - -// NewFieldCapsService creates a new FieldCapsService -func NewFieldCapsService(client *Client) *FieldCapsService { - return &FieldCapsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *FieldCapsService) Pretty(pretty bool) *FieldCapsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *FieldCapsService) Human(human bool) *FieldCapsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *FieldCapsService) ErrorTrace(errorTrace bool) *FieldCapsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *FieldCapsService) FilterPath(filterPath ...string) *FieldCapsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *FieldCapsService) Header(name string, value string) *FieldCapsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. 
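FetchSourceContext above serializes to one of three shapes: false when fetching is disabled, true when enabled with no filters, and an includes/excludes object otherwise. A sketch (field names illustrative):

    fsc := elastic.NewFetchSourceContext(true).
        Include("user", "message").
        Exclude("retweets")
    src, _ := fsc.Source() // map[string]interface{}{"includes": [...], "excludes": [...]}

The same object also feeds URL-style callers through Query(), which emits _source_includes and _source_excludes parameters instead.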
-func (s *FieldCapsService) Headers(headers http.Header) *FieldCapsService { - s.headers = headers - return s -} - -// Index is a list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *FieldCapsService) Index(index ...string) *FieldCapsService { - s.index = append(s.index, index...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *FieldCapsService) AllowNoIndices(allowNoIndices bool) *FieldCapsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *FieldCapsService) ExpandWildcards(expandWildcards string) *FieldCapsService { - s.expandWildcards = expandWildcards - return s -} - -// Fields is a list of fields for which to get field capabilities. -func (s *FieldCapsService) Fields(fields ...string) *FieldCapsService { - s.fields = append(s.fields, fields...) - return s -} - -// IgnoreUnavailable is documented as: Whether specified concrete indices should be ignored when unavailable (missing or closed). -func (s *FieldCapsService) IgnoreUnavailable(ignoreUnavailable bool) *FieldCapsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// BodyJson is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. -func (s *FieldCapsService) BodyJson(body interface{}) *FieldCapsService { - s.bodyJson = body - return s -} - -// BodyString is documented as: Field json objects containing the name and optionally a range to filter out indices result, that have results outside the defined bounds. -func (s *FieldCapsService) BodyString(body string) *FieldCapsService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *FieldCapsService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_field_caps", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_field_caps" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *FieldCapsService) Validate() error { - return nil -} - -// Do executes the operation. 
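A sketch of a field-capabilities call with the service above (client and ctx as before; the FieldCapsResponse types appear just below):

    caps, err := elastic.NewFieldCapsService(client).
        Index("twitter").
        Fields("user", "created").
        Do(ctx)
    if err != nil {
        // Handle error
    }
    for field, byType := range caps.Fields { // field name -> type -> capabilities
        fmt.Println(field, byType)
    }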
-func (s *FieldCapsService) Do(ctx context.Context) (*FieldCapsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - IgnoreErrors: []int{http.StatusNotFound}, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // TODO(oe): Is 404 really a valid response here? - if res.StatusCode == http.StatusNotFound { - return &FieldCapsResponse{}, nil - } - - // Return operation response - ret := new(FieldCapsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Request -- - -// FieldCapsRequest can be used to set up the body to be used in the -// Field Capabilities API. -type FieldCapsRequest struct { - Fields []string `json:"fields"` -} - -// -- Response -- - -// FieldCapsResponse contains field capabilities. -type FieldCapsResponse struct { - Indices []string `json:"indices,omitempty"` // list of index names - Fields map[string]FieldCapsType `json:"fields,omitempty"` // Name -> type -> caps -} - -// FieldCapsType represents a mapping from type (e.g. keyword) -// to capabilities. -type FieldCapsType map[string]FieldCaps // type -> caps - -// FieldCaps contains capabilities of an individual field. -type FieldCaps struct { - Type string `json:"type"` - Searchable bool `json:"searchable"` - Aggregatable bool `json:"aggregatable"` - Indices []string `json:"indices,omitempty"` - NonSearchableIndices []string `json:"non_searchable_indices,omitempty"` - NonAggregatableIndices []string `json:"non_aggregatable_indices,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/geo_point.go b/vendor/github.com/olivere/elastic/v7/geo_point.go deleted file mode 100644 index 4ef0e1a..0000000 --- a/vendor/github.com/olivere/elastic/v7/geo_point.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "fmt" - "strconv" - "strings" -) - -// GeoPoint is a geographic position described via latitude and longitude. -type GeoPoint struct { - Lat float64 `json:"lat"` - Lon float64 `json:"lon"` -} - -// Source returns the object to be serialized in Elasticsearch DSL. -func (pt *GeoPoint) Source() map[string]float64 { - return map[string]float64{ - "lat": pt.Lat, - "lon": pt.Lon, - } -} - -// MarshalJSON encodes the GeoPoint to JSON. -func (pt *GeoPoint) MarshalJSON() ([]byte, error) { - return json.Marshal(pt.Source()) -} - -// GeoPointFromLatLon initializes a new GeoPoint by latitude and longitude. -func GeoPointFromLatLon(lat, lon float64) *GeoPoint { - return &GeoPoint{Lat: lat, Lon: lon} -} - -// GeoPointFromString initializes a new GeoPoint by a string that is -// formatted as "{latitude},{longitude}", e.g. "40.10210,-70.12091". 
-func GeoPointFromString(latLon string) (*GeoPoint, error) { - latlon := strings.SplitN(latLon, ",", 2) - if len(latlon) != 2 { - return nil, fmt.Errorf("elastic: %s is not a valid geo point string", latLon) - } - lat, err := strconv.ParseFloat(latlon[0], 64) - if err != nil { - return nil, err - } - lon, err := strconv.ParseFloat(latlon[1], 64) - if err != nil { - return nil, err - } - return &GeoPoint{Lat: lat, Lon: lon}, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/get.go b/vendor/github.com/olivere/elastic/v7/get.go deleted file mode 100644 index 769389b..0000000 --- a/vendor/github.com/olivere/elastic/v7/get.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// GetService allows getting a typed JSON document from the index based -// on its id. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-get.html -// for details. -type GetService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - typ string - id string - routing string - preference string - storedFields []string - refresh string - realtime *bool - fsc *FetchSourceContext - version interface{} - versionType string - parent string - ignoreErrorsOnGeneratedFields *bool -} - -// NewGetService creates a new GetService. -func NewGetService(client *Client) *GetService { - return &GetService{ - client: client, - typ: "_doc", - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *GetService) Pretty(pretty bool) *GetService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *GetService) Human(human bool) *GetService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *GetService) ErrorTrace(errorTrace bool) *GetService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *GetService) FilterPath(filterPath ...string) *GetService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *GetService) Header(name string, value string) *GetService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *GetService) Headers(headers http.Header) *GetService { - s.headers = headers - return s -} - -// Index is the name of the index. -func (s *GetService) Index(index string) *GetService { - s.index = index - return s -} - -// Type is the type of the document -// -// Deprecated: Types are in the process of being removed. -func (s *GetService) Type(typ string) *GetService { - s.typ = typ - return s -} - -// Id is the document ID. -func (s *GetService) Id(id string) *GetService { - s.id = id - return s -} - -// Parent is the ID of the parent document. 
-func (s *GetService) Parent(parent string) *GetService { - s.parent = parent - return s -} - -// Routing is the specific routing value. -func (s *GetService) Routing(routing string) *GetService { - s.routing = routing - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). -func (s *GetService) Preference(preference string) *GetService { - s.preference = preference - return s -} - -// StoredFields is a list of fields to return in the response. -func (s *GetService) StoredFields(storedFields ...string) *GetService { - s.storedFields = append(s.storedFields, storedFields...) - return s -} - -func (s *GetService) FetchSource(fetchSource bool) *GetService { - if s.fsc == nil { - s.fsc = NewFetchSourceContext(fetchSource) - } else { - s.fsc.SetFetchSource(fetchSource) - } - return s -} - -func (s *GetService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *GetService { - s.fsc = fetchSourceContext - return s -} - -// Refresh the shard containing the document before performing the operation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *GetService) Refresh(refresh string) *GetService { - s.refresh = refresh - return s -} - -// Realtime specifies whether to perform the operation in realtime or search mode. -func (s *GetService) Realtime(realtime bool) *GetService { - s.realtime = &realtime - return s -} - -// VersionType is the specific version type. -func (s *GetService) VersionType(versionType string) *GetService { - s.versionType = versionType - return s -} - -// Version is an explicit version number for concurrency control. -func (s *GetService) Version(version interface{}) *GetService { - s.version = version - return s -} - -// IgnoreErrorsOnGeneratedFields indicates whether to ignore fields that -// are generated if the transaction log is accessed. -func (s *GetService) IgnoreErrorsOnGeneratedFields(ignore bool) *GetService { - s.ignoreErrorsOnGeneratedFields = &ignore - return s -} - -// Validate checks if the operation is valid. -func (s *GetService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// buildURL builds the URL for the operation. 
-func (s *GetService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if len(s.storedFields) > 0 { - params.Set("stored_fields", strings.Join(s.storedFields, ",")) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.ignoreErrorsOnGeneratedFields != nil { - params.Add("ignore_errors_on_generated_fields", fmt.Sprintf("%v", *s.ignoreErrorsOnGeneratedFields)) - } - if s.fsc != nil { - for k, values := range s.fsc.Query() { - params.Add(k, strings.Join(values, ",")) - } - } - return path, params, nil -} - -// Do executes the operation. -func (s *GetService) Do(ctx context.Context) (*GetResult, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(GetResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a get request. - -// GetResult is the outcome of GetService.Do. 
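Tying the pieces together, a sketch of a get round-trip with the service above, reusing the IsNotFound helper from errors.go (client, ctx, and names as before; the GetResult type follows just below):

    doc, err := elastic.NewGetService(client).
        Index("twitter").
        Id("1").
        FetchSource(true).
        Do(ctx)
    if elastic.IsNotFound(err) {
        // Document or index is missing (HTTP 404)
    } else if err != nil {
        // Handle other errors
    } else {
        var tweet map[string]interface{}
        _ = json.Unmarshal(doc.Source, &tweet) // doc.Source is raw JSON
    }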
-type GetResult struct { - Index string `json:"_index"` // index meta field - Type string `json:"_type"` // type meta field - Id string `json:"_id"` // id meta field - Uid string `json:"_uid"` // uid meta field (see MapperService.java for all meta fields) - Routing string `json:"_routing"` // routing meta field - Parent string `json:"_parent"` // parent meta field - Version *int64 `json:"_version"` // version number, when Version is set to true in SearchService - SeqNo *int64 `json:"_seq_no"` - PrimaryTerm *int64 `json:"_primary_term"` - Source json.RawMessage `json:"_source,omitempty"` - Found bool `json:"found,omitempty"` - Fields map[string]interface{} `json:"fields,omitempty"` - //Error string `json:"error,omitempty"` // used only in MultiGet - // TODO double-check that MultiGet now returns details error information - Error *ErrorDetails `json:"error,omitempty"` // only used in MultiGet -} diff --git a/vendor/github.com/olivere/elastic/v7/go.mod b/vendor/github.com/olivere/elastic/v7/go.mod deleted file mode 100644 index 61cca83..0000000 --- a/vendor/github.com/olivere/elastic/v7/go.mod +++ /dev/null @@ -1,14 +0,0 @@ -module github.com/olivere/elastic/v7 - -go 1.14 - -require ( - github.com/aws/aws-sdk-go v1.30.7 - github.com/fortytw2/leaktest v1.3.0 - github.com/google/go-cmp v0.4.0 - github.com/mailru/easyjson v0.7.1 - github.com/opentracing/opentracing-go v1.1.0 - github.com/pkg/errors v0.9.1 - github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 - go.opencensus.io v0.22.3 -) diff --git a/vendor/github.com/olivere/elastic/v7/highlight.go b/vendor/github.com/olivere/elastic/v7/highlight.go deleted file mode 100644 index 49e1b4d..0000000 --- a/vendor/github.com/olivere/elastic/v7/highlight.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Highlight allows highlighting search results on one or more fields. -// For details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-highlighting.html -type Highlight struct { - fields []*HighlighterField - tagsSchema *string - highlightFilter *bool - fragmentSize *int - numOfFragments *int - preTags []string - postTags []string - order *string - encoder *string - requireFieldMatch *bool - boundaryMaxScan *int - boundaryChars *string - boundaryScannerType *string - boundaryScannerLocale *string - highlighterType *string - fragmenter *string - highlightQuery Query - noMatchSize *int - phraseLimit *int - options map[string]interface{} - forceSource *bool - useExplicitFieldOrder bool -} - -func NewHighlight() *Highlight { - hl := &Highlight{ - options: make(map[string]interface{}), - } - return hl -} - -func (hl *Highlight) Fields(fields ...*HighlighterField) *Highlight { - hl.fields = append(hl.fields, fields...) 
- return hl -} - -func (hl *Highlight) Field(name string) *Highlight { - field := NewHighlighterField(name) - hl.fields = append(hl.fields, field) - return hl -} - -func (hl *Highlight) TagsSchema(schemaName string) *Highlight { - hl.tagsSchema = &schemaName - return hl -} - -func (hl *Highlight) HighlightFilter(highlightFilter bool) *Highlight { - hl.highlightFilter = &highlightFilter - return hl -} - -func (hl *Highlight) FragmentSize(fragmentSize int) *Highlight { - hl.fragmentSize = &fragmentSize - return hl -} - -func (hl *Highlight) NumOfFragments(numOfFragments int) *Highlight { - hl.numOfFragments = &numOfFragments - return hl -} - -func (hl *Highlight) Encoder(encoder string) *Highlight { - hl.encoder = &encoder - return hl -} - -func (hl *Highlight) PreTags(preTags ...string) *Highlight { - hl.preTags = append(hl.preTags, preTags...) - return hl -} - -func (hl *Highlight) PostTags(postTags ...string) *Highlight { - hl.postTags = append(hl.postTags, postTags...) - return hl -} - -func (hl *Highlight) Order(order string) *Highlight { - hl.order = &order - return hl -} - -func (hl *Highlight) RequireFieldMatch(requireFieldMatch bool) *Highlight { - hl.requireFieldMatch = &requireFieldMatch - return hl -} - -func (hl *Highlight) BoundaryMaxScan(boundaryMaxScan int) *Highlight { - hl.boundaryMaxScan = &boundaryMaxScan - return hl -} - -func (hl *Highlight) BoundaryChars(boundaryChars string) *Highlight { - hl.boundaryChars = &boundaryChars - return hl -} - -func (hl *Highlight) BoundaryScannerType(boundaryScannerType string) *Highlight { - hl.boundaryScannerType = &boundaryScannerType - return hl -} - -func (hl *Highlight) BoundaryScannerLocale(boundaryScannerLocale string) *Highlight { - hl.boundaryScannerLocale = &boundaryScannerLocale - return hl -} - -func (hl *Highlight) HighlighterType(highlighterType string) *Highlight { - hl.highlighterType = &highlighterType - return hl -} - -func (hl *Highlight) Fragmenter(fragmenter string) *Highlight { - hl.fragmenter = &fragmenter - return hl -} - -func (hl *Highlight) HighlightQuery(highlightQuery Query) *Highlight { - hl.highlightQuery = highlightQuery - return hl -} - -func (hl *Highlight) NoMatchSize(noMatchSize int) *Highlight { - hl.noMatchSize = &noMatchSize - return hl -} - -func (hl *Highlight) Options(options map[string]interface{}) *Highlight { - hl.options = options - return hl -} - -func (hl *Highlight) ForceSource(forceSource bool) *Highlight { - hl.forceSource = &forceSource - return hl -} - -func (hl *Highlight) UseExplicitFieldOrder(useExplicitFieldOrder bool) *Highlight { - hl.useExplicitFieldOrder = useExplicitFieldOrder - return hl -} - -// Creates the query source for the bool query. -func (hl *Highlight) Source() (interface{}, error) { - // Returns the map inside of "highlight": - // "highlight":{ - // ... this ... 
- // } - source := make(map[string]interface{}) - if hl.tagsSchema != nil { - source["tags_schema"] = *hl.tagsSchema - } - if hl.preTags != nil && len(hl.preTags) > 0 { - source["pre_tags"] = hl.preTags - } - if hl.postTags != nil && len(hl.postTags) > 0 { - source["post_tags"] = hl.postTags - } - if hl.order != nil { - source["order"] = *hl.order - } - if hl.highlightFilter != nil { - source["highlight_filter"] = *hl.highlightFilter - } - if hl.fragmentSize != nil { - source["fragment_size"] = *hl.fragmentSize - } - if hl.numOfFragments != nil { - source["number_of_fragments"] = *hl.numOfFragments - } - if hl.encoder != nil { - source["encoder"] = *hl.encoder - } - if hl.requireFieldMatch != nil { - source["require_field_match"] = *hl.requireFieldMatch - } - if hl.boundaryMaxScan != nil { - source["boundary_max_scan"] = *hl.boundaryMaxScan - } - if hl.boundaryChars != nil { - source["boundary_chars"] = *hl.boundaryChars - } - if hl.boundaryScannerType != nil { - source["boundary_scanner"] = *hl.boundaryScannerType - } - if hl.boundaryScannerLocale != nil { - source["boundary_scanner_locale"] = *hl.boundaryScannerLocale - } - if hl.highlighterType != nil { - source["type"] = *hl.highlighterType - } - if hl.fragmenter != nil { - source["fragmenter"] = *hl.fragmenter - } - if hl.highlightQuery != nil { - src, err := hl.highlightQuery.Source() - if err != nil { - return nil, err - } - source["highlight_query"] = src - } - if hl.noMatchSize != nil { - source["no_match_size"] = *hl.noMatchSize - } - if hl.phraseLimit != nil { - source["phrase_limit"] = *hl.phraseLimit - } - if hl.options != nil && len(hl.options) > 0 { - source["options"] = hl.options - } - if hl.forceSource != nil { - source["force_source"] = *hl.forceSource - } - - if hl.fields != nil && len(hl.fields) > 0 { - if hl.useExplicitFieldOrder { - // Use a slice for the fields - var fields []map[string]interface{} - for _, field := range hl.fields { - src, err := field.Source() - if err != nil { - return nil, err - } - fmap := make(map[string]interface{}) - fmap[field.Name] = src - fields = append(fields, fmap) - } - source["fields"] = fields - } else { - // Use a map for the fields - fields := make(map[string]interface{}) - for _, field := range hl.fields { - src, err := field.Source() - if err != nil { - return nil, err - } - fields[field.Name] = src - } - source["fields"] = fields - } - } - - return source, nil -} - -// HighlighterField specifies a highlighted field. 
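Highlight and the HighlighterField type below are normally attached to a search request rather than used alone. A hedged sketch of that wiring (editorial, not part of the diff): client is assumed to be an initialized *elastic.Client, imports are context, fmt, and the elastic package, and the index and field names are made up for illustration.

func searchWithHighlights(ctx context.Context, client *elastic.Client) error {
	// Wrap matched terms in <em> tags on the file_name field.
	hl := elastic.NewHighlight().
		Field("file_name").
		PreTags("<em>").
		PostTags("</em>")
	res, err := client.Search().
		Index("ffs-events").
		Query(elastic.NewMatchQuery("file_name", "invoice")).
		Highlight(hl).
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Hits != nil {
		for _, hit := range res.Hits.Hits {
			// hit.Highlight maps field names to highlighted fragments.
			for field, fragments := range hit.Highlight {
				fmt.Println(field, fragments)
			}
		}
	}
	return nil
}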
-type HighlighterField struct { - Name string - - preTags []string - postTags []string - fragmentSize int - fragmentOffset int - numOfFragments int - highlightFilter *bool - order *string - requireFieldMatch *bool - boundaryMaxScan int - boundaryChars []rune - highlighterType *string - fragmenter *string - highlightQuery Query - noMatchSize *int - matchedFields []string - phraseLimit *int - options map[string]interface{} - forceSource *bool - - /* - Name string - preTags []string - postTags []string - fragmentSize int - numOfFragments int - fragmentOffset int - highlightFilter *bool - order string - requireFieldMatch *bool - boundaryMaxScan int - boundaryChars []rune - highlighterType string - fragmenter string - highlightQuery Query - noMatchSize *int - matchedFields []string - options map[string]interface{} - forceSource *bool - */ -} - -func NewHighlighterField(name string) *HighlighterField { - return &HighlighterField{ - Name: name, - preTags: make([]string, 0), - postTags: make([]string, 0), - fragmentSize: -1, - fragmentOffset: -1, - numOfFragments: -1, - boundaryMaxScan: -1, - boundaryChars: make([]rune, 0), - matchedFields: make([]string, 0), - options: make(map[string]interface{}), - } -} - -func (f *HighlighterField) PreTags(preTags ...string) *HighlighterField { - f.preTags = append(f.preTags, preTags...) - return f -} - -func (f *HighlighterField) PostTags(postTags ...string) *HighlighterField { - f.postTags = append(f.postTags, postTags...) - return f -} - -func (f *HighlighterField) FragmentSize(fragmentSize int) *HighlighterField { - f.fragmentSize = fragmentSize - return f -} - -func (f *HighlighterField) FragmentOffset(fragmentOffset int) *HighlighterField { - f.fragmentOffset = fragmentOffset - return f -} - -func (f *HighlighterField) NumOfFragments(numOfFragments int) *HighlighterField { - f.numOfFragments = numOfFragments - return f -} - -func (f *HighlighterField) HighlightFilter(highlightFilter bool) *HighlighterField { - f.highlightFilter = &highlightFilter - return f -} - -func (f *HighlighterField) Order(order string) *HighlighterField { - f.order = &order - return f -} - -func (f *HighlighterField) RequireFieldMatch(requireFieldMatch bool) *HighlighterField { - f.requireFieldMatch = &requireFieldMatch - return f -} - -func (f *HighlighterField) BoundaryMaxScan(boundaryMaxScan int) *HighlighterField { - f.boundaryMaxScan = boundaryMaxScan - return f -} - -func (f *HighlighterField) BoundaryChars(boundaryChars ...rune) *HighlighterField { - f.boundaryChars = append(f.boundaryChars, boundaryChars...) - return f -} - -func (f *HighlighterField) HighlighterType(highlighterType string) *HighlighterField { - f.highlighterType = &highlighterType - return f -} - -func (f *HighlighterField) Fragmenter(fragmenter string) *HighlighterField { - f.fragmenter = &fragmenter - return f -} - -func (f *HighlighterField) HighlightQuery(highlightQuery Query) *HighlighterField { - f.highlightQuery = highlightQuery - return f -} - -func (f *HighlighterField) NoMatchSize(noMatchSize int) *HighlighterField { - f.noMatchSize = &noMatchSize - return f -} - -func (f *HighlighterField) Options(options map[string]interface{}) *HighlighterField { - f.options = options - return f -} - -func (f *HighlighterField) MatchedFields(matchedFields ...string) *HighlighterField { - f.matchedFields = append(f.matchedFields, matchedFields...) 
- return f -} - -func (f *HighlighterField) PhraseLimit(phraseLimit int) *HighlighterField { - f.phraseLimit = &phraseLimit - return f -} - -func (f *HighlighterField) ForceSource(forceSource bool) *HighlighterField { - f.forceSource = &forceSource - return f -} - -func (f *HighlighterField) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if f.preTags != nil && len(f.preTags) > 0 { - source["pre_tags"] = f.preTags - } - if f.postTags != nil && len(f.postTags) > 0 { - source["post_tags"] = f.postTags - } - if f.fragmentSize != -1 { - source["fragment_size"] = f.fragmentSize - } - if f.numOfFragments != -1 { - source["number_of_fragments"] = f.numOfFragments - } - if f.fragmentOffset != -1 { - source["fragment_offset"] = f.fragmentOffset - } - if f.highlightFilter != nil { - source["highlight_filter"] = *f.highlightFilter - } - if f.order != nil { - source["order"] = *f.order - } - if f.requireFieldMatch != nil { - source["require_field_match"] = *f.requireFieldMatch - } - if f.boundaryMaxScan != -1 { - source["boundary_max_scan"] = f.boundaryMaxScan - } - if f.boundaryChars != nil && len(f.boundaryChars) > 0 { - source["boundary_chars"] = f.boundaryChars - } - if f.highlighterType != nil { - source["type"] = *f.highlighterType - } - if f.fragmenter != nil { - source["fragmenter"] = *f.fragmenter - } - if f.highlightQuery != nil { - src, err := f.highlightQuery.Source() - if err != nil { - return nil, err - } - source["highlight_query"] = src - } - if f.noMatchSize != nil { - source["no_match_size"] = *f.noMatchSize - } - if f.matchedFields != nil && len(f.matchedFields) > 0 { - source["matched_fields"] = f.matchedFields - } - if f.phraseLimit != nil { - source["phrase_limit"] = *f.phraseLimit - } - if f.options != nil && len(f.options) > 0 { - source["options"] = f.options - } - if f.forceSource != nil { - source["force_source"] = *f.forceSource - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/index.go b/vendor/github.com/olivere/elastic/v7/index.go deleted file mode 100644 index 2e3d66f..0000000 --- a/vendor/github.com/olivere/elastic/v7/index.go +++ /dev/null @@ -1,377 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndexService adds or updates a typed JSON document in a specified index, -// making it searchable. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html -// for details. -type IndexService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - index string - typ string - parent string - routing string - timeout string - timestamp string - ttl string - version interface{} - opType string - versionType string - refresh string - waitForActiveShards string - pipeline string - ifSeqNo *int64 - ifPrimaryTerm *int64 - bodyJson interface{} - bodyString string -} - -// NewIndexService creates a new IndexService. 
-func NewIndexService(client *Client) *IndexService { - return &IndexService{ - client: client, - typ: "_doc", - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndexService) Pretty(pretty bool) *IndexService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndexService) Human(human bool) *IndexService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndexService) ErrorTrace(errorTrace bool) *IndexService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndexService) FilterPath(filterPath ...string) *IndexService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndexService) Header(name string, value string) *IndexService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndexService) Headers(headers http.Header) *IndexService { - s.headers = headers - return s -} - -// Id is the document ID. -func (s *IndexService) Id(id string) *IndexService { - s.id = id - return s -} - -// Index is the name of the index. -func (s *IndexService) Index(index string) *IndexService { - s.index = index - return s -} - -// Type is the type of the document. -// -// Deprecated: Types are in the process of being removed. -func (s *IndexService) Type(typ string) *IndexService { - s.typ = typ - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active -// before proceeding with the index operation. Defaults to 1, meaning the -// primary shard only. Set to `all` for all shard copies, otherwise set to -// any non-negative value less than or equal to the total number of copies -// for the shard (number of replicas + 1). -func (s *IndexService) WaitForActiveShards(waitForActiveShards string) *IndexService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// Pipeline specifies the pipeline id to preprocess incoming documents with. -func (s *IndexService) Pipeline(pipeline string) *IndexService { - s.pipeline = pipeline - return s -} - -// Refresh the index after performing the operation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *IndexService) Refresh(refresh string) *IndexService { - s.refresh = refresh - return s -} - -// Ttl is an expiration time for the document. -func (s *IndexService) Ttl(ttl string) *IndexService { - s.ttl = ttl - return s -} - -// TTL is an expiration time for the document (alias for Ttl). -func (s *IndexService) TTL(ttl string) *IndexService { - s.ttl = ttl - return s -} - -// Version is an explicit version number for concurrency control. -func (s *IndexService) Version(version interface{}) *IndexService { - s.version = version - return s -} - -// OpType is an explicit operation type, i.e. "create" or "index" (default). -func (s *IndexService) OpType(opType string) *IndexService { - s.opType = opType - return s -} - -// Parent is the ID of the parent document. -func (s *IndexService) Parent(parent string) *IndexService { - s.parent = parent - return s -} - -// Routing is a specific routing value. 
-func (s *IndexService) Routing(routing string) *IndexService { - s.routing = routing - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndexService) Timeout(timeout string) *IndexService { - s.timeout = timeout - return s -} - -// Timestamp is an explicit timestamp for the document. -func (s *IndexService) Timestamp(timestamp string) *IndexService { - s.timestamp = timestamp - return s -} - -// VersionType is a specific version type. -func (s *IndexService) VersionType(versionType string) *IndexService { - s.versionType = versionType - return s -} - -// IfSeqNo indicates to only perform the index operation if the last -// operation that has changed the document has the specified sequence number. -func (s *IndexService) IfSeqNo(seqNo int64) *IndexService { - s.ifSeqNo = &seqNo - return s -} - -// IfPrimaryTerm indicates to only perform the index operation if the -// last operation that has changed the document has the specified primary term. -func (s *IndexService) IfPrimaryTerm(primaryTerm int64) *IndexService { - s.ifPrimaryTerm = &primaryTerm - return s -} - -// BodyJson is the document as a serializable JSON interface. -func (s *IndexService) BodyJson(body interface{}) *IndexService { - s.bodyJson = body - return s -} - -// BodyString is the document encoded as a string. -func (s *IndexService) BodyString(body string) *IndexService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *IndexService) buildURL() (string, string, url.Values, error) { - var err error - var method, path string - - if s.id != "" { - // Create document with manual id - method = "PUT" - path, err = uritemplates.Expand("/{index}/{type}/{id}", map[string]string{ - "id": s.id, - "index": s.index, - "type": s.typ, - }) - } else { - // Automatic ID generation - // See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#index-creation - method = "POST" - path, err = uritemplates.Expand("/{index}/{type}/", map[string]string{ - "index": s.index, - "type": s.typ, - }) - } - if err != nil { - return "", "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.opType != "" { - params.Set("op_type", s.opType) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.pipeline != "" { - params.Set("pipeline", s.pipeline) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.timestamp != "" { - params.Set("timestamp", s.timestamp) - } - if s.ttl != "" { - params.Set("ttl", s.ttl) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if v := s.ifSeqNo; v != nil { - params.Set("if_seq_no", fmt.Sprintf("%d", *v)) - } - if v := s.ifPrimaryTerm; v != nil { - params.Set("if_primary_term", fmt.Sprintf("%d", *v)) - } - return method, path, params, nil -} - -// Validate checks if the operation is valid. 
-func (s *IndexService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if s.typ == "" { - invalid = append(invalid, "Type") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndexService) Do(ctx context.Context) (*IndexResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - method, path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: method, - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndexResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndexResponse is the result of indexing a document in Elasticsearch. -type IndexResponse struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Version int64 `json:"_version,omitempty"` - Result string `json:"result,omitempty"` - Shards *ShardsInfo `json:"_shards,omitempty"` - SeqNo int64 `json:"_seq_no,omitempty"` - PrimaryTerm int64 `json:"_primary_term,omitempty"` - Status int `json:"status,omitempty"` - ForcedRefresh bool `json:"forced_refresh,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_analyze.go b/vendor/github.com/olivere/elastic/v7/indices_analyze.go deleted file mode 100644 index 3f0c282..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_analyze.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesAnalyzeService performs the analysis process on a text and returns -// the tokens breakdown of the text. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-analyze.html -// for detail. -type IndicesAnalyzeService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - request *IndicesAnalyzeRequest - format string - preferLocal *bool - bodyJson interface{} - bodyString string -} - -// NewIndicesAnalyzeService creates a new IndicesAnalyzeService. -func NewIndicesAnalyzeService(client *Client) *IndicesAnalyzeService { - return &IndicesAnalyzeService{ - client: client, - request: new(IndicesAnalyzeRequest), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. 
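With index.go now removed in full above, a short, hedged sketch of how its IndexService is typically driven; the index name, document shape, and refresh policy are assumptions, not this project's actual indexing code.

func indexOne(ctx context.Context, client *elastic.Client) error {
	doc := map[string]interface{}{"file_name": "report.pdf", "trusted": true}
	res, err := elastic.NewIndexService(client).
		Index("ffs-events").
		Id("event-1").
		BodyJson(doc).
		Refresh("wait_for"). // make the document searchable before returning
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("result:", res.Result) // "created" or "updated"
	return nil
}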
-func (s *IndicesAnalyzeService) Pretty(pretty bool) *IndicesAnalyzeService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesAnalyzeService) Human(human bool) *IndicesAnalyzeService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesAnalyzeService) ErrorTrace(errorTrace bool) *IndicesAnalyzeService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesAnalyzeService) FilterPath(filterPath ...string) *IndicesAnalyzeService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesAnalyzeService) Header(name string, value string) *IndicesAnalyzeService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesAnalyzeService) Headers(headers http.Header) *IndicesAnalyzeService { - s.headers = headers - return s -} - -// Index is the name of the index to scope the operation. -func (s *IndicesAnalyzeService) Index(index string) *IndicesAnalyzeService { - s.index = index - return s -} - -// Format of the output. -func (s *IndicesAnalyzeService) Format(format string) *IndicesAnalyzeService { - s.format = format - return s -} - -// PreferLocal, when true, specifies that a local shard should be used -// if available. When false, a random shard is used (default: true). -func (s *IndicesAnalyzeService) PreferLocal(preferLocal bool) *IndicesAnalyzeService { - s.preferLocal = &preferLocal - return s -} - -// Request passes the analyze request to use. -func (s *IndicesAnalyzeService) Request(request *IndicesAnalyzeRequest) *IndicesAnalyzeService { - if request == nil { - s.request = new(IndicesAnalyzeRequest) - } else { - s.request = request - } - return s -} - -// Analyzer is the name of the analyzer to use. -func (s *IndicesAnalyzeService) Analyzer(analyzer string) *IndicesAnalyzeService { - s.request.Analyzer = analyzer - return s -} - -// Attributes is a list of token attributes to output; this parameter works -// only with explain=true. -func (s *IndicesAnalyzeService) Attributes(attributes ...string) *IndicesAnalyzeService { - s.request.Attributes = attributes - return s -} - -// CharFilter is a list of character filters to use for the analysis. -func (s *IndicesAnalyzeService) CharFilter(charFilter ...string) *IndicesAnalyzeService { - s.request.CharFilter = charFilter - return s -} - -// Explain, when true, outputs more advanced details (default: false). -func (s *IndicesAnalyzeService) Explain(explain bool) *IndicesAnalyzeService { - s.request.Explain = explain - return s -} - -// Field specifies to use a specific analyzer configured for this field (instead of passing the analyzer name). -func (s *IndicesAnalyzeService) Field(field string) *IndicesAnalyzeService { - s.request.Field = field - return s -} - -// Filter is a list of filters to use for the analysis. -func (s *IndicesAnalyzeService) Filter(filter ...string) *IndicesAnalyzeService { - s.request.Filter = filter - return s -} - -// Text is the text on which the analysis should be performed (when request body is not used). -func (s *IndicesAnalyzeService) Text(text ...string) *IndicesAnalyzeService { - s.request.Text = text - return s -} - -// Tokenizer is the name of the tokenizer to use for the analysis. 
-func (s *IndicesAnalyzeService) Tokenizer(tokenizer string) *IndicesAnalyzeService { - s.request.Tokenizer = tokenizer - return s -} - -// BodyJson is the text on which the analysis should be performed. -func (s *IndicesAnalyzeService) BodyJson(body interface{}) *IndicesAnalyzeService { - s.bodyJson = body - return s -} - -// BodyString is the text on which the analysis should be performed. -func (s *IndicesAnalyzeService) BodyString(body string) *IndicesAnalyzeService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesAnalyzeService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if s.index == "" { - path = "/_analyze" - } else { - path, err = uritemplates.Expand("/{index}/_analyze", map[string]string{ - "index": s.index, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.format != "" { - params.Set("format", s.format) - } - if s.preferLocal != nil { - params.Set("prefer_local", fmt.Sprintf("%v", *s.preferLocal)) - } - - return path, params, nil -} - -// Do will execute the request with the given context. -func (s *IndicesAnalyzeService) Do(ctx context.Context) (*IndicesAnalyzeResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } else { - // Request parameters are deprecated in 5.1.1, and we must use a JSON - // structure in the body to pass the parameters. - // See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-analyze.html - body = s.request - } - - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - ret := new(IndicesAnalyzeResponse) - if err = s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - - return ret, nil -} - -func (s *IndicesAnalyzeService) Validate() error { - var invalid []string - if s.bodyJson == nil && s.bodyString == "" { - if len(s.request.Text) == 0 { - invalid = append(invalid, "Text") - } - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// IndicesAnalyzeRequest specifies the parameters of the analyze request. 
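The analyze service wraps the _analyze API; its request and response types follow below. A hedged sketch using only constructs visible in this hunk (the exported constructor, Analyzer, Text, Do, and the token fields); the analyzer choice and sample text are arbitrary.

func analyzeText(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesAnalyzeService(client).
		Analyzer("standard").
		Text("Crashplan file event").
		Do(ctx)
	if err != nil {
		return err
	}
	for _, t := range res.Tokens {
		fmt.Printf("%s (%d-%d)\n", t.Token, t.StartOffset, t.EndOffset)
	}
	return nil
}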
-type IndicesAnalyzeRequest struct { - Text []string `json:"text,omitempty"` - Analyzer string `json:"analyzer,omitempty"` - Tokenizer string `json:"tokenizer,omitempty"` - Filter []string `json:"filter,omitempty"` - CharFilter []string `json:"char_filter,omitempty"` - Field string `json:"field,omitempty"` - Explain bool `json:"explain,omitempty"` - Attributes []string `json:"attributes,omitempty"` -} - -type IndicesAnalyzeResponse struct { - Tokens []AnalyzeToken `json:"tokens"` // json part for normal message - Detail IndicesAnalyzeResponseDetail `json:"detail"` // json part for verbose message of explain request -} - -type AnalyzeTokenList struct { - Name string `json:"name"` - Tokens []AnalyzeToken `json:"tokens,omitempty"` -} - -type AnalyzeToken struct { - Token string `json:"token"` - Type string `json:"type"` // e.g. "<ALPHANUM>" - StartOffset int `json:"start_offset"` - EndOffset int `json:"end_offset"` - Bytes string `json:"bytes"` // e.g. "[67 75 79]" - Position int `json:"position"` - PositionLength int `json:"positionLength"` // seems to be wrong in 7.2+ (no snake_case), see https://github.com/elastic/elasticsearch/blob/7.2/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeResponse.java - TermFrequency int `json:"termFrequency"` - Keyword bool `json:"keyword"` -} - -type CharFilteredText struct { - Name string `json:"name"` - FilteredText []string `json:"filtered_text"` -} - -type IndicesAnalyzeResponseDetail struct { - CustomAnalyzer bool `json:"custom_analyzer"` - Analyzer *AnalyzeTokenList `json:"analyzer,omitempty"` - Charfilters []*CharFilteredText `json:"charfilters,omitempty"` - Tokenizer *AnalyzeTokenList `json:"tokenizer,omitempty"` - TokenFilters []*AnalyzeTokenList `json:"tokenfilters,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_clear_cache.go b/vendor/github.com/olivere/elastic/v7/indices_clear_cache.go deleted file mode 100644 index 7a2d9f7..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_clear_cache.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesClearCacheService allows to clear either all caches or specific caches associated -// with one or more indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.6/indices-clearcache.html -// for details. -type IndicesClearCacheService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - fieldData *bool - fields string - query *bool - request *bool -} - -// NewIndicesClearCacheService initializes a new instance of -// IndicesClearCacheService. -func NewIndicesClearCacheService(client *Client) *IndicesClearCacheService { - return &IndicesClearCacheService{client: client} -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. 
-func (s *IndicesClearCacheService) Pretty(pretty bool) *IndicesClearCacheService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesClearCacheService) Human(human bool) *IndicesClearCacheService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesClearCacheService) ErrorTrace(errorTrace bool) *IndicesClearCacheService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesClearCacheService) FilterPath(filterPath ...string) *IndicesClearCacheService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesClearCacheService) Header(name string, value string) *IndicesClearCacheService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesClearCacheService) Headers(headers http.Header) *IndicesClearCacheService { - s.headers = headers - return s -} - -// Index is the comma-separated list or wildcard expression of index names used to clear cache. -func (s *IndicesClearCacheService) Index(indices ...string) *IndicesClearCacheService { - s.index = append(s.index, indices...) - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesClearCacheService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesClearCacheService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string or when no indices -// have been specified). -func (s *IndicesClearCacheService) AllowNoIndices(allowNoIndices bool) *IndicesClearCacheService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesClearCacheService) ExpandWildcards(expandWildcards string) *IndicesClearCacheService { - s.expandWildcards = expandWildcards - return s -} - -// FieldData indicates whether to clear the fields cache. -// Use the fields parameter to clear the cache of specific fields only. -func (s *IndicesClearCacheService) FieldData(fieldData bool) *IndicesClearCacheService { - s.fieldData = &fieldData - return s -} - -// Fields indicates comma-separated list of field names used to limit the fielddata parameter. -// Defaults to all fields. -func (s *IndicesClearCacheService) Fields(fields string) *IndicesClearCacheService { - s.fields = fields - return s -} - -// Query indicates whether to clear only query cache. -func (s *IndicesClearCacheService) Query(queryCache bool) *IndicesClearCacheService { - s.query = &queryCache - return s -} - -// Request indicates whether to clear only request cache. -func (s *IndicesClearCacheService) Request(requestCache bool) *IndicesClearCacheService { - s.request = &requestCache - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesClearCacheService) buildURL() (string, url.Values, error) { - // Build URL - var path string - var err error - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_cache/clear", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_cache/clear" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.allowNoIndices; v != nil { - params.Set("allow_no_indices", fmt.Sprint(*v)) - } - if v := s.expandWildcards; v != "" { - params.Set("expand_wildcards", v) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - if len(s.index) > 0 { - params.Set("index", fmt.Sprintf("%v", s.index)) - } - if v := s.fieldData; v != nil { - params.Set("fielddata", fmt.Sprint(*v)) - } - if len(s.fields) > 0 { - params.Set("fields", fmt.Sprintf("%v", s.fields)) - } - if v := s.query; v != nil { - params.Set("query", fmt.Sprint(*v)) - } - if s.request != nil { - params.Set("request", fmt.Sprintf("%v", *s.request)) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesClearCacheService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesClearCacheService) Do(ctx context.Context) (*IndicesClearCacheResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesClearCacheResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesClearCacheResponse is the response of IndicesClearCacheService.Do. -type IndicesClearCacheResponse struct { - Shards *ShardsInfo `json:"_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_close.go b/vendor/github.com/olivere/elastic/v7/indices_close.go deleted file mode 100644 index 36edd60..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_close.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesCloseService closes an index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-open-close.html -// for details. 
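A brief, hedged sketch of invoking the clear-cache service deleted above, clearing just the query and request caches of one index; the index name is an assumption.

func clearCaches(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesClearCacheService(client).
		Index("ffs-events").
		Query(true).   // clear the query cache
		Request(true). // clear the request cache
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Shards != nil {
		fmt.Printf("cleared caches on %d of %d shards\n", res.Shards.Successful, res.Shards.Total)
	}
	return nil
}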
-type IndicesCloseService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - timeout string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewIndicesCloseService creates and initializes a new IndicesCloseService. -func NewIndicesCloseService(client *Client) *IndicesCloseService { - return &IndicesCloseService{client: client} -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesCloseService) Pretty(pretty bool) *IndicesCloseService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesCloseService) Human(human bool) *IndicesCloseService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesCloseService) ErrorTrace(errorTrace bool) *IndicesCloseService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesCloseService) FilterPath(filterPath ...string) *IndicesCloseService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesCloseService) Header(name string, value string) *IndicesCloseService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesCloseService) Headers(headers http.Header) *IndicesCloseService { - s.headers = headers - return s -} - -// Index is the name of the index to close. -func (s *IndicesCloseService) Index(index string) *IndicesCloseService { - s.index = index - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesCloseService) Timeout(timeout string) *IndicesCloseService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesCloseService) MasterTimeout(masterTimeout string) *IndicesCloseService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesCloseService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesCloseService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). -func (s *IndicesCloseService) AllowNoIndices(allowNoIndices bool) *IndicesCloseService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesCloseService) ExpandWildcards(expandWildcards string) *IndicesCloseService { - s.expandWildcards = expandWildcards - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesCloseService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_close", map[string]string{ - "index": s.index, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesCloseService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesCloseService) Do(ctx context.Context) (*IndicesCloseResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesCloseResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesCloseResponse is the response of IndicesCloseService.Do. -type IndicesCloseResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_create.go b/vendor/github.com/olivere/elastic/v7/indices_create.go deleted file mode 100644 index 4bcd77b..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_create.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesCreateService creates a new index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-create-index.html -// for details. 
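For the close service deleted above, a hedged sketch; the index name and timeout are assumptions, and reopening would go through the corresponding open service, which is outside this hunk.

func closeIndex(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIndicesCloseService(client).
		Index("ffs-events-2020.04").
		Timeout("30s").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("close of index was not acknowledged")
	}
	return nil
}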
-type IndicesCreateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - timeout string - masterTimeout string - bodyJson interface{} - bodyString string -} - -// NewIndicesCreateService returns a new IndicesCreateService. -func NewIndicesCreateService(client *Client) *IndicesCreateService { - return &IndicesCreateService{client: client} -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesCreateService) Pretty(pretty bool) *IndicesCreateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesCreateService) Human(human bool) *IndicesCreateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesCreateService) ErrorTrace(errorTrace bool) *IndicesCreateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesCreateService) FilterPath(filterPath ...string) *IndicesCreateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesCreateService) Header(name string, value string) *IndicesCreateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesCreateService) Headers(headers http.Header) *IndicesCreateService { - s.headers = headers - return s -} - -// Index is the name of the index to create. -func (s *IndicesCreateService) Index(index string) *IndicesCreateService { - s.index = index - return s -} - -// Timeout is the explicit operation timeout, e.g. "5s". -func (s *IndicesCreateService) Timeout(timeout string) *IndicesCreateService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesCreateService) MasterTimeout(masterTimeout string) *IndicesCreateService { - s.masterTimeout = masterTimeout - return s -} - -// Body specifies the configuration of the index as a string. -// It is an alias for BodyString. -func (s *IndicesCreateService) Body(body string) *IndicesCreateService { - s.bodyString = body - return s -} - -// BodyString specifies the configuration of the index as a string. -func (s *IndicesCreateService) BodyString(body string) *IndicesCreateService { - s.bodyString = body - return s -} - -// BodyJson specifies the configuration of the index. The interface{} will -// be serialized as a JSON document, so use a map[string]interface{}. -func (s *IndicesCreateService) BodyJson(body interface{}) *IndicesCreateService { - s.bodyJson = body - return s -} - -// Do executes the operation. 
-func (s *IndicesCreateService) Do(ctx context.Context) (*IndicesCreateResult, error) { - if s.index == "" { - return nil, errors.New("missing index name") - } - - // Build url - path, err := uritemplates.Expand("/{index}", map[string]string{ - "index": s.index, - }) - if err != nil { - return nil, err - } - - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - ret := new(IndicesCreateResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a create index request. - -// IndicesCreateResult is the outcome of creating a new index. -type IndicesCreateResult struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_delete.go b/vendor/github.com/olivere/elastic/v7/indices_delete.go deleted file mode 100644 index cdf27a5..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_delete.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesDeleteService allows to delete existing indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-delete-index.html -// for details. -type IndicesDeleteService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - timeout string - masterTimeout string -} - -// NewIndicesDeleteService creates and initializes a new IndicesDeleteService. -func NewIndicesDeleteService(client *Client) *IndicesDeleteService { - return &IndicesDeleteService{ - client: client, - index: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesDeleteService) Pretty(pretty bool) *IndicesDeleteService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesDeleteService) Human(human bool) *IndicesDeleteService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. 
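A hedged sketch of the create service deleted above; the settings/mappings body is a minimal assumption, not this project's real index template.

func createIndex(ctx context.Context, client *elastic.Client) error {
	body := `{
		"settings": {"number_of_shards": 1, "number_of_replicas": 1},
		"mappings": {"properties": {"file_name": {"type": "keyword"}}}
	}`
	res, err := elastic.NewIndicesCreateService(client).
		Index("ffs-events").
		BodyString(body).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("acknowledged:", res.Acknowledged)
	return nil
}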
-func (s *IndicesDeleteService) ErrorTrace(errorTrace bool) *IndicesDeleteService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesDeleteService) FilterPath(filterPath ...string) *IndicesDeleteService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesDeleteService) Header(name string, value string) *IndicesDeleteService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesDeleteService) Headers(headers http.Header) *IndicesDeleteService { - s.headers = headers - return s -} - -// Index adds the list of indices to delete. -// Use `_all` or `*` string to delete all indices. -func (s *IndicesDeleteService) Index(index []string) *IndicesDeleteService { - s.index = index - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesDeleteService) Timeout(timeout string) *IndicesDeleteService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesDeleteService) MasterTimeout(masterTimeout string) *IndicesDeleteService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesDeleteService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesDeleteService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesDeleteService) Do(ctx context.Context) (*IndicesDeleteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesDeleteResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a delete index request. - -// IndicesDeleteResponse is the response of IndicesDeleteService.Do. 
-type IndicesDeleteResponse struct {
-	Acknowledged bool `json:"acknowledged"`
-}
diff --git a/vendor/github.com/olivere/elastic/v7/indices_delete_template.go b/vendor/github.com/olivere/elastic/v7/indices_delete_template.go
deleted file mode 100644
index e1ed3e6..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_delete_template.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"context"
-	"fmt"
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesDeleteTemplateService deletes index templates.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html.
-type IndicesDeleteTemplateService struct {
-	client *Client
-
-	pretty     *bool       // pretty format the returned JSON response
-	human      *bool       // return human readable values for statistics
-	errorTrace *bool       // include the stack trace of returned errors
-	filterPath []string    // list of filters used to reduce the response
-	headers    http.Header // custom request-level HTTP headers
-
-	name          string
-	timeout       string
-	masterTimeout string
-}
-
-// NewIndicesDeleteTemplateService creates a new IndicesDeleteTemplateService.
-func NewIndicesDeleteTemplateService(client *Client) *IndicesDeleteTemplateService {
-	return &IndicesDeleteTemplateService{
-		client: client,
-	}
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesDeleteTemplateService) Pretty(pretty bool) *IndicesDeleteTemplateService {
-	s.pretty = &pretty
-	return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesDeleteTemplateService) Human(human bool) *IndicesDeleteTemplateService {
-	s.human = &human
-	return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesDeleteTemplateService) ErrorTrace(errorTrace bool) *IndicesDeleteTemplateService {
-	s.errorTrace = &errorTrace
-	return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesDeleteTemplateService) FilterPath(filterPath ...string) *IndicesDeleteTemplateService {
-	s.filterPath = filterPath
-	return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesDeleteTemplateService) Header(name string, value string) *IndicesDeleteTemplateService {
-	if s.headers == nil {
-		s.headers = http.Header{}
-	}
-	s.headers.Add(name, value)
-	return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesDeleteTemplateService) Headers(headers http.Header) *IndicesDeleteTemplateService {
-	s.headers = headers
-	return s
-}
-
-// Name is the name of the template.
-func (s *IndicesDeleteTemplateService) Name(name string) *IndicesDeleteTemplateService {
-	s.name = name
-	return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesDeleteTemplateService) Timeout(timeout string) *IndicesDeleteTemplateService {
-	s.timeout = timeout
-	return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesDeleteTemplateService) MasterTimeout(masterTimeout string) *IndicesDeleteTemplateService {
-	s.masterTimeout = masterTimeout
-	return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesDeleteTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesDeleteTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesDeleteTemplateService) Do(ctx context.Context) (*IndicesDeleteTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesDeleteTemplateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesDeleteTemplateResponse is the response of IndicesDeleteTemplateService.Do. -type IndicesDeleteTemplateResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_exists.go b/vendor/github.com/olivere/elastic/v7/indices_exists.go deleted file mode 100644 index a813f56..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_exists.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesExistsService checks if an index or indices exist or not. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-exists.html -// for details. -type IndicesExistsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - local *bool -} - -// NewIndicesExistsService creates and initializes a new IndicesExistsService. 
-func NewIndicesExistsService(client *Client) *IndicesExistsService { - return &IndicesExistsService{ - client: client, - index: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesExistsService) Pretty(pretty bool) *IndicesExistsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesExistsService) Human(human bool) *IndicesExistsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesExistsService) ErrorTrace(errorTrace bool) *IndicesExistsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesExistsService) FilterPath(filterPath ...string) *IndicesExistsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesExistsService) Header(name string, value string) *IndicesExistsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesExistsService) Headers(headers http.Header) *IndicesExistsService { - s.headers = headers - return s -} - -// Index is a list of one or more indices to check. -func (s *IndicesExistsService) Index(index []string) *IndicesExistsService { - s.index = index - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or -// when no indices have been specified). -func (s *IndicesExistsService) AllowNoIndices(allowNoIndices bool) *IndicesExistsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *IndicesExistsService) ExpandWildcards(expandWildcards string) *IndicesExistsService { - s.expandWildcards = expandWildcards - return s -} - -// Local, when set, returns local information and does not retrieve the state -// from master node (default: false). -func (s *IndicesExistsService) Local(local bool) *IndicesExistsService { - s.local = &local - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesExistsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesExistsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// buildURL builds the URL for the operation. 
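// A usage sketch for the IndicesExistsService above (index name assumed,
// client as in the earlier delete sketch). Its Do method, shown further
// below, maps HTTP 200/404 to a plain bool:
//
//	exists, err := elastic.NewIndicesExistsService(client).
//		Index([]string{"ffs-events-2020.04"}).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	if !exists {
//		// e.g. create the index and its mapping before indexing events
//	}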
-func (s *IndicesExistsService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(s.index, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesExistsService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesExistsService) Do(ctx context.Context) (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "HEAD", - Path: path, - Params: params, - IgnoreErrors: []int{404}, - Headers: s.headers, - }) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_exists_template.go b/vendor/github.com/olivere/elastic/v7/indices_exists_template.go deleted file mode 100644 index c5c9bbd..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_exists_template.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesExistsTemplateService checks if a given template exists. -// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html#indices-templates-exists -// for documentation. -type IndicesExistsTemplateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string - local *bool -} - -// NewIndicesExistsTemplateService creates a new IndicesExistsTemplateService. 
-func NewIndicesExistsTemplateService(client *Client) *IndicesExistsTemplateService { - return &IndicesExistsTemplateService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesExistsTemplateService) Pretty(pretty bool) *IndicesExistsTemplateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesExistsTemplateService) Human(human bool) *IndicesExistsTemplateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesExistsTemplateService) ErrorTrace(errorTrace bool) *IndicesExistsTemplateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesExistsTemplateService) FilterPath(filterPath ...string) *IndicesExistsTemplateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesExistsTemplateService) Header(name string, value string) *IndicesExistsTemplateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesExistsTemplateService) Headers(headers http.Header) *IndicesExistsTemplateService { - s.headers = headers - return s -} - -// Name is the name of the template. -func (s *IndicesExistsTemplateService) Name(name string) *IndicesExistsTemplateService { - s.name = name - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *IndicesExistsTemplateService) Local(local bool) *IndicesExistsTemplateService { - s.local = &local - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesExistsTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesExistsTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
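// A sketch for the template existence check above; the template name is an
// assumption. The Do method that follows returns a plain bool:
//
//	found, err := elastic.NewIndicesExistsTemplateService(client).
//		Name("ffs-events-template").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println("template present:", found)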
-func (s *IndicesExistsTemplateService) Do(ctx context.Context) (bool, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return false, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return false, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "HEAD", - Path: path, - Params: params, - IgnoreErrors: []int{404}, - Headers: s.headers, - }) - if err != nil { - return false, err - } - - // Return operation response - switch res.StatusCode { - case http.StatusOK: - return true, nil - case http.StatusNotFound: - return false, nil - default: - return false, fmt.Errorf("elastic: got HTTP code %d when it should have been either 200 or 404", res.StatusCode) - } -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_flush.go b/vendor/github.com/olivere/elastic/v7/indices_flush.go deleted file mode 100644 index 5d4c4e1..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_flush.go +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// Flush allows to flush one or more indices. The flush process of an index -// basically frees memory from the index by flushing data to the index -// storage and clearing the internal transaction log. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-flush.html -// for details. -type IndicesFlushService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - force *bool - waitIfOngoing *bool - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewIndicesFlushService creates a new IndicesFlushService. -func NewIndicesFlushService(client *Client) *IndicesFlushService { - return &IndicesFlushService{ - client: client, - index: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesFlushService) Pretty(pretty bool) *IndicesFlushService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesFlushService) Human(human bool) *IndicesFlushService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesFlushService) ErrorTrace(errorTrace bool) *IndicesFlushService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesFlushService) FilterPath(filterPath ...string) *IndicesFlushService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesFlushService) Header(name string, value string) *IndicesFlushService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. 
-func (s *IndicesFlushService) Headers(headers http.Header) *IndicesFlushService { - s.headers = headers - return s -} - -// Index is a list of index names; use `_all` or empty string for all indices. -func (s *IndicesFlushService) Index(indices ...string) *IndicesFlushService { - s.index = append(s.index, indices...) - return s -} - -// Force indicates whether a flush should be forced even if it is not -// necessarily needed ie. if no changes will be committed to the index. -// This is useful if transaction log IDs should be incremented even if -// no uncommitted changes are present. (This setting can be considered as internal). -func (s *IndicesFlushService) Force(force bool) *IndicesFlushService { - s.force = &force - return s -} - -// WaitIfOngoing, if set to true, indicates that the flush operation will -// block until the flush can be executed if another flush operation is -// already executing. The default is false and will cause an exception -// to be thrown on the shard level if another flush operation is already running.. -func (s *IndicesFlushService) WaitIfOngoing(waitIfOngoing bool) *IndicesFlushService { - s.waitIfOngoing = &waitIfOngoing - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFlushService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). -func (s *IndicesFlushService) AllowNoIndices(allowNoIndices bool) *IndicesFlushService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesFlushService) ExpandWildcards(expandWildcards string) *IndicesFlushService { - s.expandWildcards = expandWildcards - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesFlushService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_flush", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_flush" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.force != nil { - params.Set("force", fmt.Sprintf("%v", *s.force)) - } - if s.waitIfOngoing != nil { - params.Set("wait_if_ongoing", fmt.Sprintf("%v", *s.waitIfOngoing)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesFlushService) Validate() error { - return nil -} - -// Do executes the service. 
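// A sketch for IndicesFlushService, whose Do method follows below. Setting
// WaitIfOngoing(true) avoids the shard-level error raised when another flush
// is already running (index name assumed; ShardsInfo is from this package):
//
//	resp, err := elastic.NewIndicesFlushService(client).
//		Index("ffs-events-2020.04").
//		WaitIfOngoing(true).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("flushed %d of %d shards\n", resp.Shards.Successful, resp.Shards.Total)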
-func (s *IndicesFlushService) Do(ctx context.Context) (*IndicesFlushResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesFlushResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a flush request. - -type IndicesFlushResponse struct { - Shards *ShardsInfo `json:"_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_flush_synced.go b/vendor/github.com/olivere/elastic/v7/indices_flush_synced.go deleted file mode 100644 index 140ae62..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_flush_synced.go +++ /dev/null @@ -1,281 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesSyncedFlushService performs a normal flush, then adds a generated -// unique marked (sync_id) to all shards. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-synced-flush.html -// for details. -type IndicesSyncedFlushService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewIndicesSyncedFlushService creates a new IndicesSyncedFlushService. -func NewIndicesSyncedFlushService(client *Client) *IndicesSyncedFlushService { - return &IndicesSyncedFlushService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesSyncedFlushService) Pretty(pretty bool) *IndicesSyncedFlushService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesSyncedFlushService) Human(human bool) *IndicesSyncedFlushService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesSyncedFlushService) ErrorTrace(errorTrace bool) *IndicesSyncedFlushService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesSyncedFlushService) FilterPath(filterPath ...string) *IndicesSyncedFlushService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesSyncedFlushService) Header(name string, value string) *IndicesSyncedFlushService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. 
-func (s *IndicesSyncedFlushService) Headers(headers http.Header) *IndicesSyncedFlushService { - s.headers = headers - return s -} - -// Index is a list of index names; use `_all` or empty string for all indices. -func (s *IndicesSyncedFlushService) Index(indices ...string) *IndicesSyncedFlushService { - s.index = append(s.index, indices...) - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesSyncedFlushService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSyncedFlushService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). -func (s *IndicesSyncedFlushService) AllowNoIndices(allowNoIndices bool) *IndicesSyncedFlushService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesSyncedFlushService) ExpandWildcards(expandWildcards string) *IndicesSyncedFlushService { - s.expandWildcards = expandWildcards - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesSyncedFlushService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_flush/synced", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_flush/synced" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesSyncedFlushService) Validate() error { - return nil -} - -// Do executes the service. -func (s *IndicesSyncedFlushService) Do(ctx context.Context) (*IndicesSyncedFlushResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesSyncedFlushResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a flush request. - -// IndicesSyncedFlushResponse is the outcome of a synched flush call. 
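// A sketch for the synced flush service above; per-index results come from
// the IndicesSyncedFlushResponse type declared just below (index name
// assumed):
//
//	resp, err := elastic.NewIndicesSyncedFlushService(client).
//		Index("ffs-events-2020.04").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	for index, result := range resp.Index {
//		fmt.Printf("%s: %d/%d shards synced\n", index, result.Successful, result.Total)
//	}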
-type IndicesSyncedFlushResponse struct { - Shards *ShardsInfo `json:"_shards"` - Index map[string]*IndicesShardsSyncedFlushResult `json:"-"` - - // TODO Add information about the indices here from the root level - // It looks like this: - // { - // "_shards" : { - // "total" : 4, - // "successful" : 4, - // "failed" : 0 - // }, - // "elastic-test" : { - // "total" : 1, - // "successful" : 1, - // "failed" : 0 - // }, - // "elastic-test2" : { - // "total" : 1, - // "successful" : 1, - // "failed" : 0 - // }, - // "elastic-orders" : { - // "total" : 1, - // "successful" : 1, - // "failed" : 0 - // }, - // "elastic-nosource-test" : { - // "total" : 1, - // "successful" : 1, - // "failed" : 0 - // } - // } -} - -// IndicesShardsSyncedFlushResult represents synced flush information about -// a specific index. -type IndicesShardsSyncedFlushResult struct { - Total int `json:"total"` - Successful int `json:"successful"` - Failed int `json:"failed"` - Failures []IndicesShardsSyncedFlushResultFailure `json:"failures,omitempty"` -} - -// IndicesShardsSyncedFlushResultFailure represents a failure of a synced -// flush operation. -type IndicesShardsSyncedFlushResultFailure struct { - Shard int `json:"shard"` - Reason string `json:"reason"` - Routing struct { - State string `json:"state"` - Primary bool `json:"primary"` - Node string `json:"node"` - RelocatingNode *string `json:"relocating_node"` - Shard int `json:"shard"` - Index string `json:"index"` - ExpectedShardSizeInBytes int64 `json:"expected_shard_size_in_bytes,omitempty"` - // recoverySource - // allocationId - // unassignedInfo - } `json:"routing"` -} - -// UnmarshalJSON parses the output from Synced Flush API. -func (resp *IndicesSyncedFlushResponse) UnmarshalJSON(data []byte) error { - m := make(map[string]json.RawMessage) - err := json.Unmarshal(data, &m) - if err != nil { - return err - } - resp.Index = make(map[string]*IndicesShardsSyncedFlushResult) - for k, v := range m { - if k == "_shards" { - if err := json.Unmarshal(v, &resp.Shards); err != nil { - return err - } - } else { - ix := new(IndicesShardsSyncedFlushResult) - if err := json.Unmarshal(v, &ix); err != nil { - return err - } - resp.Index[k] = ix - } - } - return nil -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_forcemerge.go b/vendor/github.com/olivere/elastic/v7/indices_forcemerge.go deleted file mode 100644 index 8098390..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_forcemerge.go +++ /dev/null @@ -1,235 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesForcemergeService allows to force merging of one or more indices. -// The merge relates to the number of segments a Lucene index holds -// within each shard. The force merge operation allows to reduce the number -// of segments by merging them. -// -// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-forcemerge.html -// for more information. 
-type IndicesForcemergeService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - allowNoIndices *bool - expandWildcards string - flush *bool - ignoreUnavailable *bool - maxNumSegments interface{} - onlyExpungeDeletes *bool -} - -// NewIndicesForcemergeService creates a new IndicesForcemergeService. -func NewIndicesForcemergeService(client *Client) *IndicesForcemergeService { - return &IndicesForcemergeService{ - client: client, - index: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesForcemergeService) Pretty(pretty bool) *IndicesForcemergeService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesForcemergeService) Human(human bool) *IndicesForcemergeService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesForcemergeService) ErrorTrace(errorTrace bool) *IndicesForcemergeService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesForcemergeService) FilterPath(filterPath ...string) *IndicesForcemergeService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesForcemergeService) Header(name string, value string) *IndicesForcemergeService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesForcemergeService) Headers(headers http.Header) *IndicesForcemergeService { - s.headers = headers - return s -} - -// Index is a list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *IndicesForcemergeService) Index(index ...string) *IndicesForcemergeService { - if s.index == nil { - s.index = make([]string, 0) - } - s.index = append(s.index, index...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. -// (This includes `_all` string or when no indices have been specified). -func (s *IndicesForcemergeService) AllowNoIndices(allowNoIndices bool) *IndicesForcemergeService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesForcemergeService) ExpandWildcards(expandWildcards string) *IndicesForcemergeService { - s.expandWildcards = expandWildcards - return s -} - -// Flush specifies whether the index should be flushed after performing -// the operation (default: true). -func (s *IndicesForcemergeService) Flush(flush bool) *IndicesForcemergeService { - s.flush = &flush - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should -// be ignored when unavailable (missing or closed). 
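// A sketch for IndicesForcemergeService; the remaining builder methods
// (IgnoreUnavailable, MaxNumSegments, OnlyExpungeDeletes) continue below.
// Merging down to one segment suits read-only daily indices (name assumed):
//
//	resp, err := elastic.NewIndicesForcemergeService(client).
//		Index("ffs-events-2020.04").
//		MaxNumSegments(1).
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("merge touched %d shards\n", resp.Shards.Total)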
-func (s *IndicesForcemergeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesForcemergeService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// MaxNumSegments specifies the number of segments the index should be -// merged into (default: dynamic). -func (s *IndicesForcemergeService) MaxNumSegments(maxNumSegments interface{}) *IndicesForcemergeService { - s.maxNumSegments = maxNumSegments - return s -} - -// OnlyExpungeDeletes specifies whether the operation should only expunge -// deleted documents. -func (s *IndicesForcemergeService) OnlyExpungeDeletes(onlyExpungeDeletes bool) *IndicesForcemergeService { - s.onlyExpungeDeletes = &onlyExpungeDeletes - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesForcemergeService) buildURL() (string, url.Values, error) { - var err error - var path string - - // Build URL - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_forcemerge", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_forcemerge" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flush != nil { - params.Set("flush", fmt.Sprintf("%v", *s.flush)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.maxNumSegments != nil { - params.Set("max_num_segments", fmt.Sprintf("%v", s.maxNumSegments)) - } - if s.onlyExpungeDeletes != nil { - params.Set("only_expunge_deletes", fmt.Sprintf("%v", *s.onlyExpungeDeletes)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesForcemergeService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesForcemergeService) Do(ctx context.Context) (*IndicesForcemergeResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesForcemergeResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesForcemergeResponse is the response of IndicesForcemergeService.Do. -type IndicesForcemergeResponse struct { - Shards *ShardsInfo `json:"_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_freeze.go b/vendor/github.com/olivere/elastic/v7/indices_freeze.go deleted file mode 100644 index 04c2a3e..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_freeze.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesFreezeService freezes an index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/freeze-index-api.html -// and https://www.elastic.co/blog/creating-frozen-indices-with-the-elasticsearch-freeze-index-api -// for details. -type IndicesFreezeService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - timeout string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - waitForActiveShards string -} - -// NewIndicesFreezeService creates a new IndicesFreezeService. -func NewIndicesFreezeService(client *Client) *IndicesFreezeService { - return &IndicesFreezeService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesFreezeService) Pretty(pretty bool) *IndicesFreezeService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesFreezeService) Human(human bool) *IndicesFreezeService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesFreezeService) ErrorTrace(errorTrace bool) *IndicesFreezeService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesFreezeService) FilterPath(filterPath ...string) *IndicesFreezeService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesFreezeService) Header(name string, value string) *IndicesFreezeService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesFreezeService) Headers(headers http.Header) *IndicesFreezeService { - s.headers = headers - return s -} - -// Index is the name of the index to freeze. -func (s *IndicesFreezeService) Index(index string) *IndicesFreezeService { - s.index = index - return s -} - -// Timeout allows to specify an explicit timeout. -func (s *IndicesFreezeService) Timeout(timeout string) *IndicesFreezeService { - s.timeout = timeout - return s -} - -// MasterTimeout allows to specify a timeout for connection to master. -func (s *IndicesFreezeService) MasterTimeout(masterTimeout string) *IndicesFreezeService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesFreezeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesFreezeService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). 
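// A sketch for IndicesFreezeService (its remaining options and Do follow
// below). Freeze targets a single concrete index, typically an old one that
// is no longer written to; the name here is an assumption:
//
//	resp, err := elastic.NewIndicesFreezeService(client).
//		Index("ffs-events-2019.01").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	_ = resp // IndicesFreezeResponse only carries shard info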
-func (s *IndicesFreezeService) AllowNoIndices(allowNoIndices bool) *IndicesFreezeService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesFreezeService) ExpandWildcards(expandWildcards string) *IndicesFreezeService { - s.expandWildcards = expandWildcards - return s -} - -// WaitForActiveShards sets the number of active shards to wait for -// before the operation returns. -func (s *IndicesFreezeService) WaitForActiveShards(numShards string) *IndicesFreezeService { - s.waitForActiveShards = numShards - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesFreezeService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_freeze", map[string]string{ - "index": s.index, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesFreezeService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the service. -func (s *IndicesFreezeService) Do(ctx context.Context) (*IndicesFreezeResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesFreezeResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesFreezeResponse is the outcome of freezing an index. -type IndicesFreezeResponse struct { - Shards *ShardsInfo `json:"_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_get.go b/vendor/github.com/olivere/elastic/v7/indices_get.go deleted file mode 100644 index 6416c04..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_get.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesGetService retrieves information about one or more indices. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-index.html -// for more details. -type IndicesGetService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - feature []string - local *bool - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - flatSettings *bool -} - -// NewIndicesGetService creates a new IndicesGetService. -func NewIndicesGetService(client *Client) *IndicesGetService { - return &IndicesGetService{ - client: client, - index: make([]string, 0), - feature: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesGetService) Pretty(pretty bool) *IndicesGetService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesGetService) Human(human bool) *IndicesGetService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesGetService) ErrorTrace(errorTrace bool) *IndicesGetService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesGetService) FilterPath(filterPath ...string) *IndicesGetService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesGetService) Header(name string, value string) *IndicesGetService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesGetService) Headers(headers http.Header) *IndicesGetService { - s.headers = headers - return s -} - -// Index is a list of index names. -func (s *IndicesGetService) Index(indices ...string) *IndicesGetService { - s.index = append(s.index, indices...) - return s -} - -// Feature is a list of features. -func (s *IndicesGetService) Feature(features ...string) *IndicesGetService { - s.feature = append(s.feature, features...) - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve -// the state from master node (default: false). -func (s *IndicesGetService) Local(local bool) *IndicesGetService { - s.local = &local - return s -} - -// IgnoreUnavailable indicates whether to ignore unavailable indexes (default: false). -func (s *IndicesGetService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard expression -// resolves to no concrete indices (default: false). -func (s *IndicesGetService) AllowNoIndices(allowNoIndices bool) *IndicesGetService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether wildcard expressions should get -// expanded to open or closed indices (default: open). 
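// A sketch for IndicesGetService (buildURL, Validate, and Do follow below).
// Do returns one IndicesGetResponse per matched index, keyed by index name;
// the wildcard pattern is an assumption:
//
//	indices, err := elastic.NewIndicesGetService(client).
//		Index("ffs-events-*").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	for name, detail := range indices {
//		fmt.Println(name, detail.Settings)
//	}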
-func (s *IndicesGetService) ExpandWildcards(expandWildcards string) *IndicesGetService { - s.expandWildcards = expandWildcards - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesGetService) buildURL() (string, url.Values, error) { - var err error - var path string - var index []string - - if len(s.index) > 0 { - index = s.index - } else { - index = []string{"_all"} - } - - if len(s.feature) > 0 { - // Build URL - path, err = uritemplates.Expand("/{index}/{feature}", map[string]string{ - "index": strings.Join(index, ","), - "feature": strings.Join(s.feature, ","), - }) - } else { - // Build URL - path, err = uritemplates.Expand("/{index}", map[string]string{ - "index": strings.Join(index, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesGetService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesGetService) Do(ctx context.Context) (map[string]*IndicesGetResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]*IndicesGetResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesGetResponse is part of the response of IndicesGetService.Do. -type IndicesGetResponse struct { - Aliases map[string]interface{} `json:"aliases"` - Mappings map[string]interface{} `json:"mappings"` - Settings map[string]interface{} `json:"settings"` - Warmers map[string]interface{} `json:"warmers"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_aliases.go b/vendor/github.com/olivere/elastic/v7/indices_get_aliases.go deleted file mode 100644 index 596dc60..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_get_aliases.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// AliasesService returns the aliases associated with one or more indices, or the -// indices associated with one or more aliases, or a combination of those filters. -// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html. -type AliasesService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - alias []string -} - -// NewAliasesService instantiates a new AliasesService. -func NewAliasesService(client *Client) *AliasesService { - builder := &AliasesService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *AliasesService) Pretty(pretty bool) *AliasesService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *AliasesService) Human(human bool) *AliasesService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *AliasesService) ErrorTrace(errorTrace bool) *AliasesService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *AliasesService) FilterPath(filterPath ...string) *AliasesService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *AliasesService) Header(name string, value string) *AliasesService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *AliasesService) Headers(headers http.Header) *AliasesService { - s.headers = headers - return s -} - -// Index adds one or more indices. -func (s *AliasesService) Index(index ...string) *AliasesService { - s.index = append(s.index, index...) - return s -} - -// Alias adds one or more aliases. -func (s *AliasesService) Alias(alias ...string) *AliasesService { - s.alias = append(s.alias, alias...) - return s -} - -// buildURL builds the URL for the operation. 
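// A sketch for AliasesService: resolving which concrete indices back an
// alias (alias name assumed); the IndicesByAlias helper is defined further
// below:
//
//	ret, err := elastic.NewAliasesService(client).
//		Alias("ffs-events").
//		Do(context.Background())
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Println(ret.IndicesByAlias("ffs-events"))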
-func (s *AliasesService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_alias/{alias}", map[string]string{ - "index": strings.Join(s.index, ","), - "alias": strings.Join(s.alias, ","), - }) - } else { - path, err = uritemplates.Expand("/_alias/{alias}", map[string]string{ - "alias": strings.Join(s.alias, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - path = strings.TrimSuffix(path, "/") - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -func (s *AliasesService) Do(ctx context.Context) (*AliasesResult, error) { - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // { - // "indexName" : { - // "aliases" : { - // "alias1" : { }, - // "alias2" : { } - // } - // }, - // "indexName2" : { - // ... - // }, - // } - indexMap := make(map[string]struct { - Aliases map[string]struct { - IsWriteIndex bool `json:"is_write_index"` - } `json:"aliases"` - }) - if err := s.client.decoder.Decode(res.Body, &indexMap); err != nil { - return nil, err - } - - // Each (indexName, _) - ret := &AliasesResult{ - Indices: make(map[string]indexResult), - } - for indexName, indexData := range indexMap { - if indexData.Aliases == nil { - continue - } - - indexOut, found := ret.Indices[indexName] - if !found { - indexOut = indexResult{Aliases: make([]aliasResult, 0)} - } - - // { "aliases" : { ... } } - for aliasName, aliasData := range indexData.Aliases { - aliasRes := aliasResult{AliasName: aliasName, IsWriteIndex: aliasData.IsWriteIndex} - indexOut.Aliases = append(indexOut.Aliases, aliasRes) - } - - ret.Indices[indexName] = indexOut - } - - return ret, nil -} - -// -- Result of an alias request. - -// AliasesResult is the outcome of calling AliasesService.Do. -type AliasesResult struct { - Indices map[string]indexResult -} - -type indexResult struct { - Aliases []aliasResult -} - -type aliasResult struct { - AliasName string - IsWriteIndex bool -} - -// IndicesByAlias returns all indices given a specific alias name. -func (ar AliasesResult) IndicesByAlias(aliasName string) []string { - var indices []string - for indexName, indexInfo := range ar.Indices { - for _, aliasInfo := range indexInfo.Aliases { - if aliasInfo.AliasName == aliasName { - indices = append(indices, indexName) - } - } - } - return indices -} - -// HasAlias returns true if the index has a specific alias. -func (ir indexResult) HasAlias(aliasName string) bool { - for _, alias := range ir.Aliases { - if alias.AliasName == aliasName { - return true - } - } - return false -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go b/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go deleted file mode 100644 index 5875af5..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go +++ /dev/null @@ -1,238 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. 
diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go b/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go
deleted file mode 100644
index 5875af5..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_get_field_mapping.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesGetFieldMappingService retrieves the mapping definitions for the fields in an index
-// or index/type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-field-mapping.html
-// for details.
-type IndicesGetFieldMappingService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index             []string
-    typ               []string
-    field             []string
-    local             *bool
-    ignoreUnavailable *bool
-    allowNoIndices    *bool
-    expandWildcards   string
-}
-
-// NewGetFieldMappingService is an alias for NewIndicesGetFieldMappingService.
-// Use NewIndicesGetFieldMappingService.
-func NewGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
-    return NewIndicesGetFieldMappingService(client)
-}
-
-// NewIndicesGetFieldMappingService creates a new IndicesGetFieldMappingService.
-func NewIndicesGetFieldMappingService(client *Client) *IndicesGetFieldMappingService {
-    return &IndicesGetFieldMappingService{
-        client: client,
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesGetFieldMappingService) Pretty(pretty bool) *IndicesGetFieldMappingService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesGetFieldMappingService) Human(human bool) *IndicesGetFieldMappingService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesGetFieldMappingService) ErrorTrace(errorTrace bool) *IndicesGetFieldMappingService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesGetFieldMappingService) FilterPath(filterPath ...string) *IndicesGetFieldMappingService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesGetFieldMappingService) Header(name string, value string) *IndicesGetFieldMappingService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesGetFieldMappingService) Headers(headers http.Header) *IndicesGetFieldMappingService {
-    s.headers = headers
-    return s
-}
-
-// Index is a list of index names.
-func (s *IndicesGetFieldMappingService) Index(indices ...string) *IndicesGetFieldMappingService {
-    s.index = append(s.index, indices...)
-    return s
-}
-
-// Type is a list of document types.
-func (s *IndicesGetFieldMappingService) Type(types ...string) *IndicesGetFieldMappingService {
-    s.typ = append(s.typ, types...)
-    return s
-}
-
-// Field is a list of fields.
-func (s *IndicesGetFieldMappingService) Field(fields ...string) *IndicesGetFieldMappingService {
-    s.field = append(s.field, fields...)
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesGetFieldMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetFieldMappingService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both..
-func (s *IndicesGetFieldMappingService) ExpandWildcards(expandWildcards string) *IndicesGetFieldMappingService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// Local indicates whether to return local information, do not retrieve
-// the state from master node (default: false).
-func (s *IndicesGetFieldMappingService) Local(local bool) *IndicesGetFieldMappingService {
-    s.local = &local
-    return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesGetFieldMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetFieldMappingService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetFieldMappingService) buildURL() (string, url.Values, error) {
-    var index, typ, field []string
-
-    if len(s.index) > 0 {
-        index = s.index
-    } else {
-        index = []string{"_all"}
-    }
-
-    if len(s.typ) > 0 {
-        typ = s.typ
-    } else {
-        typ = []string{"_all"}
-    }
-
-    if len(s.field) > 0 {
-        field = s.field
-    } else {
-        field = []string{"*"}
-    }
-
-    // Build URL
-    path, err := uritemplates.Expand("/{index}/_mapping/{type}/field/{field}", map[string]string{
-        "index": strings.Join(index, ","),
-        "type":  strings.Join(typ, ","),
-        "field": strings.Join(field, ","),
-    })
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.ignoreUnavailable != nil {
-        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-    }
-    if s.allowNoIndices != nil {
-        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-    }
-    if s.expandWildcards != "" {
-        params.Set("expand_wildcards", s.expandWildcards)
-    }
-    if s.local != nil {
-        params.Set("local", fmt.Sprintf("%v", *s.local))
-    }
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetFieldMappingService) Validate() error {
-    return nil
-}
-
-// Do executes the operation. It returns mapping definitions for an index
-// or index/type.
-func (s *IndicesGetFieldMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
-    var ret map[string]interface{}
-
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "GET",
-        Path:    path,
-        Params:  params,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
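A hedged usage sketch for the field-mapping service deleted above, under the same assumed client setup; the index and field names are made up:

// fieldMapping fetches the mapping definition for a single field as a
// generic map; the service defaults index/type/field to _all/_all/* when unset.
func fieldMapping(ctx context.Context, client *elastic.Client) (map[string]interface{}, error) {
	return elastic.NewIndicesGetFieldMappingService(client).
		Index("file-events").
		Field("event_timestamp").
		Do(ctx)
}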
diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_mapping.go b/vendor/github.com/olivere/elastic/v7/indices_get_mapping.go
deleted file mode 100644
index 3520163..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_get_mapping.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesGetMappingService retrieves the mapping definitions for an index or
-// index/type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-mapping.html
-// for details.
-type IndicesGetMappingService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index             []string
-    typ               []string
-    local             *bool
-    ignoreUnavailable *bool
-    allowNoIndices    *bool
-    expandWildcards   string
-}
-
-// NewGetMappingService is an alias for NewIndicesGetMappingService.
-// Use NewIndicesGetMappingService.
-func NewGetMappingService(client *Client) *IndicesGetMappingService {
-    return NewIndicesGetMappingService(client)
-}
-
-// NewIndicesGetMappingService creates a new IndicesGetMappingService.
-func NewIndicesGetMappingService(client *Client) *IndicesGetMappingService {
-    return &IndicesGetMappingService{
-        client: client,
-        index:  make([]string, 0),
-        typ:    make([]string, 0),
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesGetMappingService) Pretty(pretty bool) *IndicesGetMappingService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesGetMappingService) Human(human bool) *IndicesGetMappingService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesGetMappingService) ErrorTrace(errorTrace bool) *IndicesGetMappingService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesGetMappingService) FilterPath(filterPath ...string) *IndicesGetMappingService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesGetMappingService) Header(name string, value string) *IndicesGetMappingService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesGetMappingService) Headers(headers http.Header) *IndicesGetMappingService {
-    s.headers = headers
-    return s
-}
-
-// Index is a list of index names.
-func (s *IndicesGetMappingService) Index(indices ...string) *IndicesGetMappingService {
-    s.index = append(s.index, indices...)
-    return s
-}
-
-// Type is a list of document types.
-func (s *IndicesGetMappingService) Type(types ...string) *IndicesGetMappingService {
-    s.typ = append(s.typ, types...)
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesGetMappingService) AllowNoIndices(allowNoIndices bool) *IndicesGetMappingService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both..
-func (s *IndicesGetMappingService) ExpandWildcards(expandWildcards string) *IndicesGetMappingService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// Local indicates whether to return local information, do not retrieve
-// the state from master node (default: false).
-func (s *IndicesGetMappingService) Local(local bool) *IndicesGetMappingService {
-    s.local = &local
-    return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesGetMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetMappingService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetMappingService) buildURL() (string, url.Values, error) {
-    var index, typ []string
-
-    if len(s.index) > 0 {
-        index = s.index
-    } else {
-        index = []string{"_all"}
-    }
-
-    if len(s.typ) > 0 {
-        typ = s.typ
-    } else {
-        typ = []string{"_all"}
-    }
-
-    // Build URL
-    path, err := uritemplates.Expand("/{index}/_mapping/{type}", map[string]string{
-        "index": strings.Join(index, ","),
-        "type":  strings.Join(typ, ","),
-    })
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.ignoreUnavailable != nil {
-        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-    }
-    if s.allowNoIndices != nil {
-        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-    }
-    if s.expandWildcards != "" {
-        params.Set("expand_wildcards", s.expandWildcards)
-    }
-    if s.local != nil {
-        params.Set("local", fmt.Sprintf("%v", *s.local))
-    }
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetMappingService) Validate() error {
-    return nil
-}
-
-// Do executes the operation. It returns mapping definitions for an index
-// or index/type.
-func (s *IndicesGetMappingService) Do(ctx context.Context) (map[string]interface{}, error) {
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "GET",
-        Path:    path,
-        Params:  params,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    var ret map[string]interface{}
-    if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
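Likewise, a small sketch of fetching a whole index mapping with the service removed above (assumed client and ctx as before):

// indexMapping retrieves the full mapping of one index as a generic map,
// mirroring the Do signature shown above.
func indexMapping(ctx context.Context, client *elastic.Client, index string) (map[string]interface{}, error) {
	return elastic.NewIndicesGetMappingService(client).Index(index).Do(ctx)
}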
diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_settings.go b/vendor/github.com/olivere/elastic/v7/indices_get_settings.go
deleted file mode 100644
index 2f7e4d4..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_get_settings.go
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesGetSettingsService allows to retrieve settings of one
-// or more indices.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-get-settings.html
-// for more details.
-type IndicesGetSettingsService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index             []string
-    name              []string
-    ignoreUnavailable *bool
-    allowNoIndices    *bool
-    expandWildcards   string
-    flatSettings      *bool
-    local             *bool
-}
-
-// NewIndicesGetSettingsService creates a new IndicesGetSettingsService.
-func NewIndicesGetSettingsService(client *Client) *IndicesGetSettingsService {
-    return &IndicesGetSettingsService{
-        client: client,
-        index:  make([]string, 0),
-        name:   make([]string, 0),
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesGetSettingsService) Pretty(pretty bool) *IndicesGetSettingsService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesGetSettingsService) Human(human bool) *IndicesGetSettingsService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesGetSettingsService) ErrorTrace(errorTrace bool) *IndicesGetSettingsService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesGetSettingsService) FilterPath(filterPath ...string) *IndicesGetSettingsService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesGetSettingsService) Header(name string, value string) *IndicesGetSettingsService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesGetSettingsService) Headers(headers http.Header) *IndicesGetSettingsService {
-    s.headers = headers
-    return s
-}
-
-// Index is a list of index names; use `_all` or empty string to perform
-// the operation on all indices.
-func (s *IndicesGetSettingsService) Index(indices ...string) *IndicesGetSettingsService {
-    s.index = append(s.index, indices...)
-    return s
-}
-
-// Name are the names of the settings that should be included.
-func (s *IndicesGetSettingsService) Name(name ...string) *IndicesGetSettingsService {
-    s.name = append(s.name, name...)
-    return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *IndicesGetSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesGetSettingsService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesGetSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesGetSettingsService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression
-// to concrete indices that are open, closed or both.
-// Options: open, closed, none, all. Default: open,closed.
-func (s *IndicesGetSettingsService) ExpandWildcards(expandWildcards string) *IndicesGetSettingsService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesGetSettingsService) FlatSettings(flatSettings bool) *IndicesGetSettingsService {
-    s.flatSettings = &flatSettings
-    return s
-}
-
-// Local indicates whether to return local information, do not retrieve
-// the state from master node (default: false).
-func (s *IndicesGetSettingsService) Local(local bool) *IndicesGetSettingsService {
-    s.local = &local
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetSettingsService) buildURL() (string, url.Values, error) {
-    var err error
-    var path string
-    var index []string
-
-    if len(s.index) > 0 {
-        index = s.index
-    } else {
-        index = []string{"_all"}
-    }
-
-    if len(s.name) > 0 {
-        // Build URL
-        path, err = uritemplates.Expand("/{index}/_settings/{name}", map[string]string{
-            "index": strings.Join(index, ","),
-            "name":  strings.Join(s.name, ","),
-        })
-    } else {
-        // Build URL
-        path, err = uritemplates.Expand("/{index}/_settings", map[string]string{
-            "index": strings.Join(index, ","),
-        })
-    }
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.ignoreUnavailable != nil {
-        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-    }
-    if s.allowNoIndices != nil {
-        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-    }
-    if s.expandWildcards != "" {
-        params.Set("expand_wildcards", s.expandWildcards)
-    }
-    if s.flatSettings != nil {
-        params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
-    }
-    if s.local != nil {
-        params.Set("local", fmt.Sprintf("%v", *s.local))
-    }
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetSettingsService) Validate() error {
-    return nil
-}
-
-// Do executes the operation.
-func (s *IndicesGetSettingsService) Do(ctx context.Context) (map[string]*IndicesGetSettingsResponse, error) {
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "GET",
-        Path:    path,
-        Params:  params,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    var ret map[string]*IndicesGetSettingsResponse
-    if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
-
-// IndicesGetSettingsResponse is the response of IndicesGetSettingsService.Do.
-type IndicesGetSettingsResponse struct {
-    Settings map[string]interface{} `json:"settings"`
-}
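A sketch of reading one setting back with the service above; FlatSettings(true) is used so the dotted key lookup below works (client, ctx, and names are illustrative):

// replicaCount digs the number_of_replicas setting out of the per-index
// response map returned by the service above.
func replicaCount(ctx context.Context, client *elastic.Client, index string) (interface{}, error) {
	res, err := elastic.NewIndicesGetSettingsService(client).
		Index(index).
		Name("index.number_of_replicas").
		FlatSettings(true).
		Do(ctx)
	if err != nil {
		return nil, err
	}
	info, ok := res[index]
	if !ok {
		return nil, fmt.Errorf("no settings returned for %s", index)
	}
	return info.Settings["index.number_of_replicas"], nil
}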
diff --git a/vendor/github.com/olivere/elastic/v7/indices_get_template.go b/vendor/github.com/olivere/elastic/v7/indices_get_template.go
deleted file mode 100644
index aeafe91..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_get_template.go
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesGetTemplateService returns an index template.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html.
-type IndicesGetTemplateService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    name         []string
-    flatSettings *bool
-    local        *bool
-}
-
-// NewIndicesGetTemplateService creates a new IndicesGetTemplateService.
-func NewIndicesGetTemplateService(client *Client) *IndicesGetTemplateService {
-    return &IndicesGetTemplateService{
-        client: client,
-        name:   make([]string, 0),
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesGetTemplateService) Pretty(pretty bool) *IndicesGetTemplateService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesGetTemplateService) Human(human bool) *IndicesGetTemplateService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesGetTemplateService) ErrorTrace(errorTrace bool) *IndicesGetTemplateService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesGetTemplateService) FilterPath(filterPath ...string) *IndicesGetTemplateService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesGetTemplateService) Header(name string, value string) *IndicesGetTemplateService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesGetTemplateService) Headers(headers http.Header) *IndicesGetTemplateService {
-    s.headers = headers
-    return s
-}
-
-// Name is the name of the index template.
-func (s *IndicesGetTemplateService) Name(name ...string) *IndicesGetTemplateService {
-    s.name = append(s.name, name...)
-    return s
-}
-
-// FlatSettings is returns settings in flat format (default: false).
-func (s *IndicesGetTemplateService) FlatSettings(flatSettings bool) *IndicesGetTemplateService {
-    s.flatSettings = &flatSettings
-    return s
-}
-
-// Local indicates whether to return local information, i.e. do not retrieve
-// the state from master node (default: false).
-func (s *IndicesGetTemplateService) Local(local bool) *IndicesGetTemplateService {
-    s.local = &local
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesGetTemplateService) buildURL() (string, url.Values, error) {
-    // Build URL
-    var err error
-    var path string
-    if len(s.name) > 0 {
-        path, err = uritemplates.Expand("/_template/{name}", map[string]string{
-            "name": strings.Join(s.name, ","),
-        })
-    } else {
-        path = "/_template"
-    }
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.flatSettings != nil {
-        params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings))
-    }
-    if s.local != nil {
-        params.Set("local", fmt.Sprintf("%v", *s.local))
-    }
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesGetTemplateService) Validate() error {
-    return nil
-}
-
-// Do executes the operation.
-func (s *IndicesGetTemplateService) Do(ctx context.Context) (map[string]*IndicesGetTemplateResponse, error) {
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "GET",
-        Path:    path,
-        Params:  params,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    var ret map[string]*IndicesGetTemplateResponse
-    if err := s.client.decoder.Decode(res.Body, &ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
-
-// IndicesGetTemplateResponse is the response of IndicesGetTemplateService.Do.
-type IndicesGetTemplateResponse struct {
-    Order         int                    `json:"order,omitempty"`
-    Version       int                    `json:"version,omitempty"`
-    IndexPatterns []string               `json:"index_patterns,omitempty"`
-    Settings      map[string]interface{} `json:"settings,omitempty"`
-    Mappings      map[string]interface{} `json:"mappings,omitempty"`
-    Aliases       map[string]interface{} `json:"aliases,omitempty"`
-}
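A sketch of looking up one template via the service above (template name assumed):

// templateByName looks up a single index template; the result maps template
// names to *IndicesGetTemplateResponse values as defined above.
func templateByName(ctx context.Context, client *elastic.Client, name string) (*elastic.IndicesGetTemplateResponse, error) {
	res, err := elastic.NewIndicesGetTemplateService(client).Name(name).Do(ctx)
	if err != nil {
		return nil, err
	}
	return res[name], nil
}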
diff --git a/vendor/github.com/olivere/elastic/v7/indices_open.go b/vendor/github.com/olivere/elastic/v7/indices_open.go
deleted file mode 100644
index 81ad90f..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_open.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesOpenService opens an index.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-open-close.html
-// for details.
-type IndicesOpenService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index               string
-    timeout             string
-    masterTimeout       string
-    ignoreUnavailable   *bool
-    allowNoIndices      *bool
-    expandWildcards     string
-    waitForActiveShards string
-}
-
-// NewIndicesOpenService creates and initializes a new IndicesOpenService.
-func NewIndicesOpenService(client *Client) *IndicesOpenService {
-    return &IndicesOpenService{client: client}
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesOpenService) Pretty(pretty bool) *IndicesOpenService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesOpenService) Human(human bool) *IndicesOpenService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesOpenService) ErrorTrace(errorTrace bool) *IndicesOpenService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesOpenService) FilterPath(filterPath ...string) *IndicesOpenService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesOpenService) Header(name string, value string) *IndicesOpenService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesOpenService) Headers(headers http.Header) *IndicesOpenService {
-    s.headers = headers
-    return s
-}
-
-// Index is the name of the index to open.
-func (s *IndicesOpenService) Index(index string) *IndicesOpenService {
-    s.index = index
-    return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesOpenService) Timeout(timeout string) *IndicesOpenService {
-    s.timeout = timeout
-    return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesOpenService) MasterTimeout(masterTimeout string) *IndicesOpenService {
-    s.masterTimeout = masterTimeout
-    return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should
-// be ignored when unavailable (missing or closed).
-func (s *IndicesOpenService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesOpenService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// (This includes `_all` string or when no indices have been specified).
-func (s *IndicesOpenService) AllowNoIndices(allowNoIndices bool) *IndicesOpenService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both..
-func (s *IndicesOpenService) ExpandWildcards(expandWildcards string) *IndicesOpenService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// WaitForActiveShards specifies the number of shards that must be allocated
-// before the Open operation returns. Valid values are "all" or an integer
-// between 0 and number_of_replicas+1 (default: 0)
-func (s *IndicesOpenService) WaitForActiveShards(waitForActiveShards string) *IndicesOpenService {
-    s.waitForActiveShards = waitForActiveShards
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesOpenService) buildURL() (string, url.Values, error) {
-    // Build URL
-    path, err := uritemplates.Expand("/{index}/_open", map[string]string{
-        "index": s.index,
-    })
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.timeout != "" {
-        params.Set("timeout", s.timeout)
-    }
-    if s.masterTimeout != "" {
-        params.Set("master_timeout", s.masterTimeout)
-    }
-    if s.ignoreUnavailable != nil {
-        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-    }
-    if s.allowNoIndices != nil {
-        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-    }
-    if s.expandWildcards != "" {
-        params.Set("expand_wildcards", s.expandWildcards)
-    }
-    if s.waitForActiveShards != "" {
-        params.Set("wait_for_active_shards", s.waitForActiveShards)
-    }
-
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesOpenService) Validate() error {
-    var invalid []string
-    if s.index == "" {
-        invalid = append(invalid, "Index")
-    }
-    if len(invalid) > 0 {
-        return fmt.Errorf("missing required fields: %v", invalid)
-    }
-    return nil
-}
-
-// Do executes the operation.
-func (s *IndicesOpenService) Do(ctx context.Context) (*IndicesOpenResponse, error) {
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "POST",
-        Path:    path,
-        Params:  params,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    ret := new(IndicesOpenResponse)
-    if err := s.client.decoder.Decode(res.Body, ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
-
-// IndicesOpenResponse is the response of IndicesOpenService.Do.
-type IndicesOpenResponse struct {
-    Acknowledged       bool   `json:"acknowledged"`
-    ShardsAcknowledged bool   `json:"shards_acknowledged"`
-    Index              string `json:"index,omitempty"`
-}
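A sketch of reopening a closed index with the service above; the shard-wait value follows the WaitForActiveShards doc comment (index name and client setup assumed):

// reopenIndex opens a previously closed index and waits for one active
// shard copy before returning.
func reopenIndex(ctx context.Context, client *elastic.Client, index string) error {
	res, err := elastic.NewIndicesOpenService(client).
		Index(index).
		WaitForActiveShards("1").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("open of %s not acknowledged", index)
	}
	return nil
}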
diff --git a/vendor/github.com/olivere/elastic/v7/indices_put_alias.go b/vendor/github.com/olivere/elastic/v7/indices_put_alias.go
deleted file mode 100644
index b723eb9..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_put_alias.go
+++ /dev/null
@@ -1,399 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-)
-
-// -- Actions --
-
-// AliasAction is an action to apply to an alias, e.g. "add" or "remove".
-type AliasAction interface {
-    Source() (interface{}, error)
-}
-
-// AliasAddAction is an action to add to an alias.
-type AliasAddAction struct {
-    index         []string // index name(s)
-    alias         string   // alias name
-    filter        Query
-    routing       string
-    searchRouting string
-    indexRouting  string
-    isWriteIndex  *bool
-}
-
-// NewAliasAddAction returns an action to add an alias.
-func NewAliasAddAction(alias string) *AliasAddAction {
-    return &AliasAddAction{
-        alias: alias,
-    }
-}
-
-// Index associates one or more indices to the alias.
-func (a *AliasAddAction) Index(index ...string) *AliasAddAction {
-    a.index = append(a.index, index...)
-    return a
-}
-
-func (a *AliasAddAction) removeBlankIndexNames() {
-    var indices []string
-    for _, index := range a.index {
-        if len(index) > 0 {
-            indices = append(indices, index)
-        }
-    }
-    a.index = indices
-}
-
-// Filter associates a filter to the alias.
-func (a *AliasAddAction) Filter(filter Query) *AliasAddAction {
-    a.filter = filter
-    return a
-}
-
-// Routing associates a routing value to the alias.
-// This basically sets index and search routing to the same value.
-func (a *AliasAddAction) Routing(routing string) *AliasAddAction {
-    a.routing = routing
-    return a
-}
-
-// IndexRouting associates an index routing value to the alias.
-func (a *AliasAddAction) IndexRouting(routing string) *AliasAddAction {
-    a.indexRouting = routing
-    return a
-}
-
-// SearchRouting associates a search routing value to the alias.
-func (a *AliasAddAction) SearchRouting(routing ...string) *AliasAddAction {
-    a.searchRouting = strings.Join(routing, ",")
-    return a
-}
-
-// IsWriteIndex associates an is_write_index flag to the alias.
-func (a *AliasAddAction) IsWriteIndex(flag bool) *AliasAddAction {
-    a.isWriteIndex = &flag
-    return a
-}
-
-// Validate checks if the operation is valid.
-func (a *AliasAddAction) Validate() error {
-    var invalid []string
-    if len(a.alias) == 0 {
-        invalid = append(invalid, "Alias")
-    }
-    if len(a.index) == 0 {
-        invalid = append(invalid, "Index")
-    }
-    if len(invalid) > 0 {
-        return fmt.Errorf("missing required fields: %v", invalid)
-    }
-    if a.isWriteIndex != nil && len(a.index) > 1 {
-        return fmt.Errorf("more than 1 target index specified in operation with 'is_write_index' flag present")
-    }
-    return nil
-}
-
-// Source returns the JSON-serializable data.
-func (a *AliasAddAction) Source() (interface{}, error) {
-    a.removeBlankIndexNames()
-    if err := a.Validate(); err != nil {
-        return nil, err
-    }
-    src := make(map[string]interface{})
-    act := make(map[string]interface{})
-    src["add"] = act
-    act["alias"] = a.alias
-    switch len(a.index) {
-    case 1:
-        act["index"] = a.index[0]
-    default:
-        act["indices"] = a.index
-    }
-    if a.filter != nil {
-        f, err := a.filter.Source()
-        if err != nil {
-            return nil, err
-        }
-        act["filter"] = f
-    }
-    if len(a.routing) > 0 {
-        act["routing"] = a.routing
-    }
-    if len(a.indexRouting) > 0 {
-        act["index_routing"] = a.indexRouting
-    }
-    if len(a.searchRouting) > 0 {
-        act["search_routing"] = a.searchRouting
-    }
-    if a.isWriteIndex != nil {
-        act["is_write_index"] = *a.isWriteIndex
-    }
-    return src, nil
-}
-
-// AliasRemoveAction is an action to remove an alias.
-type AliasRemoveAction struct {
-    index []string // index name(s)
-    alias string   // alias name
-}
-
-// NewAliasRemoveAction returns an action to remove an alias.
-func NewAliasRemoveAction(alias string) *AliasRemoveAction {
-    return &AliasRemoveAction{
-        alias: alias,
-    }
-}
-
-// Index associates one or more indices to the alias.
-func (a *AliasRemoveAction) Index(index ...string) *AliasRemoveAction {
-    a.index = append(a.index, index...)
-    return a
-}
-
-func (a *AliasRemoveAction) removeBlankIndexNames() {
-    var indices []string
-    for _, index := range a.index {
-        if len(index) > 0 {
-            indices = append(indices, index)
-        }
-    }
-    a.index = indices
-}
-
-// Validate checks if the operation is valid.
-func (a *AliasRemoveAction) Validate() error {
-    var invalid []string
-    if len(a.alias) == 0 {
-        invalid = append(invalid, "Alias")
-    }
-    if len(a.index) == 0 {
-        invalid = append(invalid, "Index")
-    }
-    if len(invalid) > 0 {
-        return fmt.Errorf("missing required fields: %v", invalid)
-    }
-    return nil
-}
-
-// Source returns the JSON-serializable data.
-func (a *AliasRemoveAction) Source() (interface{}, error) {
-    a.removeBlankIndexNames()
-    if err := a.Validate(); err != nil {
-        return nil, err
-    }
-    src := make(map[string]interface{})
-    act := make(map[string]interface{})
-    src["remove"] = act
-    act["alias"] = a.alias
-    switch len(a.index) {
-    case 1:
-        act["index"] = a.index[0]
-    default:
-        act["indices"] = a.index
-    }
-    return src, nil
-}
-
-// AliasRemoveIndexAction is an action to remove an index during an alias
-// operation.
-type AliasRemoveIndexAction struct {
-    index string // index name
-}
-
-// NewAliasRemoveIndexAction returns an action to remove an index.
-func NewAliasRemoveIndexAction(index string) *AliasRemoveIndexAction {
-    return &AliasRemoveIndexAction{
-        index: index,
-    }
-}
-
-// Validate checks if the operation is valid.
-func (a *AliasRemoveIndexAction) Validate() error {
-    if a.index == "" {
-        return fmt.Errorf("missing required field: index")
-    }
-    return nil
-}
-
-// Source returns the JSON-serializable data.
-func (a *AliasRemoveIndexAction) Source() (interface{}, error) {
-    if err := a.Validate(); err != nil {
-        return nil, err
-    }
-    src := make(map[string]interface{})
-    act := make(map[string]interface{})
-    src["remove_index"] = act
-    act["index"] = a.index
-    return src, nil
-}
-
-// -- Service --
-
-// AliasService enables users to add or remove an alias.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-aliases.html
-// for details.
-type AliasService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    actions []AliasAction
-}
-
-// NewAliasService implements a service to manage aliases.
-func NewAliasService(client *Client) *AliasService {
-    builder := &AliasService{
-        client: client,
-    }
-    return builder
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *AliasService) Pretty(pretty bool) *AliasService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *AliasService) Human(human bool) *AliasService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *AliasService) ErrorTrace(errorTrace bool) *AliasService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *AliasService) FilterPath(filterPath ...string) *AliasService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *AliasService) Header(name string, value string) *AliasService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *AliasService) Headers(headers http.Header) *AliasService {
-    s.headers = headers
-    return s
-}
-
-// Add adds an alias to an index.
-func (s *AliasService) Add(indexName string, aliasName string) *AliasService {
-    action := NewAliasAddAction(aliasName).Index(indexName)
-    s.actions = append(s.actions, action)
-    return s
-}
-
-// Add adds an alias to an index and associates a filter to the alias.
-func (s *AliasService) AddWithFilter(indexName string, aliasName string, filter Query) *AliasService {
-    action := NewAliasAddAction(aliasName).Index(indexName).Filter(filter)
-    s.actions = append(s.actions, action)
-    return s
-}
-
-// Remove removes an alias.
-func (s *AliasService) Remove(indexName string, aliasName string) *AliasService {
-    action := NewAliasRemoveAction(aliasName).Index(indexName)
-    s.actions = append(s.actions, action)
-    return s
-}
-
-// Action accepts one or more AliasAction instances which can be
-// of type AliasAddAction or AliasRemoveAction.
-func (s *AliasService) Action(action ...AliasAction) *AliasService {
-    s.actions = append(s.actions, action...)
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *AliasService) buildURL() (string, url.Values, error) {
-    path := "/_aliases"
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    return path, params, nil
-}
-
-// Do executes the command.
-func (s *AliasService) Do(ctx context.Context) (*AliasResult, error) {
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Body with actions
-    body := make(map[string]interface{})
-    var actions []interface{}
-    for _, action := range s.actions {
-        src, err := action.Source()
-        if err != nil {
-            return nil, err
-        }
-        actions = append(actions, src)
-    }
-    body["actions"] = actions
-
-    // Get response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "POST",
-        Path:    path,
-        Params:  params,
-        Body:    body,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return results
-    ret := new(AliasResult)
-    if err := s.client.decoder.Decode(res.Body, ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
-
-// -- Result of an alias request.
-
-// AliasResult is the outcome of calling Do on AliasService.
-type AliasResult struct {
-    Acknowledged       bool   `json:"acknowledged"`
-    ShardsAcknowledged bool   `json:"shards_acknowledged"`
-    Index              string `json:"index,omitempty"`
-}
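A sketch of an atomic alias rollover built from the action types above (alias and index names assumed, client setup as before):

// rolloverAlias repoints a write alias from an old index to a new one in a
// single _aliases call, so readers never see the alias dangling.
func rolloverAlias(ctx context.Context, client *elastic.Client, alias, oldIndex, newIndex string) error {
	_, err := elastic.NewAliasService(client).
		Action(
			elastic.NewAliasRemoveAction(alias).Index(oldIndex),
			elastic.NewAliasAddAction(alias).Index(newIndex).IsWriteIndex(true),
		).
		Do(ctx)
	return err
}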
diff --git a/vendor/github.com/olivere/elastic/v7/indices_put_mapping.go b/vendor/github.com/olivere/elastic/v7/indices_put_mapping.go
deleted file mode 100644
index eae320a..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_put_mapping.go
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesPutMappingService allows to register specific mapping definition
-// for a specific type.
-//
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-put-mapping.html
-// for details.
-type IndicesPutMappingService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index             []string
-    masterTimeout     string
-    ignoreUnavailable *bool
-    allowNoIndices    *bool
-    expandWildcards   string
-    updateAllTypes    *bool
-    timeout           string
-    bodyJson          map[string]interface{}
-    bodyString        string
-}
-
-// NewPutMappingService is an alias for NewIndicesPutMappingService.
-// Use NewIndicesPutMappingService.
-func NewPutMappingService(client *Client) *IndicesPutMappingService {
-    return NewIndicesPutMappingService(client)
-}
-
-// NewIndicesPutMappingService creates a new IndicesPutMappingService.
-func NewIndicesPutMappingService(client *Client) *IndicesPutMappingService {
-    return &IndicesPutMappingService{
-        client: client,
-        index:  make([]string, 0),
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesPutMappingService) Pretty(pretty bool) *IndicesPutMappingService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesPutMappingService) Human(human bool) *IndicesPutMappingService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesPutMappingService) ErrorTrace(errorTrace bool) *IndicesPutMappingService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesPutMappingService) FilterPath(filterPath ...string) *IndicesPutMappingService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesPutMappingService) Header(name string, value string) *IndicesPutMappingService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesPutMappingService) Headers(headers http.Header) *IndicesPutMappingService {
-    s.headers = headers
-    return s
-}
-
-// Index is a list of index names the mapping should be added to
-// (supports wildcards); use `_all` or omit to add the mapping on all indices.
-func (s *IndicesPutMappingService) Index(indices ...string) *IndicesPutMappingService {
-    s.index = append(s.index, indices...)
-    return s
-}
-
-// Timeout is an explicit operation timeout.
-func (s *IndicesPutMappingService) Timeout(timeout string) *IndicesPutMappingService {
-    s.timeout = timeout
-    return s
-}
-
-// MasterTimeout specifies the timeout for connection to master.
-func (s *IndicesPutMappingService) MasterTimeout(masterTimeout string) *IndicesPutMappingService {
-    s.masterTimeout = masterTimeout
-    return s
-}
-
-// IgnoreUnavailable indicates whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesPutMappingService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutMappingService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices.
-// This includes `_all` string or when no indices have been specified.
-func (s *IndicesPutMappingService) AllowNoIndices(allowNoIndices bool) *IndicesPutMappingService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesPutMappingService) ExpandWildcards(expandWildcards string) *IndicesPutMappingService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// UpdateAllTypes, if true, indicates that all fields that span multiple indices
-// should be updated (default: false).
-func (s *IndicesPutMappingService) UpdateAllTypes(updateAllTypes bool) *IndicesPutMappingService {
-    s.updateAllTypes = &updateAllTypes
-    return s
-}
-
-// BodyJson contains the mapping definition.
-func (s *IndicesPutMappingService) BodyJson(mapping map[string]interface{}) *IndicesPutMappingService {
-    s.bodyJson = mapping
-    return s
-}
-
-// BodyString is the mapping definition serialized as a string.
-func (s *IndicesPutMappingService) BodyString(mapping string) *IndicesPutMappingService {
-    s.bodyString = mapping
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesPutMappingService) buildURL() (string, url.Values, error) {
-    path, err := uritemplates.Expand("/{index}/_mapping", map[string]string{
-        "index": strings.Join(s.index, ","),
-    })
-    if err != nil {
-        return "", url.Values{}, err
-    }
-
-    // Add query string parameters
-    params := url.Values{}
-    if v := s.pretty; v != nil {
-        params.Set("pretty", fmt.Sprint(*v))
-    }
-    if v := s.human; v != nil {
-        params.Set("human", fmt.Sprint(*v))
-    }
-    if v := s.errorTrace; v != nil {
-        params.Set("error_trace", fmt.Sprint(*v))
-    }
-    if len(s.filterPath) > 0 {
-        params.Set("filter_path", strings.Join(s.filterPath, ","))
-    }
-    if s.ignoreUnavailable != nil {
-        params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable))
-    }
-    if s.allowNoIndices != nil {
-        params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices))
-    }
-    if s.expandWildcards != "" {
-        params.Set("expand_wildcards", s.expandWildcards)
-    }
-    if s.updateAllTypes != nil {
-        params.Set("update_all_types", fmt.Sprintf("%v", *s.updateAllTypes))
-    }
-    if s.timeout != "" {
-        params.Set("timeout", s.timeout)
-    }
-    if s.masterTimeout != "" {
-        params.Set("master_timeout", s.masterTimeout)
-    }
-    return path, params, nil
-}
-
-// Validate checks if the operation is valid.
-func (s *IndicesPutMappingService) Validate() error {
-    var invalid []string
-    if len(s.index) == 0 {
-        invalid = append(invalid, "Index")
-    }
-    if s.bodyString == "" && s.bodyJson == nil {
-        invalid = append(invalid, "BodyJson")
-    }
-    if len(invalid) > 0 {
-        return fmt.Errorf("missing required fields: %v", invalid)
-    }
-    return nil
-}
-
-// Do executes the operation.
-func (s *IndicesPutMappingService) Do(ctx context.Context) (*PutMappingResponse, error) {
-    // Check pre-conditions
-    if err := s.Validate(); err != nil {
-        return nil, err
-    }
-
-    // Get URL for request
-    path, params, err := s.buildURL()
-    if err != nil {
-        return nil, err
-    }
-
-    // Setup HTTP request body
-    var body interface{}
-    if s.bodyJson != nil {
-        body = s.bodyJson
-    } else {
-        body = s.bodyString
-    }
-
-    // Get HTTP response
-    res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-        Method:  "PUT",
-        Path:    path,
-        Params:  params,
-        Body:    body,
-        Headers: s.headers,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    // Return operation response
-    ret := new(PutMappingResponse)
-    if err := s.client.decoder.Decode(res.Body, ret); err != nil {
-        return nil, err
-    }
-    return ret, nil
-}
-
-// PutMappingResponse is the response of IndicesPutMappingService.Do.
-type PutMappingResponse struct {
-    Acknowledged       bool   `json:"acknowledged"`
-    ShardsAcknowledged bool   `json:"shards_acknowledged"`
-    Index              string `json:"index,omitempty"`
-}
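A sketch of adding a single field to an existing mapping via BodyJson on the service above (the field name is purely illustrative):

// addKeywordField adds one keyword field to an existing index mapping,
// matching the properties shape the put-mapping API expects.
func addKeywordField(ctx context.Context, client *elastic.Client, index, field string) error {
	_, err := elastic.NewIndicesPutMappingService(client).
		Index(index).
		BodyJson(map[string]interface{}{
			"properties": map[string]interface{}{
				field: map[string]interface{}{"type": "keyword"},
			},
		}).
		Do(ctx)
	return err
}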
diff --git a/vendor/github.com/olivere/elastic/v7/indices_put_settings.go b/vendor/github.com/olivere/elastic/v7/indices_put_settings.go
deleted file mode 100644
index c8c0811..0000000
--- a/vendor/github.com/olivere/elastic/v7/indices_put_settings.go
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-    "context"
-    "fmt"
-    "net/http"
-    "net/url"
-    "strings"
-
-    "github.com/olivere/elastic/v7/uritemplates"
-)
-
-// IndicesPutSettingsService changes specific index level settings in
-// real time.
-//
-// See the documentation at
-// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-update-settings.html.
-type IndicesPutSettingsService struct {
-    client *Client
-
-    pretty     *bool       // pretty format the returned JSON response
-    human      *bool       // return human readable values for statistics
-    errorTrace *bool       // include the stack trace of returned errors
-    filterPath []string    // list of filters used to reduce the response
-    headers    http.Header // custom request-level HTTP headers
-
-    index             []string
-    allowNoIndices    *bool
-    expandWildcards   string
-    flatSettings      *bool
-    ignoreUnavailable *bool
-    masterTimeout     string
-    bodyJson          interface{}
-    bodyString        string
-}
-
-// NewIndicesPutSettingsService creates a new IndicesPutSettingsService.
-func NewIndicesPutSettingsService(client *Client) *IndicesPutSettingsService {
-    return &IndicesPutSettingsService{
-        client: client,
-        index:  make([]string, 0),
-    }
-}
-
-// Pretty tells Elasticsearch whether to return a formatted JSON response.
-func (s *IndicesPutSettingsService) Pretty(pretty bool) *IndicesPutSettingsService {
-    s.pretty = &pretty
-    return s
-}
-
-// Human specifies whether human readable values should be returned in
-// the JSON response, e.g. "7.5mb".
-func (s *IndicesPutSettingsService) Human(human bool) *IndicesPutSettingsService {
-    s.human = &human
-    return s
-}
-
-// ErrorTrace specifies whether to include the stack trace of returned errors.
-func (s *IndicesPutSettingsService) ErrorTrace(errorTrace bool) *IndicesPutSettingsService {
-    s.errorTrace = &errorTrace
-    return s
-}
-
-// FilterPath specifies a list of filters used to reduce the response.
-func (s *IndicesPutSettingsService) FilterPath(filterPath ...string) *IndicesPutSettingsService {
-    s.filterPath = filterPath
-    return s
-}
-
-// Header adds a header to the request.
-func (s *IndicesPutSettingsService) Header(name string, value string) *IndicesPutSettingsService {
-    if s.headers == nil {
-        s.headers = http.Header{}
-    }
-    s.headers.Add(name, value)
-    return s
-}
-
-// Headers specifies the headers of the request.
-func (s *IndicesPutSettingsService) Headers(headers http.Header) *IndicesPutSettingsService {
-    s.headers = headers
-    return s
-}
-
-// Index is a list of index names the mapping should be added to
-// (supports wildcards); use `_all` or omit to add the mapping on all indices.
-func (s *IndicesPutSettingsService) Index(indices ...string) *IndicesPutSettingsService {
-    s.index = append(s.index, indices...)
-    return s
-}
-
-// AllowNoIndices indicates whether to ignore if a wildcard indices
-// expression resolves into no concrete indices. (This includes `_all`
-// string or when no indices have been specified).
-func (s *IndicesPutSettingsService) AllowNoIndices(allowNoIndices bool) *IndicesPutSettingsService {
-    s.allowNoIndices = &allowNoIndices
-    return s
-}
-
-// ExpandWildcards specifies whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *IndicesPutSettingsService) ExpandWildcards(expandWildcards string) *IndicesPutSettingsService {
-    s.expandWildcards = expandWildcards
-    return s
-}
-
-// FlatSettings indicates whether to return settings in flat format (default: false).
-func (s *IndicesPutSettingsService) FlatSettings(flatSettings bool) *IndicesPutSettingsService {
-    s.flatSettings = &flatSettings
-    return s
-}
-
-// IgnoreUnavailable specifies whether specified concrete indices should be
-// ignored when unavailable (missing or closed).
-func (s *IndicesPutSettingsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesPutSettingsService {
-    s.ignoreUnavailable = &ignoreUnavailable
-    return s
-}
-
-// MasterTimeout is the timeout for connection to master.
-func (s *IndicesPutSettingsService) MasterTimeout(masterTimeout string) *IndicesPutSettingsService {
-    s.masterTimeout = masterTimeout
-    return s
-}
-
-// BodyJson is documented as: The index settings to be updated.
-func (s *IndicesPutSettingsService) BodyJson(body interface{}) *IndicesPutSettingsService {
-    s.bodyJson = body
-    return s
-}
-
-// BodyString is documented as: The index settings to be updated.
-func (s *IndicesPutSettingsService) BodyString(body string) *IndicesPutSettingsService {
-    s.bodyString = body
-    return s
-}
-
-// buildURL builds the URL for the operation.
-func (s *IndicesPutSettingsService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_settings", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_settings" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesPutSettingsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesPutSettingsService) Do(ctx context.Context) (*IndicesPutSettingsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesPutSettingsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesPutSettingsResponse is the response of IndicesPutSettingsService.Do. -type IndicesPutSettingsResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_put_template.go b/vendor/github.com/olivere/elastic/v7/indices_put_template.go deleted file mode 100644 index 4ced8e8..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_put_template.go +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesPutTemplateService creates or updates index mappings. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-templates.html. 
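The IndicesPutSettingsService removed above follows the same builder pattern. A sketch of a dynamic settings update, reusing an already configured *elastic.Client (the client and index name are assumptions):

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// raiseReplicas bumps the replica count on an index. Only dynamic
// settings may be changed on an open index this way; static settings
// require closing or recreating the index.
func raiseReplicas(ctx context.Context, client *elastic.Client) error {
	resp, err := client.IndexPutSettings("my-index").
		BodyJson(map[string]interface{}{
			"index": map[string]interface{}{
				"number_of_replicas": 1,
			},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("settings update acknowledged: %v", resp.Acknowledged)
	return nil
}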
-type IndicesPutTemplateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string - cause string - order interface{} - version *int - create *bool - timeout string - masterTimeout string - flatSettings *bool - bodyJson interface{} - bodyString string -} - -// NewIndicesPutTemplateService creates a new IndicesPutTemplateService. -func NewIndicesPutTemplateService(client *Client) *IndicesPutTemplateService { - return &IndicesPutTemplateService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesPutTemplateService) Pretty(pretty bool) *IndicesPutTemplateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesPutTemplateService) Human(human bool) *IndicesPutTemplateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesPutTemplateService) ErrorTrace(errorTrace bool) *IndicesPutTemplateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesPutTemplateService) FilterPath(filterPath ...string) *IndicesPutTemplateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesPutTemplateService) Header(name string, value string) *IndicesPutTemplateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesPutTemplateService) Headers(headers http.Header) *IndicesPutTemplateService { - s.headers = headers - return s -} - -// Name is the name of the index template. -func (s *IndicesPutTemplateService) Name(name string) *IndicesPutTemplateService { - s.name = name - return s -} - -// Cause describes the cause for this index template creation. This is currently -// undocumented, but part of the Java source. -func (s *IndicesPutTemplateService) Cause(cause string) *IndicesPutTemplateService { - s.cause = cause - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesPutTemplateService) Timeout(timeout string) *IndicesPutTemplateService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesPutTemplateService) MasterTimeout(masterTimeout string) *IndicesPutTemplateService { - s.masterTimeout = masterTimeout - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *IndicesPutTemplateService) FlatSettings(flatSettings bool) *IndicesPutTemplateService { - s.flatSettings = &flatSettings - return s -} - -// Order is the order for this template when merging multiple matching ones -// (higher numbers are merged later, overriding the lower numbers). -func (s *IndicesPutTemplateService) Order(order interface{}) *IndicesPutTemplateService { - s.order = order - return s -} - -// Version sets the version number for this template. 
-func (s *IndicesPutTemplateService) Version(version int) *IndicesPutTemplateService { - s.version = &version - return s -} - -// Create indicates whether the index template should only be added if -// new or can also replace an existing one. -func (s *IndicesPutTemplateService) Create(create bool) *IndicesPutTemplateService { - s.create = &create - return s -} - -// BodyJson is documented as: The template definition. -func (s *IndicesPutTemplateService) BodyJson(body interface{}) *IndicesPutTemplateService { - s.bodyJson = body - return s -} - -// BodyString is documented as: The template definition. -func (s *IndicesPutTemplateService) BodyString(body string) *IndicesPutTemplateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesPutTemplateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_template/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.order != nil { - params.Set("order", fmt.Sprintf("%v", s.order)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", *s.version)) - } - if s.create != nil { - params.Set("create", fmt.Sprintf("%v", *s.create)) - } - if s.cause != "" { - params.Set("cause", s.cause) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesPutTemplateService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesPutTemplateService) Do(ctx context.Context) (*IndicesPutTemplateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesPutTemplateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesPutTemplateResponse is the response of IndicesPutTemplateService.Do. 
-type IndicesPutTemplateResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_refresh.go b/vendor/github.com/olivere/elastic/v7/indices_refresh.go deleted file mode 100644 index 4dfdbe4..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_refresh.go +++ /dev/null @@ -1,149 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// RefreshService explicitly refreshes one or more indices. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-refresh.html. -type RefreshService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string -} - -// NewRefreshService creates a new instance of RefreshService. -func NewRefreshService(client *Client) *RefreshService { - builder := &RefreshService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *RefreshService) Pretty(pretty bool) *RefreshService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *RefreshService) Human(human bool) *RefreshService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *RefreshService) ErrorTrace(errorTrace bool) *RefreshService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *RefreshService) FilterPath(filterPath ...string) *RefreshService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *RefreshService) Header(name string, value string) *RefreshService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *RefreshService) Headers(headers http.Header) *RefreshService { - s.headers = headers - return s -} - -// Index specifies the indices to refresh. -func (s *RefreshService) Index(index ...string) *RefreshService { - s.index = append(s.index, index...) - return s -} - -// buildURL builds the URL for the operation. 
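Despite the doc comment it carries ("creates or updates index mappings" is a copy-paste slip in the upstream source), the IndicesPutTemplateService deleted above manages legacy index templates under the _template endpoint. A sketch; the template name and body are assumptions:

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// ensureTemplate installs a legacy (7.x _template API) index template
// that matches indices named logs-*.
func ensureTemplate(ctx context.Context, client *elastic.Client) error {
	resp, err := client.IndexPutTemplate("logs-template").
		BodyString(`{
			"index_patterns": ["logs-*"],
			"settings": {"number_of_shards": 1}
		}`).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("template install acknowledged: %v", resp.Acknowledged)
	return nil
}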
-func (s *RefreshService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_refresh", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_refresh" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Do executes the request. -func (s *RefreshService) Do(ctx context.Context) (*RefreshResult, error) { - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return result - ret := new(RefreshResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Result of a refresh request. - -// RefreshResult is the outcome of RefreshService.Do. -type RefreshResult struct { - Shards *ShardsInfo `json:"_shards,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_rollover.go b/vendor/github.com/olivere/elastic/v7/indices_rollover.go deleted file mode 100644 index ce72ef1..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_rollover.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesRolloverService rolls an alias over to a new index when the -// existing index is considered to be too large or too old. -// -// It is documented at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-rollover-index.html. -type IndicesRolloverService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - dryRun bool - newIndex string - alias string - masterTimeout string - timeout string - waitForActiveShards string - conditions map[string]interface{} - settings map[string]interface{} - mappings map[string]interface{} - bodyJson interface{} - bodyString string -} - -// NewIndicesRolloverService creates a new IndicesRolloverService. -func NewIndicesRolloverService(client *Client) *IndicesRolloverService { - return &IndicesRolloverService{ - client: client, - conditions: make(map[string]interface{}), - settings: make(map[string]interface{}), - mappings: make(map[string]interface{}), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. 
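The RefreshService removed above is the simplest of these services: no body, just an optional index list. A sketch of forcing a refresh (index name assumed):

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// refreshNow forces a refresh so recently indexed documents become
// searchable without waiting for the index's refresh interval.
func refreshNow(ctx context.Context, client *elastic.Client) error {
	res, err := client.Refresh("my-index").Do(ctx)
	if err != nil {
		return err
	}
	if res.Shards != nil {
		log.Printf("refreshed %d of %d shards", res.Shards.Successful, res.Shards.Total)
	}
	return nil
}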
-func (s *IndicesRolloverService) Pretty(pretty bool) *IndicesRolloverService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesRolloverService) Human(human bool) *IndicesRolloverService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesRolloverService) ErrorTrace(errorTrace bool) *IndicesRolloverService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesRolloverService) FilterPath(filterPath ...string) *IndicesRolloverService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesRolloverService) Header(name string, value string) *IndicesRolloverService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesRolloverService) Headers(headers http.Header) *IndicesRolloverService { - s.headers = headers - return s -} - -// Alias is the name of the alias to rollover. -func (s *IndicesRolloverService) Alias(alias string) *IndicesRolloverService { - s.alias = alias - return s -} - -// NewIndex is the name of the rollover index. -func (s *IndicesRolloverService) NewIndex(newIndex string) *IndicesRolloverService { - s.newIndex = newIndex - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesRolloverService) MasterTimeout(masterTimeout string) *IndicesRolloverService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout sets an explicit operation timeout. -func (s *IndicesRolloverService) Timeout(timeout string) *IndicesRolloverService { - s.timeout = timeout - return s -} - -// WaitForActiveShards sets the number of active shards to wait for on the -// newly created rollover index before the operation returns. -func (s *IndicesRolloverService) WaitForActiveShards(waitForActiveShards string) *IndicesRolloverService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// DryRun, when set, specifies that only conditions are checked without -// performing the actual rollover. -func (s *IndicesRolloverService) DryRun(dryRun bool) *IndicesRolloverService { - s.dryRun = dryRun - return s -} - -// Conditions allows to specify all conditions as a dictionary. -func (s *IndicesRolloverService) Conditions(conditions map[string]interface{}) *IndicesRolloverService { - s.conditions = conditions - return s -} - -// AddCondition adds a condition to the rollover decision. -func (s *IndicesRolloverService) AddCondition(name string, value interface{}) *IndicesRolloverService { - s.conditions[name] = value - return s -} - -// AddMaxIndexAgeCondition adds a condition to set the max index age. -func (s *IndicesRolloverService) AddMaxIndexAgeCondition(time string) *IndicesRolloverService { - s.conditions["max_age"] = time - return s -} - -// AddMaxIndexDocsCondition adds a condition to set the max documents in the index. -func (s *IndicesRolloverService) AddMaxIndexDocsCondition(docs int64) *IndicesRolloverService { - s.conditions["max_docs"] = docs - return s -} - -// Settings adds the index settings. -func (s *IndicesRolloverService) Settings(settings map[string]interface{}) *IndicesRolloverService { - s.settings = settings - return s -} - -// AddSetting adds an index setting. 
-func (s *IndicesRolloverService) AddSetting(name string, value interface{}) *IndicesRolloverService { - s.settings[name] = value - return s -} - -// Mappings adds the index mappings. -func (s *IndicesRolloverService) Mappings(mappings map[string]interface{}) *IndicesRolloverService { - s.mappings = mappings - return s -} - -// AddMapping adds a mapping for the given type. -func (s *IndicesRolloverService) AddMapping(typ string, mapping interface{}) *IndicesRolloverService { - s.mappings[typ] = mapping - return s -} - -// BodyJson sets the conditions that needs to be met for executing rollover, -// specified as a serializable JSON instance which is sent as the body of -// the request. -func (s *IndicesRolloverService) BodyJson(body interface{}) *IndicesRolloverService { - s.bodyJson = body - return s -} - -// BodyString sets the conditions that needs to be met for executing rollover, -// specified as a string which is sent as the body of the request. -func (s *IndicesRolloverService) BodyString(body string) *IndicesRolloverService { - s.bodyString = body - return s -} - -// getBody returns the body of the request, if not explicitly set via -// BodyJson or BodyString. -func (s *IndicesRolloverService) getBody() interface{} { - body := make(map[string]interface{}) - if len(s.conditions) > 0 { - body["conditions"] = s.conditions - } - if len(s.settings) > 0 { - body["settings"] = s.settings - } - if len(s.mappings) > 0 { - body["mappings"] = s.mappings - } - return body -} - -// buildURL builds the URL for the operation. -func (s *IndicesRolloverService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if s.newIndex != "" { - path, err = uritemplates.Expand("/{alias}/_rollover/{new_index}", map[string]string{ - "alias": s.alias, - "new_index": s.newIndex, - }) - } else { - path, err = uritemplates.Expand("/{alias}/_rollover", map[string]string{ - "alias": s.alias, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.dryRun { - params.Set("dry_run", "true") - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesRolloverService) Validate() error { - var invalid []string - if s.alias == "" { - invalid = append(invalid, "Alias") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *IndicesRolloverService) Do(ctx context.Context) (*IndicesRolloverResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } else { - body = s.getBody() - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesRolloverResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesRolloverResponse is the response of IndicesRolloverService.Do. -type IndicesRolloverResponse struct { - OldIndex string `json:"old_index"` - NewIndex string `json:"new_index"` - RolledOver bool `json:"rolled_over"` - DryRun bool `json:"dry_run"` - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Conditions map[string]bool `json:"conditions"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_segments.go b/vendor/github.com/olivere/elastic/v7/indices_segments.go deleted file mode 100644 index 01c5c8d..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_segments.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesSegmentsService provides low level segments information that a -// Lucene index (shard level) is built with. Allows to be used to provide -// more information on the state of a shard and an index, possibly -// optimization information, data "wasted" on deletes, and so on. -// -// Find further documentation at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-segments.html. -type IndicesSegmentsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - allowNoIndices *bool - expandWildcards string - ignoreUnavailable *bool - operationThreading interface{} - verbose *bool -} - -// NewIndicesSegmentsService creates a new IndicesSegmentsService. -func NewIndicesSegmentsService(client *Client) *IndicesSegmentsService { - return &IndicesSegmentsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesSegmentsService) Pretty(pretty bool) *IndicesSegmentsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesSegmentsService) Human(human bool) *IndicesSegmentsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. 
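The IndicesRolloverService deleted above exposes the rollover conditions through the AddMaxIndexAgeCondition and AddMaxIndexDocsCondition helpers shown in the hunk, which populate the conditions body assembled by getBody. A sketch with an assumed write alias:

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// rolloverLogs rolls the logs-write alias onto a fresh index once the
// current one is a week old or holds a million documents.
func rolloverLogs(ctx context.Context, client *elastic.Client) error {
	resp, err := client.RolloverIndex("logs-write").
		AddMaxIndexAgeCondition("7d").
		AddMaxIndexDocsCondition(1000000).
		Do(ctx)
	if err != nil {
		return err
	}
	if resp.RolledOver {
		log.Printf("rolled over to %s", resp.NewIndex)
	} else {
		log.Printf("conditions not met: %v", resp.Conditions)
	}
	return nil
}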
-func (s *IndicesSegmentsService) ErrorTrace(errorTrace bool) *IndicesSegmentsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesSegmentsService) FilterPath(filterPath ...string) *IndicesSegmentsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesSegmentsService) Header(name string, value string) *IndicesSegmentsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesSegmentsService) Headers(headers http.Header) *IndicesSegmentsService { - s.headers = headers - return s -} - -// Index is a comma-separated list of index names; use `_all` or empty string -// to perform the operation on all indices. -func (s *IndicesSegmentsService) Index(indices ...string) *IndicesSegmentsService { - s.index = append(s.index, indices...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). -func (s *IndicesSegmentsService) AllowNoIndices(allowNoIndices bool) *IndicesSegmentsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to concrete indices -// that are open, closed or both. -func (s *IndicesSegmentsService) ExpandWildcards(expandWildcards string) *IndicesSegmentsService { - s.expandWildcards = expandWildcards - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesSegmentsService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesSegmentsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// OperationThreading is undocumented in Elasticsearch as of now. -func (s *IndicesSegmentsService) OperationThreading(operationThreading interface{}) *IndicesSegmentsService { - s.operationThreading = operationThreading - return s -} - -// Verbose, when set to true, includes detailed memory usage by Lucene. -func (s *IndicesSegmentsService) Verbose(verbose bool) *IndicesSegmentsService { - s.verbose = &verbose - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesSegmentsService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_segments", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path = "/_segments" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.operationThreading != nil { - params.Set("operation_threading", fmt.Sprintf("%v", s.operationThreading)) - } - if s.verbose != nil { - params.Set("verbose", fmt.Sprintf("%v", *s.verbose)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesSegmentsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesSegmentsService) Do(ctx context.Context) (*IndicesSegmentsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesSegmentsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesSegmentsResponse is the response of IndicesSegmentsService.Do. -type IndicesSegmentsResponse struct { - // Shards provides information returned from shards. - Shards *ShardsInfo `json:"_shards"` - - // Indices provides a map into the stats of an index. - // The key of the map is the index name. - Indices map[string]*IndexSegments `json:"indices,omitempty"` -} - -type IndexSegments struct { - // Shards provides a map into the shard related information of an index. - // The key of the map is the number of a specific shard. - Shards map[string][]*IndexSegmentsShards `json:"shards,omitempty"` -} - -type IndexSegmentsShards struct { - Routing *IndexSegmentsRouting `json:"routing,omitempty"` - NumCommittedSegments int64 `json:"num_committed_segments,omitempty"` - NumSearchSegments int64 `json:"num_search_segments"` - - // Segments provides a map into the segment related information of a shard. - // The key of the map is the specific lucene segment id. 
- Segments map[string]*IndexSegmentsDetails `json:"segments,omitempty"` -} - -type IndexSegmentsRouting struct { - State string `json:"state,omitempty"` - Primary bool `json:"primary,omitempty"` - Node string `json:"node,omitempty"` - RelocatingNode string `json:"relocating_node,omitempty"` -} - -type IndexSegmentsDetails struct { - Generation int64 `json:"generation,omitempty"` - NumDocs int64 `json:"num_docs,omitempty"` - DeletedDocs int64 `json:"deleted_docs,omitempty"` - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` - Memory string `json:"memory,omitempty"` - MemoryInBytes int64 `json:"memory_in_bytes,omitempty"` - Committed bool `json:"committed,omitempty"` - Search bool `json:"search,omitempty"` - Version string `json:"version,omitempty"` - Compound bool `json:"compound,omitempty"` - MergeId string `json:"merge_id,omitempty"` - Sort []*IndexSegmentsSort `json:"sort,omitempty"` - RAMTree []*IndexSegmentsRamTree `json:"ram_tree,omitempty"` - Attributes map[string]string `json:"attributes,omitempty"` -} - -type IndexSegmentsSort struct { - Field string `json:"field,omitempty"` - Mode string `json:"mode,omitempty"` - Missing interface{} `json:"missing,omitempty"` - Reverse bool `json:"reverse,omitempty"` -} - -type IndexSegmentsRamTree struct { - Description string `json:"description,omitempty"` - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` - Children []*IndexSegmentsRamTree `json:"children,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_shrink.go b/vendor/github.com/olivere/elastic/v7/indices_shrink.go deleted file mode 100644 index 76cec61..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_shrink.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesShrinkService allows you to shrink an existing index into a -// new index with fewer primary shards. -// -// For further details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-shrink-index.html. -type IndicesShrinkService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - source string - target string - masterTimeout string - timeout string - waitForActiveShards string - bodyJson interface{} - bodyString string -} - -// NewIndicesShrinkService creates a new IndicesShrinkService. -func NewIndicesShrinkService(client *Client) *IndicesShrinkService { - return &IndicesShrinkService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesShrinkService) Pretty(pretty bool) *IndicesShrinkService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". 
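The segments response types above nest index -> shard number -> shard copies. A sketch walking that structure (index name assumed):

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// dumpSegments prints per-shard segment counts; useful when judging
// whether a force-merge is worthwhile.
func dumpSegments(ctx context.Context, client *elastic.Client) error {
	res, err := client.IndexSegments("my-index").Do(ctx)
	if err != nil {
		return err
	}
	for index, info := range res.Indices {
		for shard, copies := range info.Shards {
			for _, c := range copies {
				log.Printf("%s shard %s: %d search segments",
					index, shard, c.NumSearchSegments)
			}
		}
	}
	return nil
}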
-func (s *IndicesShrinkService) Human(human bool) *IndicesShrinkService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesShrinkService) ErrorTrace(errorTrace bool) *IndicesShrinkService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesShrinkService) FilterPath(filterPath ...string) *IndicesShrinkService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesShrinkService) Header(name string, value string) *IndicesShrinkService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesShrinkService) Headers(headers http.Header) *IndicesShrinkService { - s.headers = headers - return s -} - -// Source is the name of the source index to shrink. -func (s *IndicesShrinkService) Source(source string) *IndicesShrinkService { - s.source = source - return s -} - -// Target is the name of the target index to shrink into. -func (s *IndicesShrinkService) Target(target string) *IndicesShrinkService { - s.target = target - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *IndicesShrinkService) MasterTimeout(masterTimeout string) *IndicesShrinkService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout is an explicit operation timeout. -func (s *IndicesShrinkService) Timeout(timeout string) *IndicesShrinkService { - s.timeout = timeout - return s -} - -// WaitForActiveShards sets the number of active shards to wait for on -// the shrunken index before the operation returns. -func (s *IndicesShrinkService) WaitForActiveShards(waitForActiveShards string) *IndicesShrinkService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// BodyJson is the configuration for the target index (`settings` and `aliases`) -// defined as a JSON-serializable instance to be sent as the request body. -func (s *IndicesShrinkService) BodyJson(body interface{}) *IndicesShrinkService { - s.bodyJson = body - return s -} - -// BodyString is the configuration for the target index (`settings` and `aliases`) -// defined as a string to send as the request body. -func (s *IndicesShrinkService) BodyString(body string) *IndicesShrinkService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesShrinkService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{source}/_shrink/{target}", map[string]string{ - "source": s.source, - "target": s.target, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
-func (s *IndicesShrinkService) Validate() error { - var invalid []string - if s.source == "" { - invalid = append(invalid, "Source") - } - if s.target == "" { - invalid = append(invalid, "Target") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IndicesShrinkService) Do(ctx context.Context) (*IndicesShrinkResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesShrinkResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesShrinkResponse is the response of IndicesShrinkService.Do. -type IndicesShrinkResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_stats.go b/vendor/github.com/olivere/elastic/v7/indices_stats.go deleted file mode 100644 index 397d638..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_stats.go +++ /dev/null @@ -1,530 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesStatsService provides stats on various metrics of one or more -// indices. See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-stats.html. -type IndicesStatsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - metric []string - index []string - level string - types []string - completionFields []string - fielddataFields []string - fields []string - groups []string -} - -// NewIndicesStatsService creates a new IndicesStatsService. -func NewIndicesStatsService(client *Client) *IndicesStatsService { - return &IndicesStatsService{ - client: client, - index: make([]string, 0), - metric: make([]string, 0), - completionFields: make([]string, 0), - fielddataFields: make([]string, 0), - fields: make([]string, 0), - groups: make([]string, 0), - types: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesStatsService) Pretty(pretty bool) *IndicesStatsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". 
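The IndicesShrinkService deleted above takes source and target names plus an optional body for the target's settings and aliases. A sketch with assumed index names; note that Elasticsearch requires the source to be read-only, with a copy of every shard on one node, before a shrink succeeds:

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// shrinkLogs shrinks logs-big into logs-small with a single primary
// shard.
func shrinkLogs(ctx context.Context, client *elastic.Client) error {
	resp, err := client.ShrinkIndex("logs-big", "logs-small").
		BodyJson(map[string]interface{}{
			"settings": map[string]interface{}{
				"index.number_of_shards": 1,
			},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("shrink acknowledged: %v", resp.Acknowledged)
	return nil
}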
-func (s *IndicesStatsService) Human(human bool) *IndicesStatsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesStatsService) ErrorTrace(errorTrace bool) *IndicesStatsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesStatsService) FilterPath(filterPath ...string) *IndicesStatsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesStatsService) Header(name string, value string) *IndicesStatsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesStatsService) Headers(headers http.Header) *IndicesStatsService { - s.headers = headers - return s -} - -// Metric limits the information returned the specific metrics. Options are: -// docs, store, indexing, get, search, completion, fielddata, flush, merge, -// query_cache, refresh, suggest, and warmer. -func (s *IndicesStatsService) Metric(metric ...string) *IndicesStatsService { - s.metric = append(s.metric, metric...) - return s -} - -// Index is the list of index names; use `_all` or empty string to perform -// the operation on all indices. -func (s *IndicesStatsService) Index(indices ...string) *IndicesStatsService { - s.index = append(s.index, indices...) - return s -} - -// Type is a list of document types for the `indexing` index metric. -func (s *IndicesStatsService) Type(types ...string) *IndicesStatsService { - s.types = append(s.types, types...) - return s -} - -// Level returns stats aggregated at cluster, index or shard level. -func (s *IndicesStatsService) Level(level string) *IndicesStatsService { - s.level = level - return s -} - -// CompletionFields is a list of fields for `fielddata` and `suggest` -// index metric (supports wildcards). -func (s *IndicesStatsService) CompletionFields(completionFields ...string) *IndicesStatsService { - s.completionFields = append(s.completionFields, completionFields...) - return s -} - -// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). -func (s *IndicesStatsService) FielddataFields(fielddataFields ...string) *IndicesStatsService { - s.fielddataFields = append(s.fielddataFields, fielddataFields...) - return s -} - -// Fields is a list of fields for `fielddata` and `completion` index metric -// (supports wildcards). -func (s *IndicesStatsService) Fields(fields ...string) *IndicesStatsService { - s.fields = append(s.fields, fields...) - return s -} - -// Groups is a list of search groups for `search` index metric. -func (s *IndicesStatsService) Groups(groups ...string) *IndicesStatsService { - s.groups = append(s.groups, groups...) - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IndicesStatsService) buildURL() (string, url.Values, error) { - var err error - var path string - if len(s.index) > 0 && len(s.metric) > 0 { - path, err = uritemplates.Expand("/{index}/_stats/{metric}", map[string]string{ - "index": strings.Join(s.index, ","), - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_stats", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.metric) > 0 { - path, err = uritemplates.Expand("/_stats/{metric}", map[string]string{ - "metric": strings.Join(s.metric, ","), - }) - } else { - path = "/_stats" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.groups) > 0 { - params.Set("groups", strings.Join(s.groups, ",")) - } - if s.level != "" { - params.Set("level", s.level) - } - if len(s.types) > 0 { - params.Set("types", strings.Join(s.types, ",")) - } - if len(s.completionFields) > 0 { - params.Set("completion_fields", strings.Join(s.completionFields, ",")) - } - if len(s.fielddataFields) > 0 { - params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesStatsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IndicesStatsService) Do(ctx context.Context) (*IndicesStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesStatsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesStatsResponse is the response of IndicesStatsService.Do. -type IndicesStatsResponse struct { - // Shards provides information returned from shards. - Shards *ShardsInfo `json:"_shards"` - - // All provides summary stats about all indices. - All *IndexStats `json:"_all,omitempty"` - - // Indices provides a map into the stats of an index. The key of the - // map is the index name. - Indices map[string]*IndexStats `json:"indices,omitempty"` -} - -// IndexStats is index stats for a specific index. 
-type IndexStats struct { - UUID string `json:"uuid,omitempty"` - Primaries *IndexStatsDetails `json:"primaries,omitempty"` - Total *IndexStatsDetails `json:"total,omitempty"` - Shards map[string][]*IndexStatsDetails `json:"shards,omitempty"` -} - -type IndexStatsDetails struct { - Routing *IndexStatsRouting `json:"routing,omitempty"` - Docs *IndexStatsDocs `json:"docs,omitempty"` - Store *IndexStatsStore `json:"store,omitempty"` - Indexing *IndexStatsIndexing `json:"indexing,omitempty"` - Get *IndexStatsGet `json:"get,omitempty"` - Search *IndexStatsSearch `json:"search,omitempty"` - Merges *IndexStatsMerges `json:"merges,omitempty"` - Refresh *IndexStatsRefresh `json:"refresh,omitempty"` - Recovery *IndexStatsRecovery `json:"recovery,omitempty"` - Flush *IndexStatsFlush `json:"flush,omitempty"` - Warmer *IndexStatsWarmer `json:"warmer,omitempty"` - FilterCache *IndexStatsFilterCache `json:"filter_cache,omitempty"` - IdCache *IndexStatsIdCache `json:"id_cache,omitempty"` - Fielddata *IndexStatsFielddata `json:"fielddata,omitempty"` - Percolate *IndexStatsPercolate `json:"percolate,omitempty"` - Completion *IndexStatsCompletion `json:"completion,omitempty"` - Segments *IndexStatsSegments `json:"segments,omitempty"` - Translog *IndexStatsTranslog `json:"translog,omitempty"` - Suggest *IndexStatsSuggest `json:"suggest,omitempty"` - QueryCache *IndexStatsQueryCache `json:"query_cache,omitempty"` - RequestCache *IndexStatsRequestCache `json:"request_cache,omitempty"` - Commit *IndexStatsCommit `json:"commit,omitempty"` - SeqNo *IndexStatsSeqNo `json:"seq_no,omitempty"` - RetentionLeases *IndexStatsRetentionLeases `json:"retention_leases,omitempty"` - ShardPath *IndexStatsShardPath `json:"shard_path,omitempty"` -} - -type IndexStatsRouting struct { - State string `json:"state"` // e.g. "STARTED" - Primary bool `json:"primary"` - Node string `json:"node"` // e.g. "-aXnGv4oTW6bIIl0db3eCg" - RelocatingNode *string `json:"relocating_node"` -} - -type IndexStatsShardPath struct { - StatePath string `json:"state_path"` // e.g. "/usr/share/elasticsearch/data/nodes/0" - DataPath string `json:"data_path"` // e.g. "/usr/share/elasticsearch/data/nodes/0" - IsCustomDataPath bool `json:"is_custom_data_path"` -} - -type IndexStatsDocs struct { - Count int64 `json:"count,omitempty"` - Deleted int64 `json:"deleted,omitempty"` -} - -type IndexStatsStore struct { - Size string `json:"size,omitempty"` // human size, e.g. 
119.3mb - SizeInBytes int64 `json:"size_in_bytes,omitempty"` -} - -type IndexStatsIndexing struct { - IndexTotal int64 `json:"index_total,omitempty"` - IndexTime string `json:"index_time,omitempty"` - IndexTimeInMillis int64 `json:"index_time_in_millis,omitempty"` - IndexCurrent int64 `json:"index_current,omitempty"` - IndexFailed int64 `json:"index_failed,omitempty"` - DeleteTotal int64 `json:"delete_total,omitempty"` - DeleteTime string `json:"delete_time,omitempty"` - DeleteTimeInMillis int64 `json:"delete_time_in_millis,omitempty"` - DeleteCurrent int64 `json:"delete_current,omitempty"` - NoopUpdateTotal int64 `json:"noop_update_total,omitempty"` - IsThrottled bool `json:"is_throttled,omitempty"` - ThrottleTime string `json:"throttle_time,omitempty"` - ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` -} - -type IndexStatsGet struct { - Total int64 `json:"total,omitempty"` - GetTime string `json:"getTime,omitempty"` // 7.4.0 uses "getTime", earlier versions used "get_time" - TimeInMillis int64 `json:"time_in_millis,omitempty"` - ExistsTotal int64 `json:"exists_total,omitempty"` - ExistsTime string `json:"exists_time,omitempty"` - ExistsTimeInMillis int64 `json:"exists_time_in_millis,omitempty"` - MissingTotal int64 `json:"missing_total,omitempty"` - MissingTime string `json:"missing_time,omitempty"` - MissingTimeInMillis int64 `json:"missing_time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` -} - -type IndexStatsSearch struct { - OpenContexts int64 `json:"open_contexts,omitempty"` - QueryTotal int64 `json:"query_total,omitempty"` - QueryTime string `json:"query_time,omitempty"` - QueryTimeInMillis int64 `json:"query_time_in_millis,omitempty"` - QueryCurrent int64 `json:"query_current,omitempty"` - FetchTotal int64 `json:"fetch_total,omitempty"` - FetchTime string `json:"fetch_time,omitempty"` - FetchTimeInMillis int64 `json:"fetch_time_in_millis,omitempty"` - FetchCurrent int64 `json:"fetch_current,omitempty"` - ScrollTotal int64 `json:"scroll_total,omitempty"` - ScrollTime string `json:"scroll_time,omitempty"` - ScrollTimeInMillis int64 `json:"scroll_time_in_millis,omitempty"` - ScrollCurrent int64 `json:"scroll_current,omitempty"` - SuggestTotal int64 `json:"suggest_total,omitempty"` - SuggestTime string `json:"suggest_time,omitempty"` - SuggestTimeInMillis int64 `json:"suggest_time_in_millis,omitempty"` - SuggestCurrent int64 `json:"suggest_current,omitempty"` -} - -type IndexStatsMerges struct { - Current int64 `json:"current,omitempty"` - CurrentDocs int64 `json:"current_docs,omitempty"` - CurrentSize string `json:"current_size,omitempty"` - CurrentSizeInBytes int64 `json:"current_size_in_bytes,omitempty"` - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` - TotalDocs int64 `json:"total_docs,omitempty"` - TotalSize string `json:"total_size,omitempty"` - TotalSizeInBytes int64 `json:"total_size_in_bytes,omitempty"` - TotalStoppedTime string `json:"total_stopped_time,omitempty"` - TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis,omitempty"` - TotalThrottledTime string `json:"total_throttled_time,omitempty"` - TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis,omitempty"` - TotalAutoThrottle string `json:"total_auto_throttle,omitempty"` - TotalAutoThrottleInBytes int64 `json:"total_auto_throttle_in_bytes,omitempty"` -} - -type IndexStatsRefresh struct { - Total int64 `json:"total,omitempty"` - TotalTime 
string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` - ExternalTotal int64 `json:"external_total,omitempty"` - ExternalTotalTime string `json:"external_total_time,omitempty"` - ExternalTotalTimeInMillis int64 `json:"external_total_time_in_millis,omitempty"` - Listeners int64 `json:"listeners,omitempty"` -} - -type IndexStatsRecovery struct { - CurrentAsSource int64 `json:"current_as_source,omitempty"` - CurrentAsTarget int64 `json:"current_as_target,omitempty"` - ThrottleTime string `json:"throttle_time,omitempty"` - ThrottleTimeInMillis int64 `json:"throttle_time_in_millis,omitempty"` -} - -type IndexStatsFlush struct { - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` - Periodic int64 `json:"periodic,omitempty"` -} - -type IndexStatsWarmer struct { - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - TotalTime string `json:"total_time,omitempty"` - TotalTimeInMillis int64 `json:"total_time_in_millis,omitempty"` -} - -type IndexStatsRequestCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` - HitCount int64 `json:"hit_count,omitempty"` - MissCount int64 `json:"miss_count,omitempty"` -} - -type IndexStatsCommit struct { - ID string `json:"id,omitempty"` // lucene commit ID in base64, e.g. "m2tDMYHzSpSV6zJH0lIAnA==" - Generation int64 `json:"generation,omitempty"` - UserData map[string]string `json:"user_data,omitempty"` - NumDocs int64 `json:"num_docs,omitempty"` -} - -type IndexStatsFilterCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` -} - -type IndexStatsIdCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` -} - -type IndexStatsFielddata struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Evictions int64 `json:"evictions,omitempty"` -} - -type IndexStatsPercolate struct { - Total int64 `json:"total,omitempty"` - GetTime string `json:"get_time,omitempty"` - TimeInMillis int64 `json:"time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - Queries int64 `json:"queries,omitempty"` -} - -type IndexStatsCompletion struct { - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` -} - -type IndexStatsSegments struct { - Count int64 `json:"count"` - Memory string `json:"memory"` // e.g. "61.3kb" - MemoryInBytes int64 `json:"memory_in_bytes"` - TermsMemory string `json:"terms_memory"` // e.g. "61.3kb" - TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"` - StoredFieldsMemory string `json:"stored_fields_memory"` // e.g. "61.3kb" - StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"` - TermVectorsMemory string `json:"term_vectors_memory"` // e.g. "61.3kb" - TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"` - NormsMemory string `json:"norms_memory"` // e.g. "61.3kb" - NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"` - PointsMemory string `json:"points_memory"` // e.g. 
"61.3kb" - PointsMemoryInBytes int64 `json:"points_memory_in_bytes"` - DocValuesMemory string `json:"doc_values_memory"` // e.g. "61.3kb" - DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` - IndexWriterMemory string `json:"index_writer_memory"` // e.g. "61.3kb" - IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` - VersionMapMemory string `json:"version_map_memory"` // e.g. "61.3kb" - VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` - FixedBitSet string `json:"fixed_bit_set"` // e.g. "61.3kb" - FixedBitSetInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` - MaxUnsafeAutoIDTimestamp int64 `json:"max_unsafe_auto_id_timestamp"` - FileSizes map[string]*ClusterStatsIndicesSegmentsFile `json:"file_sizes"` -} - -type IndexStatsTranslog struct { - Operations int64 `json:"operations,omitempty"` - Size string `json:"size,omitempty"` - SizeInBytes int64 `json:"size_in_bytes,omitempty"` - UncommittedOperations int64 `json:"uncommitted_operations,omitempty"` - UncommittedSize string `json:"uncommitted_size,omitempty"` - UncommittedSizeInBytes int64 `json:"uncommitted_size_in_bytes,omitempty"` - EarliestLastModifiedAge int64 `json:"earliest_last_modified_age,omitempty"` -} - -type IndexStatsSuggest struct { - Total int64 `json:"total,omitempty"` - Time string `json:"time,omitempty"` - TimeInMillis int64 `json:"time_in_millis,omitempty"` - Current int64 `json:"current,omitempty"` -} - -type IndexStatsQueryCache struct { - MemorySize string `json:"memory_size,omitempty"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes,omitempty"` - TotalCount int64 `json:"total_count,omitempty"` - HitCount int64 `json:"hit_count,omitempty"` - MissCount int64 `json:"miss_count,omitempty"` - CacheSize int64 `json:"cache_size,omitempty"` - CacheCount int64 `json:"cache_count,omitempty"` - Evictions int64 `json:"evictions,omitempty"` -} - -type IndexStatsSeqNo struct { - MaxSeqNo int64 `json:"max_seq_no,omitempty"` - LocalCheckpoint int64 `json:"local_checkpoint,omitempty"` - GlobalCheckpoint int64 `json:"global_checkpoint,omitempty"` -} - -type IndexStatsRetentionLeases struct { - PrimaryTerm int64 `json:"primary_term,omitempty"` - Version int64 `json:"version,omitempty"` - Leases []*IndexStatsRetentionLease `json:"leases,omitempty"` -} - -type IndexStatsRetentionLease struct { - Id string `json:"id,omitempty"` - RetainingSeqNo int64 `json:"retaining_seq_no,omitempty"` - Timestamp int64 `json:"timestamp,omitempty"` - Source string `json:"source,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/indices_unfreeze.go b/vendor/github.com/olivere/elastic/v7/indices_unfreeze.go deleted file mode 100644 index 3da75d4..0000000 --- a/vendor/github.com/olivere/elastic/v7/indices_unfreeze.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IndicesUnfreezeService unfreezes an index. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/unfreeze-index-api.html -// and https://www.elastic.co/blog/creating-frozen-indices-with-the-elasticsearch-freeze-index-api -// for details. 
-type IndicesUnfreezeService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - timeout string - masterTimeout string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - waitForActiveShards string -} - -// NewIndicesUnfreezeService creates a new IndicesUnfreezeService. -func NewIndicesUnfreezeService(client *Client) *IndicesUnfreezeService { - return &IndicesUnfreezeService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IndicesUnfreezeService) Pretty(pretty bool) *IndicesUnfreezeService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IndicesUnfreezeService) Human(human bool) *IndicesUnfreezeService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IndicesUnfreezeService) ErrorTrace(errorTrace bool) *IndicesUnfreezeService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IndicesUnfreezeService) FilterPath(filterPath ...string) *IndicesUnfreezeService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IndicesUnfreezeService) Header(name string, value string) *IndicesUnfreezeService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IndicesUnfreezeService) Headers(headers http.Header) *IndicesUnfreezeService { - s.headers = headers - return s -} - -// Index is the name of the index to unfreeze. -func (s *IndicesUnfreezeService) Index(index string) *IndicesUnfreezeService { - s.index = index - return s -} - -// Timeout allows to specify an explicit timeout. -func (s *IndicesUnfreezeService) Timeout(timeout string) *IndicesUnfreezeService { - s.timeout = timeout - return s -} - -// MasterTimeout allows to specify a timeout for connection to master. -func (s *IndicesUnfreezeService) MasterTimeout(masterTimeout string) *IndicesUnfreezeService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *IndicesUnfreezeService) IgnoreUnavailable(ignoreUnavailable bool) *IndicesUnfreezeService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). -func (s *IndicesUnfreezeService) AllowNoIndices(allowNoIndices bool) *IndicesUnfreezeService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards specifies whether to expand wildcard expression to -// concrete indices that are open, closed or both.. -func (s *IndicesUnfreezeService) ExpandWildcards(expandWildcards string) *IndicesUnfreezeService { - s.expandWildcards = expandWildcards - return s -} - -// WaitForActiveShards sets the number of active shards to wait for -// before the operation returns. 
-func (s *IndicesUnfreezeService) WaitForActiveShards(numShards string) *IndicesUnfreezeService { - s.waitForActiveShards = numShards - return s -} - -// buildURL builds the URL for the operation. -func (s *IndicesUnfreezeService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_unfreeze", map[string]string{ - "index": s.index, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IndicesUnfreezeService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the service. -func (s *IndicesUnfreezeService) Do(ctx context.Context) (*IndicesUnfreezeResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IndicesUnfreezeResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IndicesUnfreezeResponse is the outcome of freezing an index. -type IndicesUnfreezeResponse struct { - Shards *ShardsInfo `json:"_shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/ingest_delete_pipeline.go b/vendor/github.com/olivere/elastic/v7/ingest_delete_pipeline.go deleted file mode 100644 index fed06d6..0000000 --- a/vendor/github.com/olivere/elastic/v7/ingest_delete_pipeline.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IngestDeletePipelineService deletes pipelines by ID. -// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/delete-pipeline-api.html. 
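For reference, the IndicesUnfreezeService removed above wrapped POST /{index}/_unfreeze. A minimal sketch of how it was driven, assuming a reachable cluster; the URL and index name are hypothetical:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Hypothetical cluster URL; all other client options left at defaults.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Unfreeze a (hypothetical) frozen index and wait for one active shard.
	res, err := elastic.NewIndicesUnfreezeService(client).
		Index("logs-2020.04").
		WaitForActiveShards("1").
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("unfreeze shards: %+v\n", res.Shards)
}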
-type IngestDeletePipelineService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - masterTimeout string - timeout string -} - -// NewIngestDeletePipelineService creates a new IngestDeletePipelineService. -func NewIngestDeletePipelineService(client *Client) *IngestDeletePipelineService { - return &IngestDeletePipelineService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IngestDeletePipelineService) Pretty(pretty bool) *IngestDeletePipelineService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IngestDeletePipelineService) Human(human bool) *IngestDeletePipelineService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IngestDeletePipelineService) ErrorTrace(errorTrace bool) *IngestDeletePipelineService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IngestDeletePipelineService) FilterPath(filterPath ...string) *IngestDeletePipelineService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IngestDeletePipelineService) Header(name string, value string) *IngestDeletePipelineService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IngestDeletePipelineService) Headers(headers http.Header) *IngestDeletePipelineService { - s.headers = headers - return s -} - -// Id is documented as: Pipeline ID. -func (s *IngestDeletePipelineService) Id(id string) *IngestDeletePipelineService { - s.id = id - return s -} - -// MasterTimeout is documented as: Explicit operation timeout for connection to master node. -func (s *IngestDeletePipelineService) MasterTimeout(masterTimeout string) *IngestDeletePipelineService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout is documented as: Explicit operation timeout. -func (s *IngestDeletePipelineService) Timeout(timeout string) *IngestDeletePipelineService { - s.timeout = timeout - return s -} - -// buildURL builds the URL for the operation. -func (s *IngestDeletePipelineService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
-func (s *IngestDeletePipelineService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IngestDeletePipelineService) Do(ctx context.Context) (*IngestDeletePipelineResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IngestDeletePipelineResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IngestDeletePipelineResponse is the response of IngestDeletePipelineService.Do. -type IngestDeletePipelineResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/ingest_get_pipeline.go b/vendor/github.com/olivere/elastic/v7/ingest_get_pipeline.go deleted file mode 100644 index 5f7e027..0000000 --- a/vendor/github.com/olivere/elastic/v7/ingest_get_pipeline.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IngestGetPipelineService returns pipelines based on ID. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/get-pipeline-api.html -// for documentation. -type IngestGetPipelineService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id []string - masterTimeout string -} - -// NewIngestGetPipelineService creates a new IngestGetPipelineService. -func NewIngestGetPipelineService(client *Client) *IngestGetPipelineService { - return &IngestGetPipelineService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IngestGetPipelineService) Pretty(pretty bool) *IngestGetPipelineService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IngestGetPipelineService) Human(human bool) *IngestGetPipelineService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IngestGetPipelineService) ErrorTrace(errorTrace bool) *IngestGetPipelineService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IngestGetPipelineService) FilterPath(filterPath ...string) *IngestGetPipelineService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
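The IngestDeletePipelineService removed here is driven the same way. A sketch reusing the client setup and imports from the previous example; the pipeline ID is hypothetical:

func deletePipeline(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIngestDeletePipelineService(client).
		Id("my-pipeline"). // required; Validate rejects an empty ID
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("delete pipeline acknowledged: %v\n", res.Acknowledged)
	return nil
}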
-func (s *IngestGetPipelineService) Header(name string, value string) *IngestGetPipelineService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IngestGetPipelineService) Headers(headers http.Header) *IngestGetPipelineService { - s.headers = headers - return s -} - -// Id is a list of pipeline ids. Wildcards supported. -func (s *IngestGetPipelineService) Id(id ...string) *IngestGetPipelineService { - s.id = append(s.id, id...) - return s -} - -// MasterTimeout is an explicit operation timeout for connection to master node. -func (s *IngestGetPipelineService) MasterTimeout(masterTimeout string) *IngestGetPipelineService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *IngestGetPipelineService) buildURL() (string, url.Values, error) { - var err error - var path string - - // Build URL - if len(s.id) > 0 { - path, err = uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ - "id": strings.Join(s.id, ","), - }) - } else { - path = "/_ingest/pipeline" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IngestGetPipelineService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *IngestGetPipelineService) Do(ctx context.Context) (IngestGetPipelineResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret IngestGetPipelineResponse - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// IngestGetPipelineResponse is the response of IngestGetPipelineService.Do. -type IngestGetPipelineResponse map[string]*IngestGetPipeline - -// IngestGetPipeline describes a specific ingest pipeline, its -// processors etc. -type IngestGetPipeline struct { - Description string `json:"description"` - Processors []map[string]interface{} `json:"processors"` - Version int64 `json:"version,omitempty"` - OnFailure []map[string]interface{} `json:"on_failure,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/ingest_put_pipeline.go b/vendor/github.com/olivere/elastic/v7/ingest_put_pipeline.go deleted file mode 100644 index 5f780f6..0000000 --- a/vendor/github.com/olivere/elastic/v7/ingest_put_pipeline.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
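IngestGetPipelineService returns a map keyed by pipeline ID; calling it with no Id() fetches every pipeline. A sketch with a hypothetical wildcard, imports as above:

func listPipelines(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewIngestGetPipelineService(client).
		Id("my-*"). // wildcards are supported; omit Id() to fetch all pipelines
		Do(ctx)
	if err != nil {
		return err
	}
	// IngestGetPipelineResponse is map[string]*IngestGetPipeline.
	for id, p := range res {
		fmt.Printf("%s: %q, %d processors\n", id, p.Description, len(p.Processors))
	}
	return nil
}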
- -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IngestPutPipelineService adds pipelines and updates existing pipelines in -// the cluster. -// -// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/put-pipeline-api.html. -type IngestPutPipelineService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - masterTimeout string - timeout string - bodyJson interface{} - bodyString string -} - -// NewIngestPutPipelineService creates a new IngestPutPipelineService. -func NewIngestPutPipelineService(client *Client) *IngestPutPipelineService { - return &IngestPutPipelineService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IngestPutPipelineService) Pretty(pretty bool) *IngestPutPipelineService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IngestPutPipelineService) Human(human bool) *IngestPutPipelineService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IngestPutPipelineService) ErrorTrace(errorTrace bool) *IngestPutPipelineService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IngestPutPipelineService) FilterPath(filterPath ...string) *IngestPutPipelineService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IngestPutPipelineService) Header(name string, value string) *IngestPutPipelineService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IngestPutPipelineService) Headers(headers http.Header) *IngestPutPipelineService { - s.headers = headers - return s -} - -// Id is the pipeline ID. -func (s *IngestPutPipelineService) Id(id string) *IngestPutPipelineService { - s.id = id - return s -} - -// MasterTimeout is an explicit operation timeout for connection to master node. -func (s *IngestPutPipelineService) MasterTimeout(masterTimeout string) *IngestPutPipelineService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout specifies an explicit operation timeout. -func (s *IngestPutPipelineService) Timeout(timeout string) *IngestPutPipelineService { - s.timeout = timeout - return s -} - -// BodyJson is the ingest definition, defined as a JSON-serializable document. -// Use e.g. a map[string]interface{} here. -func (s *IngestPutPipelineService) BodyJson(body interface{}) *IngestPutPipelineService { - s.bodyJson = body - return s -} - -// BodyString is the ingest definition, specified as a string. -func (s *IngestPutPipelineService) BodyString(body string) *IngestPutPipelineService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IngestPutPipelineService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_ingest/pipeline/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IngestPutPipelineService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IngestPutPipelineService) Do(ctx context.Context) (*IngestPutPipelineResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IngestPutPipelineResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IngestPutPipelineResponse is the response of IngestPutPipelineService.Do. -type IngestPutPipelineResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/ingest_simulate_pipeline.go b/vendor/github.com/olivere/elastic/v7/ingest_simulate_pipeline.go deleted file mode 100644 index 70de000..0000000 --- a/vendor/github.com/olivere/elastic/v7/ingest_simulate_pipeline.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// IngestSimulatePipelineService executes a specific pipeline against the set of -// documents provided in the body of the request. -// -// The API is documented at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/simulate-pipeline-api.html. 
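IngestPutPipelineService takes the pipeline definition via BodyJson (any JSON-serializable value) or BodyString. A sketch with a hypothetical one-processor pipeline; the lowercase processor is standard Elasticsearch, but the pipeline ID and field name are made up:

func putPipeline(ctx context.Context, client *elastic.Client) error {
	// Pipeline body as a plain map, per the BodyJson doc comment above.
	body := map[string]interface{}{
		"description": "lowercase the user field",
		"processors": []interface{}{
			map[string]interface{}{
				"lowercase": map[string]interface{}{"field": "user"},
			},
		},
	}
	res, err := elastic.NewIngestPutPipelineService(client).
		Id("my-pipeline").
		BodyJson(body).
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("put pipeline acknowledged: %v\n", res.Acknowledged)
	return nil
}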
-type IngestSimulatePipelineService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - verbose *bool - bodyJson interface{} - bodyString string -} - -// NewIngestSimulatePipelineService creates a new IngestSimulatePipeline. -func NewIngestSimulatePipelineService(client *Client) *IngestSimulatePipelineService { - return &IngestSimulatePipelineService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *IngestSimulatePipelineService) Pretty(pretty bool) *IngestSimulatePipelineService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *IngestSimulatePipelineService) Human(human bool) *IngestSimulatePipelineService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *IngestSimulatePipelineService) ErrorTrace(errorTrace bool) *IngestSimulatePipelineService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *IngestSimulatePipelineService) FilterPath(filterPath ...string) *IngestSimulatePipelineService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *IngestSimulatePipelineService) Header(name string, value string) *IngestSimulatePipelineService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *IngestSimulatePipelineService) Headers(headers http.Header) *IngestSimulatePipelineService { - s.headers = headers - return s -} - -// Id specifies the pipeline ID. -func (s *IngestSimulatePipelineService) Id(id string) *IngestSimulatePipelineService { - s.id = id - return s -} - -// Verbose mode. Display data output for each processor in executed pipeline. -func (s *IngestSimulatePipelineService) Verbose(verbose bool) *IngestSimulatePipelineService { - s.verbose = &verbose - return s -} - -// BodyJson is the ingest definition, defined as a JSON-serializable simulate -// definition. Use e.g. a map[string]interface{} here. -func (s *IngestSimulatePipelineService) BodyJson(body interface{}) *IngestSimulatePipelineService { - s.bodyJson = body - return s -} - -// BodyString is the simulate definition, defined as a string. -func (s *IngestSimulatePipelineService) BodyString(body string) *IngestSimulatePipelineService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *IngestSimulatePipelineService) buildURL() (string, url.Values, error) { - var err error - var path string - - // Build URL - if s.id != "" { - path, err = uritemplates.Expand("/_ingest/pipeline/{id}/_simulate", map[string]string{ - "id": s.id, - }) - } else { - path = "/_ingest/pipeline/_simulate" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.verbose; v != nil { - params.Set("verbose", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *IngestSimulatePipelineService) Validate() error { - var invalid []string - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *IngestSimulatePipelineService) Do(ctx context.Context) (*IngestSimulatePipelineResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(IngestSimulatePipelineResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// IngestSimulatePipelineResponse is the response of IngestSimulatePipeline.Do. -type IngestSimulatePipelineResponse struct { - Docs []*IngestSimulateDocumentResult `json:"docs"` -} - -type IngestSimulateDocumentResult struct { - Doc map[string]interface{} `json:"doc"` - ProcessorResults []*IngestSimulateProcessorResult `json:"processor_results"` -} - -type IngestSimulateProcessorResult struct { - ProcessorTag string `json:"tag"` - Doc map[string]interface{} `json:"doc"` -} diff --git a/vendor/github.com/olivere/elastic/v7/inner_hit.go b/vendor/github.com/olivere/elastic/v7/inner_hit.go deleted file mode 100644 index 8036626..0000000 --- a/vendor/github.com/olivere/elastic/v7/inner_hit.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// InnerHit implements a simple join for parent/child, nested, and even -// top-level documents in Elasticsearch. -// It is an experimental feature for Elasticsearch versions 1.5 (or greater). -// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-inner-hits.html -// for documentation. -// -// See the tests for SearchSource, HasChildFilter, HasChildQuery, -// HasParentFilter, HasParentQuery, NestedFilter, and NestedQuery -// for usage examples. 
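IngestSimulatePipelineService runs sample documents through a pipeline without indexing them. A sketch against the hypothetical pipeline from the previous example:

func simulatePipeline(ctx context.Context, client *elastic.Client) error {
	// Documents to feed through the pipeline (hypothetical shape).
	body := map[string]interface{}{
		"docs": []interface{}{
			map[string]interface{}{
				"_source": map[string]interface{}{"user": "Alice"},
			},
		},
	}
	res, err := elastic.NewIngestSimulatePipelineService(client).
		Id("my-pipeline"). // omit to simulate a pipeline given inline in the body
		Verbose(true).     // request per-processor output
		BodyJson(body).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		fmt.Printf("simulated doc: %v\n", doc.Doc)
	}
	return nil
}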
-type InnerHit struct { - source *SearchSource - path string - typ string - - name string -} - -// NewInnerHit creates a new InnerHit. -func NewInnerHit() *InnerHit { - return &InnerHit{source: NewSearchSource()} -} - -func (hit *InnerHit) Path(path string) *InnerHit { - hit.path = path - return hit -} - -func (hit *InnerHit) Type(typ string) *InnerHit { - hit.typ = typ - return hit -} - -func (hit *InnerHit) Query(query Query) *InnerHit { - hit.source.Query(query) - return hit -} - -func (hit *InnerHit) From(from int) *InnerHit { - hit.source.From(from) - return hit -} - -func (hit *InnerHit) Size(size int) *InnerHit { - hit.source.Size(size) - return hit -} - -func (hit *InnerHit) TrackScores(trackScores bool) *InnerHit { - hit.source.TrackScores(trackScores) - return hit -} - -func (hit *InnerHit) Explain(explain bool) *InnerHit { - hit.source.Explain(explain) - return hit -} - -func (hit *InnerHit) Version(version bool) *InnerHit { - hit.source.Version(version) - return hit -} - -func (hit *InnerHit) StoredField(storedFieldName string) *InnerHit { - hit.source.StoredField(storedFieldName) - return hit -} - -func (hit *InnerHit) StoredFields(storedFieldNames ...string) *InnerHit { - hit.source.StoredFields(storedFieldNames...) - return hit -} - -func (hit *InnerHit) NoStoredFields() *InnerHit { - hit.source.NoStoredFields() - return hit -} - -func (hit *InnerHit) FetchSource(fetchSource bool) *InnerHit { - hit.source.FetchSource(fetchSource) - return hit -} - -func (hit *InnerHit) FetchSourceContext(fetchSourceContext *FetchSourceContext) *InnerHit { - hit.source.FetchSourceContext(fetchSourceContext) - return hit -} - -func (hit *InnerHit) DocvalueFields(docvalueFields ...string) *InnerHit { - hit.source.DocvalueFields(docvalueFields...) - return hit -} - -func (hit *InnerHit) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *InnerHit { - hit.source.DocvalueFieldsWithFormat(docvalueFields...) - return hit -} - -func (hit *InnerHit) DocvalueField(docvalueField string) *InnerHit { - hit.source.DocvalueField(docvalueField) - return hit -} - -func (hit *InnerHit) DocvalueFieldWithFormat(docvalueField DocvalueField) *InnerHit { - hit.source.DocvalueFieldWithFormat(docvalueField) - return hit -} - -func (hit *InnerHit) ScriptFields(scriptFields ...*ScriptField) *InnerHit { - hit.source.ScriptFields(scriptFields...) - return hit -} - -func (hit *InnerHit) ScriptField(scriptField *ScriptField) *InnerHit { - hit.source.ScriptField(scriptField) - return hit -} - -func (hit *InnerHit) Sort(field string, ascending bool) *InnerHit { - hit.source.Sort(field, ascending) - return hit -} - -func (hit *InnerHit) SortWithInfo(info SortInfo) *InnerHit { - hit.source.SortWithInfo(info) - return hit -} - -func (hit *InnerHit) SortBy(sorter ...Sorter) *InnerHit { - hit.source.SortBy(sorter...) - return hit -} - -func (hit *InnerHit) Highlight(highlight *Highlight) *InnerHit { - hit.source.Highlight(highlight) - return hit -} - -func (hit *InnerHit) Highlighter() *Highlight { - return hit.source.Highlighter() -} - -func (hit *InnerHit) Name(name string) *InnerHit { - hit.name = name - return hit -} - -func (hit *InnerHit) Source() (interface{}, error) { - src, err := hit.source.Source() - if err != nil { - return nil, err - } - source, ok := src.(map[string]interface{}) - if !ok { - return nil, nil - } - - // Notice that hit.typ and hit.path are not exported here. - // They are only used with SearchSource and serialized there. 
- - if hit.name != "" { - source["name"] = hit.name - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/logger.go b/vendor/github.com/olivere/elastic/v7/logger.go deleted file mode 100644 index 095eb4c..0000000 --- a/vendor/github.com/olivere/elastic/v7/logger.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Logger specifies the interface for all log operations. -type Logger interface { - Printf(format string, v ...interface{}) -} diff --git a/vendor/github.com/olivere/elastic/v7/mget.go b/vendor/github.com/olivere/elastic/v7/mget.go deleted file mode 100644 index 9e5f83b..0000000 --- a/vendor/github.com/olivere/elastic/v7/mget.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" -) - -// MgetService allows to get multiple documents based on an index, -// type (optional) and id (possibly routing). The response includes -// a docs array with all the fetched documents, each element similar -// in structure to a document provided by the Get API. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-multi-get.html -// for details. -type MgetService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - preference string - realtime *bool - refresh string - routing string - storedFields []string - items []*MultiGetItem -} - -// NewMgetService initializes a new Multi GET API request call. -func NewMgetService(client *Client) *MgetService { - builder := &MgetService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *MgetService) Pretty(pretty bool) *MgetService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *MgetService) Human(human bool) *MgetService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *MgetService) ErrorTrace(errorTrace bool) *MgetService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *MgetService) FilterPath(filterPath ...string) *MgetService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *MgetService) Header(name string, value string) *MgetService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *MgetService) Headers(headers http.Header) *MgetService { - s.headers = headers - return s -} - -// Preference specifies the node or shard the operation should be performed -// on (default: random). 
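InnerHit above is a thin wrapper over a SearchSource that serializes into a query's inner_hits block. A sketch of attaching one to a nested query; NestedQuery and its InnerHit method live elsewhere in the same library, and the nested path and field names are hypothetical:

func nestedWithInnerHits() elastic.Query {
	// Return up to three matching nested docs per hit, under a named block.
	hit := elastic.NewInnerHit().Name("recent-comments").Size(3)
	return elastic.NewNestedQuery(
		"comments", // hypothetical nested path
		elastic.NewMatchQuery("comments.state", "approved"),
	).InnerHit(hit)
}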
-func (s *MgetService) Preference(preference string) *MgetService { - s.preference = preference - return s -} - -// Refresh the shard containing the document before performing the operation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *MgetService) Refresh(refresh string) *MgetService { - s.refresh = refresh - return s -} - -// Realtime specifies whether to perform the operation in realtime or search mode. -func (s *MgetService) Realtime(realtime bool) *MgetService { - s.realtime = &realtime - return s -} - -// Routing is the specific routing value. -func (s *MgetService) Routing(routing string) *MgetService { - s.routing = routing - return s -} - -// StoredFields is a list of fields to return in the response. -func (s *MgetService) StoredFields(storedFields ...string) *MgetService { - s.storedFields = append(s.storedFields, storedFields...) - return s -} - -// Add an item to the request. -func (s *MgetService) Add(items ...*MultiGetItem) *MgetService { - s.items = append(s.items, items...) - return s -} - -// Source returns the request body, which will be serialized into JSON. -func (s *MgetService) Source() (interface{}, error) { - source := make(map[string]interface{}) - items := make([]interface{}, len(s.items)) - for i, item := range s.items { - src, err := item.Source() - if err != nil { - return nil, err - } - items[i] = src - } - source["docs"] = items - return source, nil -} - -// Do executes the request. -func (s *MgetService) Do(ctx context.Context) (*MgetResponse, error) { - // Build url - path := "/_mget" - - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.realtime != nil { - params.Add("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.preference != "" { - params.Add("preference", s.preference) - } - if s.refresh != "" { - params.Add("refresh", s.refresh) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if len(s.storedFields) > 0 { - params.Set("stored_fields", strings.Join(s.storedFields, ",")) - } - - // Set body - body, err := s.Source() - if err != nil { - return nil, err - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return result - ret := new(MgetResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Multi Get Item -- - -// MultiGetItem is a single document to retrieve via the MgetService. -type MultiGetItem struct { - index string - typ string - id string - routing string - storedFields []string - version *int64 // see org.elasticsearch.common.lucene.uid.Versions - versionType string // see org.elasticsearch.index.VersionType - fsc *FetchSourceContext -} - -// NewMultiGetItem initializes a new, single item for a Multi GET request. -func NewMultiGetItem() *MultiGetItem { - return &MultiGetItem{} -} - -// Index specifies the index name. -func (item *MultiGetItem) Index(index string) *MultiGetItem { - item.index = index - return item -} - -// Type specifies the type name. 
-func (item *MultiGetItem) Type(typ string) *MultiGetItem { - item.typ = typ - return item -} - -// Id specifies the identifier of the document. -func (item *MultiGetItem) Id(id string) *MultiGetItem { - item.id = id - return item -} - -// Routing is the specific routing value. -func (item *MultiGetItem) Routing(routing string) *MultiGetItem { - item.routing = routing - return item -} - -// StoredFields is a list of fields to return in the response. -func (item *MultiGetItem) StoredFields(storedFields ...string) *MultiGetItem { - item.storedFields = append(item.storedFields, storedFields...) - return item -} - -// Version can be MatchAny (-3), MatchAnyPre120 (0), NotFound (-1), -// or NotSet (-2). These are specified in org.elasticsearch.common.lucene.uid.Versions. -// The default in Elasticsearch is MatchAny (-3). -func (item *MultiGetItem) Version(version int64) *MultiGetItem { - item.version = &version - return item -} - -// VersionType can be "internal", "external", "external_gt", or "external_gte". -// See org.elasticsearch.index.VersionType in Elasticsearch source. -// It is "internal" by default. -func (item *MultiGetItem) VersionType(versionType string) *MultiGetItem { - item.versionType = versionType - return item -} - -// FetchSource allows to specify source filtering. -func (item *MultiGetItem) FetchSource(fetchSourceContext *FetchSourceContext) *MultiGetItem { - item.fsc = fetchSourceContext - return item -} - -// Source returns the serialized JSON to be sent to Elasticsearch as -// part of a MultiGet search. -func (item *MultiGetItem) Source() (interface{}, error) { - source := make(map[string]interface{}) - - source["_id"] = item.id - - if item.index != "" { - source["_index"] = item.index - } - if item.typ != "" { - source["_type"] = item.typ - } - if item.fsc != nil { - src, err := item.fsc.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - if item.routing != "" { - source["routing"] = item.routing - } - if len(item.storedFields) > 0 { - source["stored_fields"] = strings.Join(item.storedFields, ",") - } - if item.version != nil { - source["version"] = fmt.Sprintf("%d", *item.version) - } - if item.versionType != "" { - source["version_type"] = item.versionType - } - - return source, nil -} - -// -- Result of a Multi Get request. - -// MgetResponse is the outcome of a Multi GET API request. -type MgetResponse struct { - Docs []*GetResult `json:"docs,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/msearch.go b/vendor/github.com/olivere/elastic/v7/msearch.go deleted file mode 100644 index 1f6aed0..0000000 --- a/vendor/github.com/olivere/elastic/v7/msearch.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// MultiSearch executes one or more searches in one roundtrip. 
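MgetService batches point lookups into one request; each MultiGetItem carries its own index and ID. A sketch (index names and IDs hypothetical, imports as above); res.Docs holds *GetResult values defined elsewhere in the library:

func fetchBoth(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewMgetService(client).
		Add(
			elastic.NewMultiGetItem().Index("users").Id("1"),
			elastic.NewMultiGetItem().Index("orders").Id("42"),
		).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, doc := range res.Docs {
		fmt.Printf("%s/%s found=%v\n", doc.Index, doc.Id, doc.Found)
	}
	return nil
}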
-type MultiSearchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - requests []*SearchRequest - indices []string - maxConcurrentRequests *int - preFilterShardSize *int -} - -func NewMultiSearchService(client *Client) *MultiSearchService { - builder := &MultiSearchService{ - client: client, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *MultiSearchService) Pretty(pretty bool) *MultiSearchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *MultiSearchService) Human(human bool) *MultiSearchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *MultiSearchService) ErrorTrace(errorTrace bool) *MultiSearchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *MultiSearchService) FilterPath(filterPath ...string) *MultiSearchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *MultiSearchService) Header(name string, value string) *MultiSearchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *MultiSearchService) Headers(headers http.Header) *MultiSearchService { - s.headers = headers - return s -} - -func (s *MultiSearchService) Add(requests ...*SearchRequest) *MultiSearchService { - s.requests = append(s.requests, requests...) - return s -} - -func (s *MultiSearchService) Index(indices ...string) *MultiSearchService { - s.indices = append(s.indices, indices...) - return s -} - -func (s *MultiSearchService) MaxConcurrentSearches(max int) *MultiSearchService { - s.maxConcurrentRequests = &max - return s -} - -func (s *MultiSearchService) PreFilterShardSize(size int) *MultiSearchService { - s.preFilterShardSize = &size - return s -} - -func (s *MultiSearchService) Do(ctx context.Context) (*MultiSearchResult, error) { - // Build url - path := "/_msearch" - - // Parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.maxConcurrentRequests; v != nil { - params.Set("max_concurrent_searches", fmt.Sprintf("%v", *v)) - } - if v := s.preFilterShardSize; v != nil { - params.Set("pre_filter_shard_size", fmt.Sprintf("%v", *v)) - } - - // Set body - var lines []string - for _, sr := range s.requests { - // Set default indices if not specified in the request - if !sr.HasIndices() && len(s.indices) > 0 { - sr = sr.Index(s.indices...) 
- } - - header, err := json.Marshal(sr.header()) - if err != nil { - return nil, err - } - body, err := sr.Body() - if err != nil { - return nil, err - } - lines = append(lines, string(header)) - lines = append(lines, body) - } - body := strings.Join(lines, "\n") + "\n" // add trailing \n - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return result - ret := new(MultiSearchResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// MultiSearchResult is the outcome of running a multi-search operation. -type MultiSearchResult struct { - TookInMillis int64 `json:"took,omitempty"` // search time in milliseconds - Responses []*SearchResult `json:"responses,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/mtermvectors.go b/vendor/github.com/olivere/elastic/v7/mtermvectors.go deleted file mode 100644 index 59284e6..0000000 --- a/vendor/github.com/olivere/elastic/v7/mtermvectors.go +++ /dev/null @@ -1,526 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// MultiTermvectorService returns information and statistics on terms in the -// fields of a particular document. The document could be stored in the -// index or artificially provided by the user. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-multi-termvectors.html -// for documentation. -type MultiTermvectorService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - typ string - fieldStatistics *bool - fields []string - ids []string - offsets *bool - parent string - payloads *bool - positions *bool - preference string - realtime *bool - routing string - termStatistics *bool - version interface{} - versionType string - bodyJson interface{} - bodyString string - docs []*MultiTermvectorItem -} - -// NewMultiTermvectorService creates a new MultiTermvectorService. -func NewMultiTermvectorService(client *Client) *MultiTermvectorService { - return &MultiTermvectorService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *MultiTermvectorService) Pretty(pretty bool) *MultiTermvectorService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *MultiTermvectorService) Human(human bool) *MultiTermvectorService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *MultiTermvectorService) ErrorTrace(errorTrace bool) *MultiTermvectorService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. 
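MultiSearchService above packs several searches into one newline-delimited _msearch round trip. A sketch with two requests; SearchRequest and SearchSource come from elsewhere in the library, and the indices and queries are hypothetical:

func multiSearch(ctx context.Context, client *elastic.Client) error {
	req1 := elastic.NewSearchRequest().Index("users").
		SearchSource(elastic.NewSearchSource().Query(elastic.NewMatchAllQuery()).Size(1))
	req2 := elastic.NewSearchRequest().Index("orders").
		SearchSource(elastic.NewSearchSource().Query(elastic.NewTermQuery("state", "open")))
	res, err := elastic.NewMultiSearchService(client).
		Add(req1, req2).
		Do(ctx)
	if err != nil {
		return err
	}
	// Responses come back in the same order the requests were added.
	for i, r := range res.Responses {
		fmt.Printf("search %d: %d hits\n", i, r.TotalHits())
	}
	return nil
}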
-func (s *MultiTermvectorService) FilterPath(filterPath ...string) *MultiTermvectorService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *MultiTermvectorService) Header(name string, value string) *MultiTermvectorService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *MultiTermvectorService) Headers(headers http.Header) *MultiTermvectorService { - s.headers = headers - return s -} - -// Add adds documents to MultiTermvectors service. -func (s *MultiTermvectorService) Add(docs ...*MultiTermvectorItem) *MultiTermvectorService { - s.docs = append(s.docs, docs...) - return s -} - -// Index in which the document resides. -func (s *MultiTermvectorService) Index(index string) *MultiTermvectorService { - s.index = index - return s -} - -// Type of the document. -func (s *MultiTermvectorService) Type(typ string) *MultiTermvectorService { - s.typ = typ - return s -} - -// FieldStatistics specifies if document count, sum of document frequencies and sum of total term frequencies should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) FieldStatistics(fieldStatistics bool) *MultiTermvectorService { - s.fieldStatistics = &fieldStatistics - return s -} - -// Fields is a comma-separated list of fields to return. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Fields(fields []string) *MultiTermvectorService { - s.fields = fields - return s -} - -// Ids is a comma-separated list of documents ids. You must define ids as parameter or set "ids" or "docs" in the request body. -func (s *MultiTermvectorService) Ids(ids []string) *MultiTermvectorService { - s.ids = ids - return s -} - -// Offsets specifies if term offsets should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Offsets(offsets bool) *MultiTermvectorService { - s.offsets = &offsets - return s -} - -// Parent id of documents. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Parent(parent string) *MultiTermvectorService { - s.parent = parent - return s -} - -// Payloads specifies if term payloads should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Payloads(payloads bool) *MultiTermvectorService { - s.payloads = &payloads - return s -} - -// Positions specifies if term positions should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Positions(positions bool) *MultiTermvectorService { - s.positions = &positions - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Preference(preference string) *MultiTermvectorService { - s.preference = preference - return s -} - -// Realtime specifies if requests are real-time as opposed to near-real-time (default: true). -func (s *MultiTermvectorService) Realtime(realtime bool) *MultiTermvectorService { - s.realtime = &realtime - return s -} - -// Routing specific routing value. 
Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) Routing(routing string) *MultiTermvectorService { - s.routing = routing - return s -} - -// TermStatistics specifies if total term frequency and document frequency should be returned. Applies to all returned documents unless otherwise specified in body "params" or "docs". -func (s *MultiTermvectorService) TermStatistics(termStatistics bool) *MultiTermvectorService { - s.termStatistics = &termStatistics - return s -} - -// Version is explicit version number for concurrency control. -func (s *MultiTermvectorService) Version(version interface{}) *MultiTermvectorService { - s.version = version - return s -} - -// VersionType is specific version type. -func (s *MultiTermvectorService) VersionType(versionType string) *MultiTermvectorService { - s.versionType = versionType - return s -} - -// BodyJson is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. -func (s *MultiTermvectorService) BodyJson(body interface{}) *MultiTermvectorService { - s.bodyJson = body - return s -} - -// BodyString is documented as: Define ids, documents, parameters or a list of parameters per document here. You must at least provide a list of document ids. See documentation.. -func (s *MultiTermvectorService) BodyString(body string) *MultiTermvectorService { - s.bodyString = body - return s -} - -func (s *MultiTermvectorService) Source() interface{} { - source := make(map[string]interface{}) - docs := make([]interface{}, len(s.docs)) - for i, doc := range s.docs { - docs[i] = doc.Source() - } - source["docs"] = docs - return source -} - -// buildURL builds the URL for the operation. 
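As an alternative to adding items one by one, the BodyJson setter above lets the caller pass the request body directly. A hedged sketch, assuming the documented body shape with an "ids" list; the index name and ids are illustrative, and Do and the response type appear further down in this deleted file:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// fetchByIds supplies the request body directly instead of building
// MultiTermvectorItem values; per the setter's doc comment, at least a
// list of document ids must be provided.
func fetchByIds(ctx context.Context, client *elastic.Client) (*elastic.MultiTermvectorResponse, error) {
	return elastic.NewMultiTermvectorService(client).
		Index("articles"). // illustrative index
		BodyJson(map[string]interface{}{
			"ids": []string{"1", "2"}, // illustrative ids
		}).
		Do(ctx)
}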
-func (s *MultiTermvectorService) buildURL() (string, url.Values, error) { - var path string - var err error - - if s.index != "" && s.typ != "" { - path, err = uritemplates.Expand("/{index}/{type}/_mtermvectors", map[string]string{ - "index": s.index, - "type": s.typ, - }) - } else if s.index != "" && s.typ == "" { - path, err = uritemplates.Expand("/{index}/_mtermvectors", map[string]string{ - "index": s.index, - }) - } else { - path = "/_mtermvectors" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.fieldStatistics != nil { - params.Set("field_statistics", fmt.Sprintf("%v", *s.fieldStatistics)) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if len(s.ids) > 0 { - params.Set("ids", strings.Join(s.ids, ",")) - } - if s.offsets != nil { - params.Set("offsets", fmt.Sprintf("%v", *s.offsets)) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.payloads != nil { - params.Set("payloads", fmt.Sprintf("%v", *s.payloads)) - } - if s.positions != nil { - params.Set("positions", fmt.Sprintf("%v", *s.positions)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.realtime != nil { - params.Set("realtime", fmt.Sprintf("%v", *s.realtime)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.termStatistics != nil { - params.Set("term_statistics", fmt.Sprintf("%v", *s.termStatistics)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *MultiTermvectorService) Validate() error { - var invalid []string - if s.index == "" && s.typ != "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *MultiTermvectorService) Do(ctx context.Context) (*MultiTermvectorResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if len(s.bodyString) > 0 { - body = s.bodyString - } else { - body = s.Source() - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(MultiTermvectorResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// MultiTermvectorResponse is the response of MultiTermvectorService.Do. -type MultiTermvectorResponse struct { - Docs []*TermvectorsResponse `json:"docs"` -} - -// -- MultiTermvectorItem -- - -// MultiTermvectorItem is a single document to retrieve via MultiTermvectorService. 
-type MultiTermvectorItem struct { - index string - typ string - id string - doc interface{} - fieldStatistics *bool - fields []string - perFieldAnalyzer map[string]string - offsets *bool - parent string - payloads *bool - positions *bool - preference string - realtime *bool - routing string - termStatistics *bool -} - -func NewMultiTermvectorItem() *MultiTermvectorItem { - return &MultiTermvectorItem{} -} - -func (s *MultiTermvectorItem) Index(index string) *MultiTermvectorItem { - s.index = index - return s -} - -func (s *MultiTermvectorItem) Type(typ string) *MultiTermvectorItem { - s.typ = typ - return s -} - -func (s *MultiTermvectorItem) Id(id string) *MultiTermvectorItem { - s.id = id - return s -} - -// Doc is the document to analyze. -func (s *MultiTermvectorItem) Doc(doc interface{}) *MultiTermvectorItem { - s.doc = doc - return s -} - -// FieldStatistics specifies if document count, sum of document frequencies -// and sum of total term frequencies should be returned. -func (s *MultiTermvectorItem) FieldStatistics(fieldStatistics bool) *MultiTermvectorItem { - s.fieldStatistics = &fieldStatistics - return s -} - -// Fields a list of fields to return. -func (s *MultiTermvectorItem) Fields(fields ...string) *MultiTermvectorItem { - if s.fields == nil { - s.fields = make([]string, 0) - } - s.fields = append(s.fields, fields...) - return s -} - -// PerFieldAnalyzer allows to specify a different analyzer than the one -// at the field. -func (s *MultiTermvectorItem) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *MultiTermvectorItem { - s.perFieldAnalyzer = perFieldAnalyzer - return s -} - -// Offsets specifies if term offsets should be returned. -func (s *MultiTermvectorItem) Offsets(offsets bool) *MultiTermvectorItem { - s.offsets = &offsets - return s -} - -// Parent id of documents. -func (s *MultiTermvectorItem) Parent(parent string) *MultiTermvectorItem { - s.parent = parent - return s -} - -// Payloads specifies if term payloads should be returned. -func (s *MultiTermvectorItem) Payloads(payloads bool) *MultiTermvectorItem { - s.payloads = &payloads - return s -} - -// Positions specifies if term positions should be returned. -func (s *MultiTermvectorItem) Positions(positions bool) *MultiTermvectorItem { - s.positions = &positions - return s -} - -// Preference specify the node or shard the operation -// should be performed on (default: random). -func (s *MultiTermvectorItem) Preference(preference string) *MultiTermvectorItem { - s.preference = preference - return s -} - -// Realtime specifies if request is real-time as opposed to -// near-real-time (default: true). -func (s *MultiTermvectorItem) Realtime(realtime bool) *MultiTermvectorItem { - s.realtime = &realtime - return s -} - -// Routing is a specific routing value. -func (s *MultiTermvectorItem) Routing(routing string) *MultiTermvectorItem { - s.routing = routing - return s -} - -// TermStatistics specifies if total term frequency and document frequency -// should be returned. -func (s *MultiTermvectorItem) TermStatistics(termStatistics bool) *MultiTermvectorItem { - s.termStatistics = &termStatistics - return s -} - -// Source returns the serialized JSON to be sent to Elasticsearch as -// part of a MultiTermvector. 
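Putting the per-document item builders above together with the service's Add and Do methods, a hedged usage sketch; the client value, index, ids, and field names are all illustrative:

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// termVectors requests term vectors for two documents; the second item
// overrides the service defaults by also asking for term statistics.
func termVectors(ctx context.Context, client *elastic.Client) {
	res, err := elastic.NewMultiTermvectorService(client).
		Index("articles"). // service-level default index
		Add(
			elastic.NewMultiTermvectorItem().Id("1").Fields("body"),
			elastic.NewMultiTermvectorItem().Id("2").Fields("body").TermStatistics(true),
		).
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, doc := range res.Docs {
		log.Printf("term vectors for document %s", doc.Id)
	}
}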
-func (s *MultiTermvectorItem) Source() interface{} { - source := make(map[string]interface{}) - - source["_id"] = s.id - - if s.index != "" { - source["_index"] = s.index - } - if s.typ != "" { - source["_type"] = s.typ - } - if s.fields != nil { - source["fields"] = s.fields - } - if s.fieldStatistics != nil { - source["field_statistics"] = fmt.Sprintf("%v", *s.fieldStatistics) - } - if s.offsets != nil { - source["offsets"] = s.offsets - } - if s.parent != "" { - source["parent"] = s.parent - } - if s.payloads != nil { - source["payloads"] = fmt.Sprintf("%v", *s.payloads) - } - if s.positions != nil { - source["positions"] = fmt.Sprintf("%v", *s.positions) - } - if s.preference != "" { - source["preference"] = s.preference - } - if s.realtime != nil { - source["realtime"] = fmt.Sprintf("%v", *s.realtime) - } - if s.routing != "" { - source["routing"] = s.routing - } - if s.termStatistics != nil { - source["term_statistics"] = fmt.Sprintf("%v", *s.termStatistics) - } - if s.doc != nil { - source["doc"] = s.doc - } - if s.perFieldAnalyzer != nil && len(s.perFieldAnalyzer) > 0 { - source["per_field_analyzer"] = s.perFieldAnalyzer - } - - return source -} diff --git a/vendor/github.com/olivere/elastic/v7/nodes_info.go b/vendor/github.com/olivere/elastic/v7/nodes_info.go deleted file mode 100644 index 3037df4..0000000 --- a/vendor/github.com/olivere/elastic/v7/nodes_info.go +++ /dev/null @@ -1,419 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// NodesInfoService allows to retrieve one or more or all of the -// cluster nodes information. -// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-nodes-info.html. -type NodesInfoService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - nodeId []string - metric []string - flatSettings *bool -} - -// NewNodesInfoService creates a new NodesInfoService. -func NewNodesInfoService(client *Client) *NodesInfoService { - return &NodesInfoService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *NodesInfoService) Pretty(pretty bool) *NodesInfoService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *NodesInfoService) Human(human bool) *NodesInfoService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *NodesInfoService) ErrorTrace(errorTrace bool) *NodesInfoService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *NodesInfoService) FilterPath(filterPath ...string) *NodesInfoService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
-func (s *NodesInfoService) Header(name string, value string) *NodesInfoService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *NodesInfoService) Headers(headers http.Header) *NodesInfoService { - s.headers = headers - return s -} - -// NodeId is a list of node IDs or names to limit the returned information. -// Use "_local" to return information from the node you're connecting to, -// leave empty to get information from all nodes. -func (s *NodesInfoService) NodeId(nodeId ...string) *NodesInfoService { - s.nodeId = append(s.nodeId, nodeId...) - return s -} - -// Metric is a list of metrics you wish returned. Leave empty to return all. -// Valid metrics are: settings, os, process, jvm, thread_pool, network, -// transport, http, and plugins. -func (s *NodesInfoService) Metric(metric ...string) *NodesInfoService { - s.metric = append(s.metric, metric...) - return s -} - -// FlatSettings returns settings in flat format (default: false). -func (s *NodesInfoService) FlatSettings(flatSettings bool) *NodesInfoService { - s.flatSettings = &flatSettings - return s -} - -// buildURL builds the URL for the operation. -func (s *NodesInfoService) buildURL() (string, url.Values, error) { - var nodeId, metric string - - if len(s.nodeId) > 0 { - nodeId = strings.Join(s.nodeId, ",") - } else { - nodeId = "_all" - } - - if len(s.metric) > 0 { - metric = strings.Join(s.metric, ",") - } else { - metric = "_all" - } - - // Build URL - path, err := uritemplates.Expand("/_nodes/{node_id}/{metric}", map[string]string{ - "node_id": nodeId, - "metric": metric, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.flatSettings != nil { - params.Set("flat_settings", fmt.Sprintf("%v", *s.flatSettings)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *NodesInfoService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *NodesInfoService) Do(ctx context.Context) (*NodesInfoResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(NodesInfoResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// NodesInfoResponse is the response of NodesInfoService.Do. -type NodesInfoResponse struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]*NodesInfoNode `json:"nodes"` -} - -// NodesInfoNode represents information about a node in the cluster. -type NodesInfoNode struct { - // Name of the node, e.g. "Mister Fear" - Name string `json:"name"` - // TransportAddress, e.g. "127.0.0.1:9300" - TransportAddress string `json:"transport_address"` - // Host is the host name, e.g. 
"macbookair" - Host string `json:"host"` - // IP is the IP address, e.g. "192.168.1.2" - IP string `json:"ip"` - // Version is the Elasticsearch version running on the node, e.g. "1.4.3" - Version string `json:"version"` - // BuildHash is the Elasticsearch build bash, e.g. "36a29a7" - BuildHash string `json:"build_hash"` - - // TotalIndexingBuffer represents the total heap allowed to be used to - // hold recently indexed documents before they must be written to disk. - TotalIndexingBuffer int64 `json:"total_indexing_buffer"` // e.g. 16gb - // TotalIndexingBufferInBytes is the same as TotalIndexingBuffer, but - // expressed in bytes. - TotalIndexingBufferInBytes string `json:"total_indexing_buffer_in_bytes"` - - // Roles of the node, e.g. [master, ingest, data] - Roles []string `json:"roles"` - - // Attributes of the node. - Attributes map[string]string `json:"attributes"` - - // Settings of the node, e.g. paths and pidfile. - Settings map[string]interface{} `json:"settings"` - - // OS information, e.g. CPU and memory. - OS *NodesInfoNodeOS `json:"os"` - - // Process information, e.g. max file descriptors. - Process *NodesInfoNodeProcess `json:"process"` - - // JVM information, e.g. VM version. - JVM *NodesInfoNodeJVM `json:"jvm"` - - // ThreadPool information. - ThreadPool *NodesInfoNodeThreadPool `json:"thread_pool"` - - // Network information. - Transport *NodesInfoNodeTransport `json:"transport"` - - // HTTP information. - HTTP *NodesInfoNodeHTTP `json:"http"` - - // Plugins information. - Plugins []*NodesInfoNodePlugin `json:"plugins"` - - // Modules information. - Modules []*NodesInfoNodeModule `json:"modules"` - - // Ingest information. - Ingest *NodesInfoNodeIngest `json:"ingest"` -} - -// HasRole returns true if the node fulfills the given role. -func (n *NodesInfoNode) HasRole(role string) bool { - for _, r := range n.Roles { - if r == role { - return true - } - } - return false -} - -// IsMaster returns true if the node is a master node. -func (n *NodesInfoNode) IsMaster() bool { - return n.HasRole("master") -} - -// IsData returns true if the node is a data node. -func (n *NodesInfoNode) IsData() bool { - return n.HasRole("data") -} - -// IsIngest returns true if the node is an ingest node. -func (n *NodesInfoNode) IsIngest() bool { - return n.HasRole("ingest") -} - -// NodesInfoNodeOS represents OS-specific details about a node. -type NodesInfoNodeOS struct { - RefreshInterval string `json:"refresh_interval"` // e.g. 1s - RefreshIntervalInMillis int `json:"refresh_interval_in_millis"` // e.g. 1000 - Name string `json:"name"` // e.g. Linux - Arch string `json:"arch"` // e.g. amd64 - Version string `json:"version"` // e.g. 4.9.87-linuxkit-aufs - AvailableProcessors int `json:"available_processors"` // e.g. 4 - AllocatedProcessors int `json:"allocated_processors"` // e.g. 4 -} - -// NodesInfoNodeProcess represents process-related information. -type NodesInfoNodeProcess struct { - RefreshInterval string `json:"refresh_interval"` // e.g. 1s - RefreshIntervalInMillis int64 `json:"refresh_interval_in_millis"` // e.g. 1000 - ID int `json:"id"` // process id, e.g. 87079 - Mlockall bool `json:"mlockall"` // e.g. false -} - -// NodesInfoNodeJVM represents JVM-related information. -type NodesInfoNodeJVM struct { - PID int `json:"pid"` // process id, e.g. 87079 - Version string `json:"version"` // e.g. "1.8.0_161" - VMName string `json:"vm_name"` // e.g. "OpenJDK 64-Bit Server VM" - VMVersion string `json:"vm_version"` // e.g. "25.161-b14" - VMVendor string `json:"vm_vendor"` // e.g. 
"Oracle Corporation" - StartTime time.Time `json:"start_time"` // e.g. "2018-03-30T11:06:36.644Z" - StartTimeInMillis int64 `json:"start_time_in_millis"` // e.g. 1522407996644 - - // Mem information - Mem struct { - HeapInit string `json:"heap_init"` // e.g. "1gb" - HeapInitInBytes int `json:"heap_init_in_bytes"` // e.g. 1073741824 - HeapMax string `json:"heap_max"` // e.g. "1007.3mb" - HeapMaxInBytes int `json:"heap_max_in_bytes"` // e.g. 1056309248 - NonHeapInit string `json:"non_heap_init"` // e.g. "2.4mb" - NonHeapInitInBytes int `json:"non_heap_init_in_bytes"` // e.g. 2555904 - NonHeapMax string `json:"non_heap_max"` // e.g. "0b" - NonHeapMaxInBytes int `json:"non_heap_max_in_bytes"` // e.g. 0 - DirectMax string `json:"direct_max"` // e.g. "1007.3mb" - DirectMaxInBytes int `json:"direct_max_in_bytes"` // e.g. 1056309248 - } `json:"mem"` - - GCCollectors []string `json:"gc_collectors"` // e.g. ["ParNew", "ConcurrentMarkSweep"] - MemoryPools []string `json:"memory_pools"` // e.g. ["Code Cache", "Metaspace", "Compressed Class Space", "Par Eden Space", "Par Survivor Space", "CMS Old Gen"] - - // UsingCompressedOrdinaryObjectPointers should be a bool, but is a - // string in 6.2.3. We use an interface{} for now so that it won't break - // when this will be fixed in later versions of Elasticsearch. - UsingCompressedOrdinaryObjectPointers interface{} `json:"using_compressed_ordinary_object_pointers"` - - InputArguments []string `json:"input_arguments"` // e.g. ["-Xms1g", "-Xmx1g" ...] -} - -// NodesInfoNodeThreadPool represents information about the thread pool. -type NodesInfoNodeThreadPool struct { - ForceMerge *NodesInfoNodeThreadPoolSection `json:"force_merge"` - FetchShardStarted *NodesInfoNodeThreadPoolSection `json:"fetch_shard_started"` - Listener *NodesInfoNodeThreadPoolSection `json:"listener"` - Index *NodesInfoNodeThreadPoolSection `json:"index"` - Refresh *NodesInfoNodeThreadPoolSection `json:"refresh"` - Generic *NodesInfoNodeThreadPoolSection `json:"generic"` - Warmer *NodesInfoNodeThreadPoolSection `json:"warmer"` - Search *NodesInfoNodeThreadPoolSection `json:"search"` - Flush *NodesInfoNodeThreadPoolSection `json:"flush"` - FetchShardStore *NodesInfoNodeThreadPoolSection `json:"fetch_shard_store"` - Management *NodesInfoNodeThreadPoolSection `json:"management"` - Get *NodesInfoNodeThreadPoolSection `json:"get"` - Bulk *NodesInfoNodeThreadPoolSection `json:"bulk"` - Snapshot *NodesInfoNodeThreadPoolSection `json:"snapshot"` - - Percolate *NodesInfoNodeThreadPoolSection `json:"percolate"` // check - Bench *NodesInfoNodeThreadPoolSection `json:"bench"` // check - Suggest *NodesInfoNodeThreadPoolSection `json:"suggest"` // deprecated - Optimize *NodesInfoNodeThreadPoolSection `json:"optimize"` // deprecated - Merge *NodesInfoNodeThreadPoolSection `json:"merge"` // deprecated -} - -// NodesInfoNodeThreadPoolSection represents information about a certain -// type of thread pool, e.g. for indexing or searching. -type NodesInfoNodeThreadPoolSection struct { - Type string `json:"type"` // e.g. fixed, scaling, or fixed_auto_queue_size - Min int `json:"min"` // e.g. 4 - Max int `json:"max"` // e.g. 4 - KeepAlive string `json:"keep_alive"` // e.g. "5m" - QueueSize interface{} `json:"queue_size"` // e.g. "1k" or -1 -} - -// NodesInfoNodeTransport represents transport-related information. 
-type NodesInfoNodeTransport struct { - BoundAddress []string `json:"bound_address"` - PublishAddress string `json:"publish_address"` - Profiles map[string]*NodesInfoNodeTransportProfile `json:"profiles"` -} - -// NodesInfoNodeTransportProfile represents a transport profile. -type NodesInfoNodeTransportProfile struct { - BoundAddress []string `json:"bound_address"` - PublishAddress string `json:"publish_address"` -} - -// NodesInfoNodeHTTP represents HTTP-related information. -type NodesInfoNodeHTTP struct { - BoundAddress []string `json:"bound_address"` // e.g. ["127.0.0.1:9200", "[fe80::1]:9200", "[::1]:9200"] - PublishAddress string `json:"publish_address"` // e.g. "127.0.0.1:9300" - MaxContentLength string `json:"max_content_length"` // e.g. "100mb" - MaxContentLengthInBytes int64 `json:"max_content_length_in_bytes"` -} - -// NodesInfoNodePlugin represents information about a plugin. -type NodesInfoNodePlugin struct { - Name string `json:"name"` // e.g. "ingest-geoip" - Version string `json:"version"` // e.g. "6.2.3" - ElasticsearchVersion string `json:"elasticsearch_version"` - JavaVersion string `json:"java_version"` - Description string `json:"description"` // e.g. "Ingest processor ..." - Classname string `json:"classname"` // e.g. "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin" - ExtendedPlugins []string `json:"extended_plugins"` - HasNativeController bool `json:"has_native_controller"` - RequiresKeystore bool `json:"requires_keystore"` -} - -// NodesInfoNodeModule represents information about a module. -type NodesInfoNodeModule struct { - Name string `json:"name"` // e.g. "ingest-geoip" - Version string `json:"version"` // e.g. "6.2.3" - ElasticsearchVersion string `json:"elasticsearch_version"` - JavaVersion string `json:"java_version"` - Description string `json:"description"` // e.g. "Ingest processor ..." - Classname string `json:"classname"` // e.g. "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin" - ExtendedPlugins []string `json:"extended_plugins"` - HasNativeController bool `json:"has_native_controller"` - RequiresKeystore bool `json:"requires_keystore"` -} - -// NodesInfoNodeIngest represents information about the ingester. -type NodesInfoNodeIngest struct { - Processors []*NodesInfoNodeIngestProcessorInfo `json:"processors"` -} - -// NodesInfoNodeIngestProcessorInfo represents ingest processor info. -type NodesInfoNodeIngestProcessorInfo struct { - Type string `json:"type"` // e.g. append, convert, date etc. -} diff --git a/vendor/github.com/olivere/elastic/v7/nodes_stats.go b/vendor/github.com/olivere/elastic/v7/nodes_stats.go deleted file mode 100644 index a57651e..0000000 --- a/vendor/github.com/olivere/elastic/v7/nodes_stats.go +++ /dev/null @@ -1,747 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// NodesStatsService returns node statistics. -// See http://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-nodes-stats.html -// for details. 
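Before the service definition below, a hedged usage sketch: pull only the jvm metric for the local node and report heap usage. The field paths come from the response types later in this deleted file; the node selector and output format are illustrative.

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// heapUsage reports JVM heap usage per node, requesting only the jvm
// metric so Elasticsearch returns the JVM section of the stats.
func heapUsage(ctx context.Context, client *elastic.Client) {
	stats, err := elastic.NewNodesStatsService(client).
		NodeId("_local").
		Metric("jvm").
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, node := range stats.Nodes {
		log.Printf("%s: heap %s / %s (%d%%)", node.Name,
			node.JVM.Mem.HeapUsed, node.JVM.Mem.HeapMax, node.JVM.Mem.HeapUsedPercent)
	}
}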
-type NodesStatsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - metric []string - indexMetric []string - nodeId []string - completionFields []string - fielddataFields []string - fields []string - groups *bool - level string - timeout string - types []string -} - -// NewNodesStatsService creates a new NodesStatsService. -func NewNodesStatsService(client *Client) *NodesStatsService { - return &NodesStatsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *NodesStatsService) Pretty(pretty bool) *NodesStatsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *NodesStatsService) Human(human bool) *NodesStatsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *NodesStatsService) ErrorTrace(errorTrace bool) *NodesStatsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *NodesStatsService) FilterPath(filterPath ...string) *NodesStatsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *NodesStatsService) Header(name string, value string) *NodesStatsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *NodesStatsService) Headers(headers http.Header) *NodesStatsService { - s.headers = headers - return s -} - -// Metric limits the information returned to the specified metrics. -func (s *NodesStatsService) Metric(metric ...string) *NodesStatsService { - s.metric = append(s.metric, metric...) - return s -} - -// IndexMetric limits the information returned for `indices` metric -// to the specific index metrics. Isn't used if `indices` (or `all`) -// metric isn't specified.. -func (s *NodesStatsService) IndexMetric(indexMetric ...string) *NodesStatsService { - s.indexMetric = append(s.indexMetric, indexMetric...) - return s -} - -// NodeId is a list of node IDs or names to limit the returned information; -// use `_local` to return information from the node you're connecting to, -// leave empty to get information from all nodes. -func (s *NodesStatsService) NodeId(nodeId ...string) *NodesStatsService { - s.nodeId = append(s.nodeId, nodeId...) - return s -} - -// CompletionFields is a list of fields for `fielddata` and `suggest` -// index metric (supports wildcards). -func (s *NodesStatsService) CompletionFields(completionFields ...string) *NodesStatsService { - s.completionFields = append(s.completionFields, completionFields...) - return s -} - -// FielddataFields is a list of fields for `fielddata` index metric (supports wildcards). -func (s *NodesStatsService) FielddataFields(fielddataFields ...string) *NodesStatsService { - s.fielddataFields = append(s.fielddataFields, fielddataFields...) - return s -} - -// Fields is a list of fields for `fielddata` and `completion` index metric (supports wildcards). 
-func (s *NodesStatsService) Fields(fields ...string) *NodesStatsService { - s.fields = append(s.fields, fields...) - return s -} - -// Groups is a list of search groups for `search` index metric. -func (s *NodesStatsService) Groups(groups bool) *NodesStatsService { - s.groups = &groups - return s -} - -// Level specifies whether to return indices stats aggregated at node, index or shard level. -func (s *NodesStatsService) Level(level string) *NodesStatsService { - s.level = level - return s -} - -// Timeout specifies an explicit operation timeout. -func (s *NodesStatsService) Timeout(timeout string) *NodesStatsService { - s.timeout = timeout - return s -} - -// Types a list of document types for the `indexing` index metric. -func (s *NodesStatsService) Types(types ...string) *NodesStatsService { - s.types = append(s.types, types...) - return s -} - -// buildURL builds the URL for the operation. -func (s *NodesStatsService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 { - path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}/{index_metric}", map[string]string{ - "index_metric": strings.Join(s.indexMetric, ","), - "node_id": strings.Join(s.nodeId, ","), - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.nodeId) > 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 { - path, err = uritemplates.Expand("/_nodes/{node_id}/stats/{metric}", map[string]string{ - "node_id": strings.Join(s.nodeId, ","), - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 { - path, err = uritemplates.Expand("/_nodes/{node_id}/stats/_all/{index_metric}", map[string]string{ - "index_metric": strings.Join(s.indexMetric, ","), - "node_id": strings.Join(s.nodeId, ","), - }) - } else if len(s.nodeId) > 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 { - path, err = uritemplates.Expand("/_nodes/{node_id}/stats", map[string]string{ - "node_id": strings.Join(s.nodeId, ","), - }) - } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) > 0 { - path, err = uritemplates.Expand("/_nodes/stats/{metric}/{index_metric}", map[string]string{ - "index_metric": strings.Join(s.indexMetric, ","), - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.nodeId) == 0 && len(s.metric) > 0 && len(s.indexMetric) == 0 { - path, err = uritemplates.Expand("/_nodes/stats/{metric}", map[string]string{ - "metric": strings.Join(s.metric, ","), - }) - } else if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) > 0 { - path, err = uritemplates.Expand("/_nodes/stats/_all/{index_metric}", map[string]string{ - "index_metric": strings.Join(s.indexMetric, ","), - }) - } else { // if len(s.nodeId) == 0 && len(s.metric) == 0 && len(s.indexMetric) == 0 { - path = "/_nodes/stats" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.completionFields) > 0 { - params.Set("completion_fields", strings.Join(s.completionFields, ",")) - } - if len(s.fielddataFields) > 0 { - params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) - } 
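// For reference, the eight-way branch above reduces to the following
// path shapes (selector values are illustrative):
//
//	NodeId("n1") + Metric("jvm") + IndexMetric("docs") -> /_nodes/n1/stats/jvm/docs
//	NodeId("n1") + Metric("jvm")                       -> /_nodes/n1/stats/jvm
//	NodeId("n1") + IndexMetric("docs")                 -> /_nodes/n1/stats/_all/docs
//	NodeId("n1")                                       -> /_nodes/n1/stats
//	Metric("jvm") + IndexMetric("docs")                -> /_nodes/stats/jvm/docs
//	Metric("jvm")                                      -> /_nodes/stats/jvm
//	IndexMetric("docs")                                -> /_nodes/stats/_all/docs
//	(no selectors)                                     -> /_nodes/stats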
- if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.groups != nil { - params.Set("groups", fmt.Sprintf("%v", *s.groups)) - } - if s.level != "" { - params.Set("level", s.level) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if len(s.types) > 0 { - params.Set("types", strings.Join(s.types, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *NodesStatsService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *NodesStatsService) Do(ctx context.Context) (*NodesStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(NodesStatsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// NodesStatsResponse is the response of NodesStatsService.Do. -type NodesStatsResponse struct { - ClusterName string `json:"cluster_name"` - Nodes map[string]*NodesStatsNode `json:"nodes"` -} - -type NodesStatsNode struct { - // Timestamp when these stats we're gathered. - Timestamp int64 `json:"timestamp"` - // Name of the node, e.g. "Mister Fear" - Name string `json:"name"` - // TransportAddress, e.g. "127.0.0.1:9300" - TransportAddress string `json:"transport_address"` - // Host is the host name, e.g. "macbookair" - Host string `json:"host"` - // IP is an IP address, e.g. "192.168.1.2" - IP string `json:"ip"` - // Roles is a list of the roles of the node, e.g. master, data, ingest. - Roles []string `json:"roles"` - - // Attributes of the node. - Attributes map[string]interface{} `json:"attributes"` - - // Indices returns index information. - Indices *NodesStatsIndex `json:"indices"` - - // OS information, e.g. CPU and memory. - OS *NodesStatsNodeOS `json:"os"` - - // Process information, e.g. max file descriptors. - Process *NodesStatsNodeProcess `json:"process"` - - // JVM information, e.g. VM version. - JVM *NodesStatsNodeJVM `json:"jvm"` - - // ThreadPool information. - ThreadPool map[string]*NodesStatsNodeThreadPool `json:"thread_pool"` - - // FS returns information about the filesystem. - FS *NodesStatsNodeFS `json:"fs"` - - // Network information. - Transport *NodesStatsNodeTransport `json:"transport"` - - // HTTP information. - HTTP *NodesStatsNodeHTTP `json:"http"` - - // Breaker contains information about circuit breakers. - Breaker map[string]*NodesStatsBreaker `json:"breakers"` - - // ScriptStats information. - ScriptStats *NodesStatsScriptStats `json:"script"` - - // Discovery information. 
- Discovery *NodesStatsDiscovery `json:"discovery"` - - // Ingest information - Ingest *NodesStatsIngest `json:"ingest"` -} - -type NodesStatsIndex struct { - Docs *NodesStatsDocsStats `json:"docs"` - Store *NodesStatsStoreStats `json:"store"` - Indexing *NodesStatsIndexingStats `json:"indexing"` - Get *NodesStatsGetStats `json:"get"` - Search *NodesStatsSearchStats `json:"search"` - Merges *NodesStatsMergeStats `json:"merges"` - Refresh *NodesStatsRefreshStats `json:"refresh"` - Flush *NodesStatsFlushStats `json:"flush"` - Warmer *NodesStatsWarmerStats `json:"warmer"` - QueryCache *NodesStatsQueryCacheStats `json:"query_cache"` - Fielddata *NodesStatsFielddataStats `json:"fielddata"` - Percolate *NodesStatsPercolateStats `json:"percolate"` - Completion *NodesStatsCompletionStats `json:"completion"` - Segments *NodesStatsSegmentsStats `json:"segments"` - Translog *NodesStatsTranslogStats `json:"translog"` - Suggest *NodesStatsSuggestStats `json:"suggest"` - RequestCache *NodesStatsRequestCacheStats `json:"request_cache"` - Recovery NodesStatsRecoveryStats `json:"recovery"` - - Indices map[string]*NodesStatsIndex `json:"indices"` // for level=indices - Shards map[string]*NodesStatsIndex `json:"shards"` // for level=shards -} - -type NodesStatsDocsStats struct { - Count int64 `json:"count"` - Deleted int64 `json:"deleted"` -} - -type NodesStatsStoreStats struct { - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` -} - -type NodesStatsIndexingStats struct { - IndexTotal int64 `json:"index_total"` - IndexTime string `json:"index_time"` - IndexTimeInMillis int64 `json:"index_time_in_millis"` - IndexCurrent int64 `json:"index_current"` - IndexFailed int64 `json:"index_failed"` - DeleteTotal int64 `json:"delete_total"` - DeleteTime string `json:"delete_time"` - DeleteTimeInMillis int64 `json:"delete_time_in_millis"` - DeleteCurrent int64 `json:"delete_current"` - NoopUpdateTotal int64 `json:"noop_update_total"` - IsThrottled bool `json:"is_throttled"` - ThrottledTime string `json:"throttle_time"` // no typo, see https://github.com/elastic/elasticsearch/blob/ff99bc1d3f8a7ea72718872d214ec2097dfca276/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java#L244 - ThrottledTimeInMillis int64 `json:"throttle_time_in_millis"` - - Types map[string]*NodesStatsIndexingStats `json:"types"` // stats for individual types -} - -type NodesStatsGetStats struct { - Total int64 `json:"total"` - Time string `json:"get_time"` - TimeInMillis int64 `json:"time_in_millis"` - Exists int64 `json:"exists"` - ExistsTime string `json:"exists_time"` - ExistsTimeInMillis int64 `json:"exists_in_millis"` - Missing int64 `json:"missing"` - MissingTime string `json:"missing_time"` - MissingTimeInMillis int64 `json:"missing_in_millis"` - Current int64 `json:"current"` -} - -type NodesStatsSearchStats struct { - OpenContexts int64 `json:"open_contexts"` - QueryTotal int64 `json:"query_total"` - QueryTime string `json:"query_time"` - QueryTimeInMillis int64 `json:"query_time_in_millis"` - QueryCurrent int64 `json:"query_current"` - FetchTotal int64 `json:"fetch_total"` - FetchTime string `json:"fetch_time"` - FetchTimeInMillis int64 `json:"fetch_time_in_millis"` - FetchCurrent int64 `json:"fetch_current"` - ScrollTotal int64 `json:"scroll_total"` - ScrollTime string `json:"scroll_time"` - ScrollTimeInMillis int64 `json:"scroll_time_in_millis"` - ScrollCurrent int64 `json:"scroll_current"` - - Groups map[string]*NodesStatsSearchStats `json:"groups"` // stats for individual groups -} - -type 
NodesStatsMergeStats struct { - Current int64 `json:"current"` - CurrentDocs int64 `json:"current_docs"` - CurrentSize string `json:"current_size"` - CurrentSizeInBytes int64 `json:"current_size_in_bytes"` - Total int64 `json:"total"` - TotalTime string `json:"total_time"` - TotalTimeInMillis int64 `json:"total_time_in_millis"` - TotalDocs int64 `json:"total_docs"` - TotalSize string `json:"total_size"` - TotalSizeInBytes int64 `json:"total_size_in_bytes"` - TotalStoppedTime string `json:"total_stopped_time"` - TotalStoppedTimeInMillis int64 `json:"total_stopped_time_in_millis"` - TotalThrottledTime string `json:"total_throttled_time"` - TotalThrottledTimeInMillis int64 `json:"total_throttled_time_in_millis"` - TotalThrottleBytes string `json:"total_auto_throttle"` - TotalThrottleBytesInBytes int64 `json:"total_auto_throttle_in_bytes"` -} - -type NodesStatsRefreshStats struct { - Total int64 `json:"total"` - TotalTime string `json:"total_time"` - TotalTimeInMillis int64 `json:"total_time_in_millis"` -} - -type NodesStatsFlushStats struct { - Total int64 `json:"total"` - TotalTime string `json:"total_time"` - TotalTimeInMillis int64 `json:"total_time_in_millis"` -} - -type NodesStatsWarmerStats struct { - Current int64 `json:"current"` - Total int64 `json:"total"` - TotalTime string `json:"total_time"` - TotalTimeInMillis int64 `json:"total_time_in_millis"` -} - -type NodesStatsQueryCacheStats struct { - MemorySize string `json:"memory_size"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - TotalCount int64 `json:"total_count"` - HitCount int64 `json:"hit_count"` - MissCount int64 `json:"miss_count"` - CacheSize int64 `json:"cache_size"` - CacheCount int64 `json:"cache_count"` - Evictions int64 `json:"evictions"` -} - -type NodesStatsFielddataStats struct { - MemorySize string `json:"memory_size"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` - Fields map[string]struct { - MemorySize string `json:"memory_size"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - } `json:"fields"` -} - -type NodesStatsPercolateStats struct { - Total int64 `json:"total"` - Time string `json:"time"` - TimeInMillis int64 `json:"time_in_millis"` - Current int64 `json:"current"` - MemorySize string `json:"memory_size"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Queries int64 `json:"queries"` -} - -type NodesStatsCompletionStats struct { - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` - Fields map[string]struct { - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"fields"` -} - -type NodesStatsSegmentsStats struct { - Count int64 `json:"count"` - Memory string `json:"memory"` - MemoryInBytes int64 `json:"memory_in_bytes"` - TermsMemory string `json:"terms_memory"` - TermsMemoryInBytes int64 `json:"terms_memory_in_bytes"` - StoredFieldsMemory string `json:"stored_fields_memory"` - StoredFieldsMemoryInBytes int64 `json:"stored_fields_memory_in_bytes"` - TermVectorsMemory string `json:"term_vectors_memory"` - TermVectorsMemoryInBytes int64 `json:"term_vectors_memory_in_bytes"` - NormsMemory string `json:"norms_memory"` - NormsMemoryInBytes int64 `json:"norms_memory_in_bytes"` - DocValuesMemory string `json:"doc_values_memory"` - DocValuesMemoryInBytes int64 `json:"doc_values_memory_in_bytes"` - IndexWriterMemory string `json:"index_writer_memory"` - IndexWriterMemoryInBytes int64 `json:"index_writer_memory_in_bytes"` - IndexWriterMaxMemory string `json:"index_writer_max_memory"` - 
IndexWriterMaxMemoryInBytes int64 `json:"index_writer_max_memory_in_bytes"` - VersionMapMemory string `json:"version_map_memory"` - VersionMapMemoryInBytes int64 `json:"version_map_memory_in_bytes"` - FixedBitSetMemory string `json:"fixed_bit_set"` // not a typo - FixedBitSetMemoryInBytes int64 `json:"fixed_bit_set_memory_in_bytes"` -} - -type NodesStatsTranslogStats struct { - Operations int64 `json:"operations"` - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` -} - -type NodesStatsSuggestStats struct { - Total int64 `json:"total"` - TotalTime string `json:"total_time"` - TotalTimeInMillis int64 `json:"total_time_in_millis"` - Current int64 `json:"current"` -} - -type NodesStatsRequestCacheStats struct { - MemorySize string `json:"memory_size"` - MemorySizeInBytes int64 `json:"memory_size_in_bytes"` - Evictions int64 `json:"evictions"` - HitCount int64 `json:"hit_count"` - MissCount int64 `json:"miss_count"` -} - -type NodesStatsRecoveryStats struct { - CurrentAsSource int `json:"current_as_source"` - CurrentAsTarget int `json:"current_as_target"` -} - -type NodesStatsNodeOS struct { - Timestamp int64 `json:"timestamp"` - CPU *NodesStatsNodeOSCPU `json:"cpu"` - Mem *NodesStatsNodeOSMem `json:"mem"` - Swap *NodesStatsNodeOSSwap `json:"swap"` -} - -type NodesStatsNodeOSCPU struct { - Percent int `json:"percent"` - LoadAverage map[string]float64 `json:"load_average"` // keys are: 1m, 5m, and 15m -} - -type NodesStatsNodeOSMem struct { - Total string `json:"total"` - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` - FreeInBytes int64 `json:"free_in_bytes"` - Used string `json:"used"` - UsedInBytes int64 `json:"used_in_bytes"` - FreePercent int `json:"free_percent"` - UsedPercent int `json:"used_percent"` -} - -type NodesStatsNodeOSSwap struct { - Total string `json:"total"` - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` - FreeInBytes int64 `json:"free_in_bytes"` - Used string `json:"used"` - UsedInBytes int64 `json:"used_in_bytes"` -} - -type NodesStatsNodeProcess struct { - Timestamp int64 `json:"timestamp"` - OpenFileDescriptors int64 `json:"open_file_descriptors"` - MaxFileDescriptors int64 `json:"max_file_descriptors"` - CPU struct { - Percent int `json:"percent"` - Total string `json:"total"` - TotalInMillis int64 `json:"total_in_millis"` - } `json:"cpu"` - Mem struct { - TotalVirtual string `json:"total_virtual"` - TotalVirtualInBytes int64 `json:"total_virtual_in_bytes"` - } `json:"mem"` -} - -type NodesStatsNodeJVM struct { - Timestamp int64 `json:"timestamp"` - Uptime string `json:"uptime"` - UptimeInMillis int64 `json:"uptime_in_millis"` - Mem *NodesStatsNodeJVMMem `json:"mem"` - Threads *NodesStatsNodeJVMThreads `json:"threads"` - GC *NodesStatsNodeJVMGC `json:"gc"` - BufferPools map[string]*NodesStatsNodeJVMBufferPool `json:"buffer_pools"` - Classes *NodesStatsNodeJVMClasses `json:"classes"` -} - -type NodesStatsNodeJVMMem struct { - HeapUsed string `json:"heap_used"` - HeapUsedInBytes int64 `json:"heap_used_in_bytes"` - HeapUsedPercent int `json:"heap_used_percent"` - HeapCommitted string `json:"heap_committed"` - HeapCommittedInBytes int64 `json:"heap_committed_in_bytes"` - HeapMax string `json:"heap_max"` - HeapMaxInBytes int64 `json:"heap_max_in_bytes"` - NonHeapUsed string `json:"non_heap_used"` - NonHeapUsedInBytes int64 `json:"non_heap_used_in_bytes"` - NonHeapCommitted string `json:"non_heap_committed"` - NonHeapCommittedInBytes int64 `json:"non_heap_committed_in_bytes"` - Pools map[string]struct { - 
Used string `json:"used"` - UsedInBytes int64 `json:"used_in_bytes"` - Max string `json:"max"` - MaxInBytes int64 `json:"max_in_bytes"` - PeakUsed string `json:"peak_used"` - PeakUsedInBytes int64 `json:"peak_used_in_bytes"` - PeakMax string `json:"peak_max"` - PeakMaxInBytes int64 `json:"peak_max_in_bytes"` - } `json:"pools"` -} - -type NodesStatsNodeJVMThreads struct { - Count int64 `json:"count"` - PeakCount int64 `json:"peak_count"` -} - -type NodesStatsNodeJVMGC struct { - Collectors map[string]*NodesStatsNodeJVMGCCollector `json:"collectors"` -} - -type NodesStatsNodeJVMGCCollector struct { - CollectionCount int64 `json:"collection_count"` - CollectionTime string `json:"collection_time"` - CollectionTimeInMillis int64 `json:"collection_time_in_millis"` -} - -type NodesStatsNodeJVMBufferPool struct { - Count int64 `json:"count"` - TotalCapacity string `json:"total_capacity"` - TotalCapacityInBytes int64 `json:"total_capacity_in_bytes"` -} - -type NodesStatsNodeJVMClasses struct { - CurrentLoadedCount int64 `json:"current_loaded_count"` - TotalLoadedCount int64 `json:"total_loaded_count"` - TotalUnloadedCount int64 `json:"total_unloaded_count"` -} - -type NodesStatsNodeThreadPool struct { - Threads int `json:"threads"` - Queue int `json:"queue"` - Active int `json:"active"` - Rejected int64 `json:"rejected"` - Largest int `json:"largest"` - Completed int64 `json:"completed"` -} - -type NodesStatsNodeFS struct { - Timestamp int64 `json:"timestamp"` - Total *NodesStatsNodeFSEntry `json:"total"` - Data []*NodesStatsNodeFSEntry `json:"data"` - IOStats *NodesStatsNodeFSIOStats `json:"io_stats"` -} - -type NodesStatsNodeFSEntry struct { - Path string `json:"path"` - Mount string `json:"mount"` - Type string `json:"type"` - Total string `json:"total"` - TotalInBytes int64 `json:"total_in_bytes"` - Free string `json:"free"` - FreeInBytes int64 `json:"free_in_bytes"` - Available string `json:"available"` - AvailableInBytes int64 `json:"available_in_bytes"` - Spins string `json:"spins"` -} - -type NodesStatsNodeFSIOStats struct { - Devices []*NodesStatsNodeFSIOStatsEntry `json:"devices"` - Total *NodesStatsNodeFSIOStatsEntry `json:"total"` -} - -type NodesStatsNodeFSIOStatsEntry struct { - DeviceName string `json:"device_name"` - Operations int64 `json:"operations"` - ReadOperations int64 `json:"read_operations"` - WriteOperations int64 `json:"write_operations"` - ReadKilobytes int64 `json:"read_kilobytes"` - WriteKilobytes int64 `json:"write_kilobytes"` -} - -type NodesStatsNodeTransport struct { - ServerOpen int `json:"server_open"` - RxCount int64 `json:"rx_count"` - RxSize string `json:"rx_size"` - RxSizeInBytes int64 `json:"rx_size_in_bytes"` - TxCount int64 `json:"tx_count"` - TxSize string `json:"tx_size"` - TxSizeInBytes int64 `json:"tx_size_in_bytes"` -} - -type NodesStatsNodeHTTP struct { - CurrentOpen int `json:"current_open"` - TotalOpened int `json:"total_opened"` -} - -type NodesStatsBreaker struct { - LimitSize string `json:"limit_size"` - LimitSizeInBytes int64 `json:"limit_size_in_bytes"` - EstimatedSize string `json:"estimated_size"` - EstimatedSizeInBytes int64 `json:"estimated_size_in_bytes"` - Overhead float64 `json:"overhead"` - Tripped int64 `json:"tripped"` -} - -type NodesStatsScriptStats struct { - Compilations int64 `json:"compilations"` - CacheEvictions int64 `json:"cache_evictions"` -} - -type NodesStatsDiscovery struct { - ClusterStateQueue *NodesStatsDiscoveryStats `json:"cluster_state_queue"` -} - -type NodesStatsDiscoveryStats struct { - Total int64 
`json:"total"` - Pending int64 `json:"pending"` - Committed int64 `json:"committed"` -} - -type NodesStatsIngest struct { - Total *NodesStatsIngestStats `json:"total"` - Pipelines interface{} `json:"pipelines"` -} - -type NodesStatsIngestStats struct { - Count int64 `json:"count"` - Time string `json:"time"` - TimeInMillis int64 `json:"time_in_millis"` - Current int64 `json:"current"` - Failed int64 `json:"failed"` -} diff --git a/vendor/github.com/olivere/elastic/v7/ping.go b/vendor/github.com/olivere/elastic/v7/ping.go deleted file mode 100644 index 1ad7861..0000000 --- a/vendor/github.com/olivere/elastic/v7/ping.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// PingService checks if an Elasticsearch server on a given URL is alive. -// When asked for, it can also return various information about the -// Elasticsearch server, e.g. the Elasticsearch version number. -// -// Ping simply starts a HTTP GET request to the URL of the server. -// If the server responds with HTTP Status code 200 OK, the server is alive. -type PingService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - url string - timeout string - httpHeadOnly bool -} - -// PingResult is the result returned from querying the Elasticsearch server. -type PingResult struct { - Name string `json:"name"` - ClusterName string `json:"cluster_name"` - Version struct { - Number string `json:"number"` // e.g. "7.0.0" - BuildFlavor string `json:"build_flavor"` // e.g. "oss" or "default" - BuildType string `json:"build_type"` // e.g. "docker" - BuildHash string `json:"build_hash"` // e.g. "b7e28a7" - BuildDate string `json:"build_date"` // e.g. "2019-04-05T22:55:32.697037Z" - BuildSnapshot bool `json:"build_snapshot"` // e.g. false - LuceneVersion string `json:"lucene_version"` // e.g. "8.0.0" - MinimumWireCompatibilityVersion string `json:"minimum_wire_compatibility_version"` // e.g. "6.7.0" - MinimumIndexCompatibilityVersion string `json:"minimum_index_compatibility_version"` // e.g. "6.0.0-beta1" - } `json:"version"` - TagLine string `json:"tagline"` -} - -func NewPingService(client *Client) *PingService { - return &PingService{ - client: client, - url: DefaultURL, - httpHeadOnly: false, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *PingService) Pretty(pretty bool) *PingService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *PingService) Human(human bool) *PingService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *PingService) ErrorTrace(errorTrace bool) *PingService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. 
-func (s *PingService) FilterPath(filterPath ...string) *PingService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *PingService) Header(name string, value string) *PingService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *PingService) Headers(headers http.Header) *PingService { - s.headers = headers - return s -} - -func (s *PingService) URL(url string) *PingService { - s.url = url - return s -} - -func (s *PingService) Timeout(timeout string) *PingService { - s.timeout = timeout - return s -} - -// HeadOnly makes the service to only return the status code in Do; -// the PingResult will be nil. -func (s *PingService) HttpHeadOnly(httpHeadOnly bool) *PingService { - s.httpHeadOnly = httpHeadOnly - return s -} - -// Do returns the PingResult, the HTTP status code of the Elasticsearch -// server, and an error. -func (s *PingService) Do(ctx context.Context) (*PingResult, int, error) { - s.client.mu.RLock() - basicAuth := s.client.basicAuth - basicAuthUsername := s.client.basicAuthUsername - basicAuthPassword := s.client.basicAuthPassword - defaultHeaders := s.client.headers - s.client.mu.RUnlock() - - url_ := strings.TrimSuffix(s.url, "/") + "/" - - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if len(params) > 0 { - url_ += "?" + params.Encode() - } - - var method string - if s.httpHeadOnly { - method = "HEAD" - } else { - method = "GET" - } - - // Notice: This service must NOT use PerformRequest! - req, err := NewRequest(method, url_) - if err != nil { - return nil, 0, err - } - if len(s.headers) > 0 { - for key, values := range s.headers { - for _, v := range values { - req.Header.Add(key, v) - } - } - } - if len(defaultHeaders) > 0 { - for key, values := range defaultHeaders { - for _, v := range values { - req.Header.Add(key, v) - } - } - } - - if basicAuth { - req.SetBasicAuth(basicAuthUsername, basicAuthPassword) - } - - res, err := s.client.c.Do((*http.Request)(req).WithContext(ctx)) - if err != nil { - return nil, 0, err - } - defer res.Body.Close() - - var ret *PingResult - if !s.httpHeadOnly { - ret = new(PingResult) - if err := json.NewDecoder(res.Body).Decode(ret); err != nil { - return nil, res.StatusCode, err - } - } - - return ret, res.StatusCode, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/plugins.go b/vendor/github.com/olivere/elastic/v7/plugins.go deleted file mode 100644 index 60bda75..0000000 --- a/vendor/github.com/olivere/elastic/v7/plugins.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "context" - -// HasPlugin indicates whether the cluster has the named plugin. 
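A hedged sketch of the ping flow above; the URL is illustrative, and Do returns the parsed result together with the raw HTTP status code:

package example

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// ping checks liveness and reports server details. With
// HttpHeadOnly(true) only the status code would be meaningful and the
// result would be nil.
func ping(ctx context.Context, client *elastic.Client) {
	result, code, err := elastic.NewPingService(client).
		URL("http://localhost:9200"). // illustrative URL
		Do(ctx)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("HTTP %d: %s, Elasticsearch %s (cluster %s)",
		code, result.Name, result.Version.Number, result.ClusterName)
}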
-func (c *Client) HasPlugin(name string) (bool, error) { - plugins, err := c.Plugins() - if err != nil { - return false, nil - } - for _, plugin := range plugins { - if plugin == name { - return true, nil - } - } - return false, nil -} - -// Plugins returns the list of all registered plugins. -func (c *Client) Plugins() ([]string, error) { - stats, err := c.ClusterStats().Do(context.Background()) - if err != nil { - return nil, err - } - if stats == nil { - return nil, err - } - if stats.Nodes == nil { - return nil, err - } - var plugins []string - for _, plugin := range stats.Nodes.Plugins { - plugins = append(plugins, plugin.Name) - } - return plugins, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/query.go b/vendor/github.com/olivere/elastic/v7/query.go deleted file mode 100644 index ad01354..0000000 --- a/vendor/github.com/olivere/elastic/v7/query.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Query represents the generic query interface. A query's sole purpose -// is to return the source of the query as a JSON-serializable object. -// Returning map[string]interface{} is the norm for queries. -type Query interface { - // Source returns the JSON-serializable query request. - Source() (interface{}, error) -} diff --git a/vendor/github.com/olivere/elastic/v7/reindex.go b/vendor/github.com/olivere/elastic/v7/reindex.go deleted file mode 100644 index 2bf74a6..0000000 --- a/vendor/github.com/olivere/elastic/v7/reindex.go +++ /dev/null @@ -1,745 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "errors" - "fmt" - "net/http" - "net/url" - "strings" -) - -// ReindexService is a method to copy documents from one index to another. -// It is documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html. -type ReindexService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - refresh string - timeout string - waitForActiveShards string - waitForCompletion *bool - requestsPerSecond *int - slices interface{} - body interface{} - source *ReindexSource - destination *ReindexDestination - conflicts string - size *int - script *Script -} - -// NewReindexService creates a new ReindexService. -func NewReindexService(client *Client) *ReindexService { - return &ReindexService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ReindexService) Pretty(pretty bool) *ReindexService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ReindexService) Human(human bool) *ReindexService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. 
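The Query interface removed above has a single method, so callers can supply their own query types. A hedged sketch with a hypothetical rawQuery wrapper:

package example

import "github.com/olivere/elastic/v7"

// rawQuery is a hypothetical query type that wraps a pre-built JSON
// fragment; any type with this Source signature satisfies Query.
type rawQuery struct {
	src map[string]interface{}
}

// Compile-time check that rawQuery implements elastic.Query.
var _ elastic.Query = rawQuery{}

// Source returns the JSON-serializable query body.
func (q rawQuery) Source() (interface{}, error) {
	return q.src, nil
}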
-func (s *ReindexService) ErrorTrace(errorTrace bool) *ReindexService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ReindexService) FilterPath(filterPath ...string) *ReindexService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ReindexService) Header(name string, value string) *ReindexService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ReindexService) Headers(headers http.Header) *ReindexService { - s.headers = headers - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active before -// proceeding with the reindex operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value less than or -// equal to the total number of copies for the shard (number of replicas + 1). -func (s *ReindexService) WaitForActiveShards(waitForActiveShards string) *ReindexService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// RequestsPerSecond specifies the throttle to set on this request in sub-requests per second. -// -1 means set no throttle as does "unlimited" which is the only non-float this accepts. -func (s *ReindexService) RequestsPerSecond(requestsPerSecond int) *ReindexService { - s.requestsPerSecond = &requestsPerSecond - return s -} - -// Slices specifies the number of slices this task should be divided into. Defaults to 1. -// It used to be a number, but can be set to "auto" as of 6.7. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html#docs-reindex-slice -// for details. -func (s *ReindexService) Slices(slices interface{}) *ReindexService { - s.slices = slices - return s -} - -// Refresh indicates whether Elasticsearch should refresh the effected indexes -// immediately. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *ReindexService) Refresh(refresh string) *ReindexService { - s.refresh = refresh - return s -} - -// Timeout is the time each individual bulk request should wait for shards -// that are unavailable. -func (s *ReindexService) Timeout(timeout string) *ReindexService { - s.timeout = timeout - return s -} - -// WaitForCompletion indicates whether Elasticsearch should block until the -// reindex is complete. -func (s *ReindexService) WaitForCompletion(waitForCompletion bool) *ReindexService { - s.waitForCompletion = &waitForCompletion - return s -} - -// Source specifies the source of the reindexing process. -func (s *ReindexService) Source(source *ReindexSource) *ReindexService { - s.source = source - return s -} - -// SourceIndex specifies the source index of the reindexing process. -func (s *ReindexService) SourceIndex(index string) *ReindexService { - if s.source == nil { - s.source = NewReindexSource() - } - s.source = s.source.Index(index) - return s -} - -// Destination specifies the destination of the reindexing process. -func (s *ReindexService) Destination(destination *ReindexDestination) *ReindexService { - s.destination = destination - return s -} - -// DestinationIndex specifies the destination index of the reindexing process. 
-func (s *ReindexService) DestinationIndex(index string) *ReindexService { - if s.destination == nil { - s.destination = NewReindexDestination() - } - s.destination = s.destination.Index(index) - return s -} - -// DestinationIndexAndType specifies both the destination index and type -// of the reindexing process. -func (s *ReindexService) DestinationIndexAndType(index, typ string) *ReindexService { - if s.destination == nil { - s.destination = NewReindexDestination() - } - s.destination = s.destination.Index(index) - s.destination = s.destination.Type(typ) - return s -} - -// Conflicts indicates what to do when the process detects version conflicts. -// Possible values are "proceed" and "abort". -func (s *ReindexService) Conflicts(conflicts string) *ReindexService { - s.conflicts = conflicts - return s -} - -// AbortOnVersionConflict aborts the request on version conflicts. -// It is an alias to setting Conflicts("abort"). -func (s *ReindexService) AbortOnVersionConflict() *ReindexService { - s.conflicts = "abort" - return s -} - -// ProceedOnVersionConflict proceeds with the request on version conflicts. -// It is an alias to setting Conflicts("proceed"). -func (s *ReindexService) ProceedOnVersionConflict() *ReindexService { - s.conflicts = "proceed" - return s -} - -// Size sets an upper limit for the number of processed documents. -func (s *ReindexService) Size(size int) *ReindexService { - s.size = &size - return s -} - -// Script allows for modification of the documents as they are reindexed -// from source to destination. -func (s *ReindexService) Script(script *Script) *ReindexService { - s.script = script - return s -} - -// Body specifies the body of the request to send to Elasticsearch. -// It overrides settings specified with other setters, e.g. Query. -func (s *ReindexService) Body(body interface{}) *ReindexService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *ReindexService) buildURL() (string, url.Values, error) { - // Build URL path - path := "/_reindex" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.requestsPerSecond != nil { - params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) - } - if s.slices != nil { - params.Set("slices", fmt.Sprintf("%v", s.slices)) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if s.waitForCompletion != nil { - params.Set("wait_for_completion", fmt.Sprintf("%v", *s.waitForCompletion)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ReindexService) Validate() error { - var invalid []string - if s.body != nil { - return nil - } - if s.source == nil { - invalid = append(invalid, "Source") - } else { - if len(s.source.request.indices) == 0 { - invalid = append(invalid, "Source.Index") - } - } - if s.destination == nil { - invalid = append(invalid, "Destination") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// getBody returns the body part of the document request.
-func (s *ReindexService) getBody() (interface{}, error) { - if s.body != nil { - return s.body, nil - } - - body := make(map[string]interface{}) - - if s.conflicts != "" { - body["conflicts"] = s.conflicts - } - if s.size != nil { - body["size"] = *s.size - } - if s.script != nil { - out, err := s.script.Source() - if err != nil { - return nil, err - } - body["script"] = out - } - - src, err := s.source.Source() - if err != nil { - return nil, err - } - body["source"] = src - - dst, err := s.destination.Source() - if err != nil { - return nil, err - } - body["dest"] = dst - - return body, nil -} - -// Do executes the operation. -func (s *ReindexService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.getBody() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(BulkIndexByScrollResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - ret.Header = res.Header - return ret, nil -} - -// DoAsync executes the reindexing operation asynchronously by starting a new task. -// Callers need to use the Task Management API to watch the outcome of the reindexing -// operation. -func (s *ReindexService) DoAsync(ctx context.Context) (*StartTaskResult, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // DoAsync only makes sense with WaitForCompletion set to false - if s.waitForCompletion != nil && *s.waitForCompletion { - return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true") - } - f := false - s.waitForCompletion = &f - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.getBody() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(StartTaskResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - ret.Header = res.Header - return ret, nil -} - -// -- Source of Reindex -- - -// ReindexSource specifies the source of a Reindex process. -type ReindexSource struct { - request *SearchRequest - remoteInfo *ReindexRemoteInfo -} - -// NewReindexSource creates a new ReindexSource. -func NewReindexSource() *ReindexSource { - return &ReindexSource{ - request: NewSearchRequest(), - } -} - -// Request specifies the search request used for source. -func (r *ReindexSource) Request(request *SearchRequest) *ReindexSource { - if request == nil { - r.request = NewSearchRequest() - } else { - r.request = request - } - return r -} - -// SearchType is the search operation type. Possible values are -// "query_then_fetch" and "dfs_query_then_fetch". 
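Typical use of the ReindexService deleted above chains the setters and ends in Do or DoAsync. A sketch, assuming client is a connected *elastic.Client; the index names are illustrative and resp.Created/resp.Updated are assumed fields of BulkIndexByScrollResponse:

// Synchronous reindex that proceeds past version conflicts.
resp, err := elastic.NewReindexService(client).
	SourceIndex("old-index").
	DestinationIndex("new-index").
	ProceedOnVersionConflict().
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("created=%d updated=%d", resp.Created, resp.Updated)

// Asynchronous variant: DoAsync forces wait_for_completion=false and
// returns a task that can be polled via the Tasks API.
task, err := elastic.NewReindexService(client).
	SourceIndex("old-index").
	DestinationIndex("new-index").
	Slices("auto").
	RequestsPerSecond(500).
	DoAsync(context.Background())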
-func (r *ReindexSource) SearchType(searchType string) *ReindexSource { - r.request = r.request.SearchType(searchType) - return r -} - -func (r *ReindexSource) SearchTypeDfsQueryThenFetch() *ReindexSource { - r.request = r.request.SearchType("dfs_query_then_fetch") - return r -} - -func (r *ReindexSource) SearchTypeQueryThenFetch() *ReindexSource { - r.request = r.request.SearchType("query_then_fetch") - return r -} - -func (r *ReindexSource) Index(indices ...string) *ReindexSource { - r.request = r.request.Index(indices...) - return r -} - -func (r *ReindexSource) Type(types ...string) *ReindexSource { - r.request = r.request.Type(types...) - return r -} - -func (r *ReindexSource) Preference(preference string) *ReindexSource { - r.request = r.request.Preference(preference) - return r -} - -func (r *ReindexSource) RequestCache(requestCache bool) *ReindexSource { - r.request = r.request.RequestCache(requestCache) - return r -} - -func (r *ReindexSource) Scroll(scroll string) *ReindexSource { - r.request = r.request.Scroll(scroll) - return r -} - -func (r *ReindexSource) Query(query Query) *ReindexSource { - r.request = r.request.Query(query) - return r -} - -// Sort adds a sort order. -func (r *ReindexSource) Sort(field string, ascending bool) *ReindexSource { - r.request = r.request.Sort(field, ascending) - return r -} - -// SortWithInfo adds a sort order. -func (r *ReindexSource) SortWithInfo(info SortInfo) *ReindexSource { - r.request = r.request.SortWithInfo(info) - return r -} - -// SortBy adds a sort order. -func (r *ReindexSource) SortBy(sorter ...Sorter) *ReindexSource { - r.request = r.request.SortBy(sorter...) - return r -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (r *ReindexSource) FetchSource(fetchSource bool) *ReindexSource { - r.request = r.request.FetchSource(fetchSource) - return r -} - -// FetchSourceIncludeExclude specifies that _source should be returned -// with each hit, where "include" and "exclude" serve as a simple wildcard -// matcher that gets applied to its fields -// (e.g. include := []string{"obj1.*","obj2.*"}, exclude := []string{"description.*"}). -func (r *ReindexSource) FetchSourceIncludeExclude(include, exclude []string) *ReindexSource { - r.request = r.request.FetchSourceIncludeExclude(include, exclude) - return r -} - -// FetchSourceContext indicates how the _source should be fetched. -func (r *ReindexSource) FetchSourceContext(fsc *FetchSourceContext) *ReindexSource { - r.request = r.request.FetchSourceContext(fsc) - return r -} - -// RemoteInfo sets up reindexing from a remote cluster. -func (r *ReindexSource) RemoteInfo(ri *ReindexRemoteInfo) *ReindexSource { - r.remoteInfo = ri - return r -} - -// Source returns a serializable JSON request for the request. 
-func (r *ReindexSource) Source() (interface{}, error) { - src, err := r.request.sourceAsMap() - if err != nil { - return nil, err - } - source, ok := src.(map[string]interface{}) - if !ok { - return nil, errors.New("unable to use SearchRequest as map[string]interface{}") - } - - switch len(r.request.indices) { - case 1: - source["index"] = r.request.indices[0] - default: - source["index"] = r.request.indices - } - switch len(r.request.types) { - case 0: - case 1: - source["type"] = r.request.types[0] - default: - source["type"] = r.request.types - } - if r.remoteInfo != nil { - src, err := r.remoteInfo.Source() - if err != nil { - return nil, err - } - source["remote"] = src - } - return source, nil -} - -// ReindexRemoteInfo contains information for reindexing from a remote cluster. -type ReindexRemoteInfo struct { - host string - username string - password string - socketTimeout string // e.g. "1m" or "30s" - connectTimeout string // e.g. "1m" or "30s" -} - -// NewReindexRemoteInfo creates a new ReindexRemoteInfo. -func NewReindexRemoteInfo() *ReindexRemoteInfo { - return &ReindexRemoteInfo{} -} - -// Host sets the host information of the remote cluster. -// It must be of the form "http(s)://<hostname>:<port>" -func (ri *ReindexRemoteInfo) Host(host string) *ReindexRemoteInfo { - ri.host = host - return ri -} - -// Username sets the username to authenticate with the remote cluster. -func (ri *ReindexRemoteInfo) Username(username string) *ReindexRemoteInfo { - ri.username = username - return ri -} - -// Password sets the password to authenticate with the remote cluster. -func (ri *ReindexRemoteInfo) Password(password string) *ReindexRemoteInfo { - ri.password = password - return ri -} - -// SocketTimeout sets the socket timeout to connect with the remote cluster. -// Use ES compatible values like e.g. "30s" or "1m". -func (ri *ReindexRemoteInfo) SocketTimeout(timeout string) *ReindexRemoteInfo { - ri.socketTimeout = timeout - return ri -} - -// ConnectTimeout sets the connection timeout to connect with the remote cluster. -// Use ES compatible values like e.g. "30s" or "1m". -func (ri *ReindexRemoteInfo) ConnectTimeout(timeout string) *ReindexRemoteInfo { - ri.connectTimeout = timeout - return ri -} - -// Source returns the serializable JSON data for the request. -func (ri *ReindexRemoteInfo) Source() (interface{}, error) { - res := make(map[string]interface{}) - res["host"] = ri.host - if len(ri.username) > 0 { - res["username"] = ri.username - } - if len(ri.password) > 0 { - res["password"] = ri.password - } - if len(ri.socketTimeout) > 0 { - res["socket_timeout"] = ri.socketTimeout - } - if len(ri.connectTimeout) > 0 { - res["connect_timeout"] = ri.connectTimeout - } - return res, nil -} - -// -- Destination of Reindex -- - -// ReindexDestination is the destination of a Reindex API call. -// It is basically the meta data of a BulkIndexRequest. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html -// for details. -type ReindexDestination struct { - index string - typ string - routing string - parent string - opType string - version int64 // default is MATCH_ANY - versionType string // default is "internal" - pipeline string -} - -// NewReindexDestination returns a new ReindexDestination. -func NewReindexDestination() *ReindexDestination { - return &ReindexDestination{} -} - -// Index specifies name of the Elasticsearch index to use as the destination -// of a reindexing process.
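ReindexRemoteInfo above enables reindexing from a remote cluster; a sketch of wiring it into a source (host and credentials are placeholders):

src := elastic.NewReindexSource().
	Index("src-index").
	RemoteInfo(
		elastic.NewReindexRemoteInfo().
			Host("https://remote-cluster:9200").
			Username("reader").
			Password("secret").
			SocketTimeout("1m").
			ConnectTimeout("30s"),
	)
// src can then be passed to ReindexService.Source(src).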
-func (r *ReindexDestination) Index(index string) *ReindexDestination { - r.index = index - return r -} - -// Type specifies the Elasticsearch type to use for reindexing. -func (r *ReindexDestination) Type(typ string) *ReindexDestination { - r.typ = typ - return r -} - -// Routing specifies a routing value for the reindexing request. -// It can be "keep", "discard", or start with "=". The latter specifies -// the routing on the bulk request. -func (r *ReindexDestination) Routing(routing string) *ReindexDestination { - r.routing = routing - return r -} - -// Keep sets the routing on the bulk request sent for each match to the routing -// of the match (the default). -func (r *ReindexDestination) Keep() *ReindexDestination { - r.routing = "keep" - return r -} - -// Discard sets the routing on the bulk request sent for each match to null. -func (r *ReindexDestination) Discard() *ReindexDestination { - r.routing = "discard" - return r -} - -// Parent specifies the identifier of the parent document (if available). -func (r *ReindexDestination) Parent(parent string) *ReindexDestination { - r.parent = parent - return r -} - -// OpType specifies if this request should follow create-only or upsert -// behavior. This follows the OpType of the standard document index API. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-index_.html#operation-type -// for details. -func (r *ReindexDestination) OpType(opType string) *ReindexDestination { - r.opType = opType - return r -} - -// Version indicates the version of the document as part of an optimistic -// concurrency model. -func (r *ReindexDestination) Version(version int64) *ReindexDestination { - r.version = version - return r -} - -// VersionType specifies how versions are created. -func (r *ReindexDestination) VersionType(versionType string) *ReindexDestination { - r.versionType = versionType - return r -} - -// Pipeline specifies the pipeline to use for reindexing. -func (r *ReindexDestination) Pipeline(pipeline string) *ReindexDestination { - r.pipeline = pipeline - return r -} - -// Source returns a serializable JSON request for the request. -func (r *ReindexDestination) Source() (interface{}, error) { - source := make(map[string]interface{}) - if r.index != "" { - source["index"] = r.index - } - if r.typ != "" { - source["type"] = r.typ - } - if r.routing != "" { - source["routing"] = r.routing - } - if r.opType != "" { - source["op_type"] = r.opType - } - if r.parent != "" { - source["parent"] = r.parent - } - if r.version > 0 { - source["version"] = r.version - } - if r.versionType != "" { - source["version_type"] = r.versionType - } - if r.pipeline != "" { - source["pipeline"] = r.pipeline - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/request.go b/vendor/github.com/olivere/elastic/v7/request.go deleted file mode 100644 index 288c837..0000000 --- a/vendor/github.com/olivere/elastic/v7/request.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "bytes" - "compress/gzip" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "runtime" - "strings" -) - -// Elasticsearch-specific HTTP request -type Request http.Request - -// NewRequest is a http.Request and adds features such as encoding the body. 
-func NewRequest(method, url string) (*Request, error) { - req, err := http.NewRequest(method, url, nil) - if err != nil { - return nil, err - } - req.Header.Add("User-Agent", "elastic/"+Version+" ("+runtime.GOOS+"-"+runtime.GOARCH+")") - req.Header.Add("Accept", "application/json") - req.Header.Set("Content-Type", "application/json") - return (*Request)(req), nil -} - -// SetBasicAuth wraps http.Request's SetBasicAuth. -func (r *Request) SetBasicAuth(username, password string) { - ((*http.Request)(r)).SetBasicAuth(username, password) -} - -// SetBody encodes the body in the request. You may pass a flag to -// compress the request via gzip. -func (r *Request) SetBody(body interface{}, gzipCompress bool) error { - switch b := body.(type) { - case string: - if gzipCompress { - return r.setBodyGzip(b) - } - return r.setBodyString(b) - default: - if gzipCompress { - return r.setBodyGzip(body) - } - return r.setBodyJson(body) - } -} - -// setBodyJson encodes the body as a struct to be marshaled via json.Marshal. -func (r *Request) setBodyJson(data interface{}) error { - body, err := json.Marshal(data) - if err != nil { - return err - } - r.Header.Set("Content-Type", "application/json") - r.setBodyReader(bytes.NewReader(body)) - return nil -} - -// setBodyString encodes the body as a string. -func (r *Request) setBodyString(body string) error { - return r.setBodyReader(strings.NewReader(body)) -} - -// setBodyGzip gzip's the body. It accepts both strings and structs as body. -// The latter will be encoded via json.Marshal. -func (r *Request) setBodyGzip(body interface{}) error { - switch b := body.(type) { - case string: - buf := new(bytes.Buffer) - w := gzip.NewWriter(buf) - if _, err := w.Write([]byte(b)); err != nil { - return err - } - if err := w.Close(); err != nil { - return err - } - r.Header.Add("Content-Encoding", "gzip") - r.Header.Add("Vary", "Accept-Encoding") - return r.setBodyReader(bytes.NewReader(buf.Bytes())) - default: - data, err := json.Marshal(b) - if err != nil { - return err - } - buf := new(bytes.Buffer) - w := gzip.NewWriter(buf) - if _, err := w.Write(data); err != nil { - return err - } - if err := w.Close(); err != nil { - return err - } - r.Header.Add("Content-Encoding", "gzip") - r.Header.Add("Vary", "Accept-Encoding") - r.Header.Set("Content-Type", "application/json") - return r.setBodyReader(bytes.NewReader(buf.Bytes())) - } -} - -// setBodyReader writes the body from an io.Reader. -func (r *Request) setBodyReader(body io.Reader) error { - rc, ok := body.(io.ReadCloser) - if !ok && body != nil { - rc = ioutil.NopCloser(body) - } - r.Body = rc - if body != nil { - switch v := body.(type) { - case *strings.Reader: - r.ContentLength = int64(v.Len()) - case *bytes.Buffer: - r.ContentLength = int64(v.Len()) - } - } - return nil -} diff --git a/vendor/github.com/olivere/elastic/v7/rescore.go b/vendor/github.com/olivere/elastic/v7/rescore.go deleted file mode 100644 index 9b7eaee..0000000 --- a/vendor/github.com/olivere/elastic/v7/rescore.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
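The Request type above wraps http.Request so a body can be JSON-encoded and optionally gzip-compressed in one call; a sketch (the URL is illustrative):

req, err := elastic.NewRequest("POST", "http://localhost:9200/my-index/_doc")
if err != nil {
	log.Fatal(err)
}
// Marshals the map to JSON, gzips it, and sets Content-Type and
// Content-Encoding on the request.
if err := req.SetBody(map[string]interface{}{"field": "value"}, true); err != nil {
	log.Fatal(err)
}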
- -package elastic - -type Rescore struct { - rescorer Rescorer - windowSize *int - defaultRescoreWindowSize *int -} - -func NewRescore() *Rescore { - return &Rescore{} -} - -func (r *Rescore) WindowSize(windowSize int) *Rescore { - r.windowSize = &windowSize - return r -} - -func (r *Rescore) IsEmpty() bool { - return r.rescorer == nil -} - -func (r *Rescore) Rescorer(rescorer Rescorer) *Rescore { - r.rescorer = rescorer - return r -} - -func (r *Rescore) Source() (interface{}, error) { - source := make(map[string]interface{}) - if r.windowSize != nil { - source["window_size"] = *r.windowSize - } else if r.defaultRescoreWindowSize != nil { - source["window_size"] = *r.defaultRescoreWindowSize - } - rescorerSrc, err := r.rescorer.Source() - if err != nil { - return nil, err - } - source[r.rescorer.Name()] = rescorerSrc - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/rescorer.go b/vendor/github.com/olivere/elastic/v7/rescorer.go deleted file mode 100644 index ccd4bb8..0000000 --- a/vendor/github.com/olivere/elastic/v7/rescorer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -type Rescorer interface { - Name() string - Source() (interface{}, error) -} - -// -- Query Rescorer -- - -type QueryRescorer struct { - query Query - rescoreQueryWeight *float64 - queryWeight *float64 - scoreMode string -} - -func NewQueryRescorer(query Query) *QueryRescorer { - return &QueryRescorer{ - query: query, - } -} - -func (r *QueryRescorer) Name() string { - return "query" -} - -func (r *QueryRescorer) RescoreQueryWeight(rescoreQueryWeight float64) *QueryRescorer { - r.rescoreQueryWeight = &rescoreQueryWeight - return r -} - -func (r *QueryRescorer) QueryWeight(queryWeight float64) *QueryRescorer { - r.queryWeight = &queryWeight - return r -} - -func (r *QueryRescorer) ScoreMode(scoreMode string) *QueryRescorer { - r.scoreMode = scoreMode - return r -} - -func (r *QueryRescorer) Source() (interface{}, error) { - rescoreQuery, err := r.query.Source() - if err != nil { - return nil, err - } - - source := make(map[string]interface{}) - source["rescore_query"] = rescoreQuery - if r.queryWeight != nil { - source["query_weight"] = *r.queryWeight - } - if r.rescoreQueryWeight != nil { - source["rescore_query_weight"] = *r.rescoreQueryWeight - } - if r.scoreMode != "" { - source["score_mode"] = r.scoreMode - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/response.go b/vendor/github.com/olivere/elastic/v7/response.go deleted file mode 100644 index 200ed3d..0000000 --- a/vendor/github.com/olivere/elastic/v7/response.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" -) - -var ( - // ErrResponseSize is raised if a response body exceeds the given max body size. - ErrResponseSize = errors.New("elastic: response size too large") -) - -// Response represents a response from Elasticsearch. -type Response struct { - // StatusCode is the HTTP status code, e.g. 200. - StatusCode int - // Header is the HTTP header from the HTTP response. - // Keys in the map are canonicalized (see http.CanonicalHeaderKey). 
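A query rescorer as defined above is typically attached to a search to re-rank the top window of hits; a sketch of building one (the weights, window size, and match query are illustrative, with NewMatchQuery assumed from elsewhere in the package):

rescore := elastic.NewRescore().
	WindowSize(50).
	Rescorer(
		elastic.NewQueryRescorer(elastic.NewMatchQuery("title", "quick brown fox")).
			QueryWeight(0.7).
			RescoreQueryWeight(1.2).
			ScoreMode("total"),
	)
src, err := rescore.Source() // JSON-serializable body of the "rescore" section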
- Header http.Header - // Body is the deserialized response body. - Body json.RawMessage - // DeprecationWarnings lists all deprecation warnings returned from - // Elasticsearch. - DeprecationWarnings []string -} - -// newResponse creates a new response from the HTTP response. -func (c *Client) newResponse(res *http.Response, maxBodySize int64) (*Response, error) { - r := &Response{ - StatusCode: res.StatusCode, - Header: res.Header, - DeprecationWarnings: res.Header["Warning"], - } - if res.Body != nil { - body := io.Reader(res.Body) - if maxBodySize > 0 { - if res.ContentLength > maxBodySize { - return nil, ErrResponseSize - } - body = io.LimitReader(body, maxBodySize+1) - } - slurp, err := ioutil.ReadAll(body) - if err != nil { - return nil, err - } - if maxBodySize > 0 && int64(len(slurp)) > maxBodySize { - return nil, ErrResponseSize - } - // HEAD requests return a body but no content - if len(slurp) > 0 { - r.Body = json.RawMessage(slurp) - } - } - return r, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/retrier.go b/vendor/github.com/olivere/elastic/v7/retrier.go deleted file mode 100644 index e05ca66..0000000 --- a/vendor/github.com/olivere/elastic/v7/retrier.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "net/http" - "time" -) - -// RetrierFunc specifies the signature of a Retry function, and is an adapter -// to allow the use of ordinary Retry functions. If f is a function with the -// appropriate signature, RetrierFunc(f) is a Retrier that calls f. -type RetrierFunc func(context.Context, int, *http.Request, *http.Response, error) (time.Duration, bool, error) - -// Retry calls f. -func (f RetrierFunc) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) { - return f(ctx, retry, req, resp, err) -} - -// Retrier decides whether to retry a failed HTTP request with Elasticsearch. -type Retrier interface { - // Retry is called when a request has failed. It decides whether to retry - // the call, how long to wait for the next call, or whether to return an - // error (which will be returned to the service that started the HTTP - // request in the first place). - // - // Callers may also use this to inspect the HTTP request/response and - // the error that happened. Additional data can be passed through via - // the context. - Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) -} - -// -- StopRetrier -- - -// StopRetrier is an implementation that does no retries. -type StopRetrier struct { -} - -// NewStopRetrier returns a retrier that does no retries. -func NewStopRetrier() *StopRetrier { - return &StopRetrier{} -} - -// Retry does not retry. -func (r *StopRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) { - return 0, false, nil -} - -// -- BackoffRetrier -- - -// BackoffRetrier is an implementation that uses the given backoff strategy -// to decide whether and how long to wait before the next retry. -type BackoffRetrier struct { - backoff Backoff -} - -// NewBackoffRetrier returns a retrier that uses the given backoff strategy. -func NewBackoffRetrier(backoff Backoff) *BackoffRetrier { - return &BackoffRetrier{backoff: backoff} -} - -// Retry calls into the backoff strategy and its wait interval.
-func (r *BackoffRetrier) Retry(ctx context.Context, retry int, req *http.Request, resp *http.Response, err error) (time.Duration, bool, error) { - wait, goahead := r.backoff.Next(retry) - return wait, goahead, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/retry.go b/vendor/github.com/olivere/elastic/v7/retry.go deleted file mode 100644 index 3571a3b..0000000 --- a/vendor/github.com/olivere/elastic/v7/retry.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -// This file is based on code (c) 2014 Cenk Altı and governed by the MIT license. -// See https://github.com/cenkalti/backoff for original source. - -package elastic - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives error returned -// from an operation. -// -// Notice that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error) - -// Retry the function f until it does not return error or BackOff stops. -// f is guaranteed to be run at least once. -// It is the caller's responsibility to reset b after Retry returns. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b Backoff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. -func RetryNotify(operation Operation, b Backoff, notify Notify) error { - var err error - var wait time.Duration - var retry bool - var n int - - for { - if err = operation(); err == nil { - return nil - } - - n++ - wait, retry = b.Next(n) - if !retry { - return err - } - - if notify != nil { - notify(err) - } - - time.Sleep(wait) - } -} diff --git a/vendor/github.com/olivere/elastic/v7/run-es.sh b/vendor/github.com/olivere/elastic/v7/run-es.sh deleted file mode 100644 index 8b60fbc..0000000 --- a/vendor/github.com/olivere/elastic/v7/run-es.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -VERSION=${VERSION:=6.4.0} -docker run --rm -p 9200:9200 -e "http.host=0.0.0.0" -e "transport.host=127.0.0.1" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms1g -Xmx1g" docker.elastic.co/elasticsearch/elasticsearch-oss:$VERSION elasticsearch -Enetwork.host=_local_,_site_ -Enetwork.publish_host=_local_ diff --git a/vendor/github.com/olivere/elastic/v7/run-tests.sh b/vendor/github.com/olivere/elastic/v7/run-tests.sh deleted file mode 100644 index 1204ad3..0000000 --- a/vendor/github.com/olivere/elastic/v7/run-tests.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -go test . ./aws/... ./config/... ./trace/... ./uritemplates/... diff --git a/vendor/github.com/olivere/elastic/v7/script.go b/vendor/github.com/olivere/elastic/v7/script.go deleted file mode 100644 index ce3bd83..0000000 --- a/vendor/github.com/olivere/elastic/v7/script.go +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
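Retry and RetryNotify above implement the classic retry-with-backoff loop. A sketch pairing them with a backoff strategy (NewSimpleBackoff and its millisecond intervals are assumed from the package's backoff implementation, which is outside this hunk; client is a connected *elastic.Client):

b := elastic.NewSimpleBackoff(100, 500, 1000) // assumed helper; intervals in ms
err := elastic.RetryNotify(
	func() error {
		_, _, err := client.Ping("http://localhost:9200").Do(context.Background())
		return err
	},
	b,
	func(err error) { log.Printf("retrying after error: %v", err) },
)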
- -package elastic - -import ( - "encoding/json" - "errors" - "fmt" - "strings" -) - -// Script holds all the parameters necessary to compile or find in cache -// and then execute a script. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details of scripting. -type Script struct { - script string - typ string - lang string - params map[string]interface{} -} - -// NewScript creates and initializes a new Script. By default, it is of -// type "inline". Use NewScriptStored for a stored script (where type is "id"). -func NewScript(script string) *Script { - return &Script{ - script: script, - typ: "inline", - params: make(map[string]interface{}), - } -} - -// NewScriptInline creates and initializes a new inline script, i.e. code. -func NewScriptInline(script string) *Script { - return NewScript(script).Type("inline") -} - -// NewScriptStored creates and initializes a new stored script. -func NewScriptStored(script string) *Script { - return NewScript(script).Type("id") -} - -// Script is either the cache key of the script to be compiled/executed -// or the actual script source code for inline scripts. For indexed -// scripts this is the id used in the request. For file scripts this is -// the file name. -func (s *Script) Script(script string) *Script { - s.script = script - return s -} - -// Type sets the type of script: "inline" or "id". -func (s *Script) Type(typ string) *Script { - s.typ = typ - return s -} - -// Lang sets the language of the script. The default scripting language -// is Painless ("painless"). -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details. -func (s *Script) Lang(lang string) *Script { - s.lang = lang - return s -} - -// Param adds a key/value pair to the parameters that this script will be executed with. -func (s *Script) Param(name string, value interface{}) *Script { - if s.params == nil { - s.params = make(map[string]interface{}) - } - s.params[name] = value - return s -} - -// Params sets the map of parameters this script will be executed with. -func (s *Script) Params(params map[string]interface{}) *Script { - s.params = params - return s -} - -// Source returns the JSON serializable data for this Script. -func (s *Script) Source() (interface{}, error) { - if s.typ == "" && s.lang == "" && len(s.params) == 0 { - return s.script, nil - } - source := make(map[string]interface{}) - // Beginning with 6.0, the type can only be "source" or "id" - if s.typ == "" || s.typ == "inline" { - src, err := s.rawScriptSource(s.script) - if err != nil { - return nil, err - } - source["source"] = src - } else { - source["id"] = s.script - } - if s.lang != "" { - source["lang"] = s.lang - } - if len(s.params) > 0 { - source["params"] = s.params - } - return source, nil -} - -// rawScriptSource returns an embeddable script. If it uses a short -// script form, e.g. "ctx._source.likes++" (without the quotes), it -// is quoted. Otherwise it returns the raw script that will be directly -// embedded into the JSON data. -func (s *Script) rawScriptSource(script string) (interface{}, error) { - v := strings.TrimSpace(script) - if !strings.HasPrefix(v, "{") && !strings.HasPrefix(v, `"`) { - v = fmt.Sprintf("%q", v) - } - raw := json.RawMessage(v) - return &raw, nil -} - -// -- Script Field -- - -// ScriptField is a single script field. -type ScriptField struct { - FieldName string // name of the field - - script *Script - ignoreFailure *bool // used in e.g. 
ScriptSource -} - -// NewScriptField creates and initializes a new ScriptField. -func NewScriptField(fieldName string, script *Script) *ScriptField { - return &ScriptField{FieldName: fieldName, script: script} -} - -// IgnoreFailure indicates whether to ignore failures. It is used -// in e.g. ScriptSource. -func (f *ScriptField) IgnoreFailure(ignore bool) *ScriptField { - f.ignoreFailure = &ignore - return f -} - -// Source returns the serializable JSON for the ScriptField. -func (f *ScriptField) Source() (interface{}, error) { - if f.script == nil { - return nil, errors.New("ScriptField expects script") - } - source := make(map[string]interface{}) - src, err := f.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - if v := f.ignoreFailure; v != nil { - source["ignore_failure"] = *v - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/script_delete.go b/vendor/github.com/olivere/elastic/v7/script_delete.go deleted file mode 100644 index dc68336..0000000 --- a/vendor/github.com/olivere/elastic/v7/script_delete.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// DeleteScriptService removes a stored script in Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details. -type DeleteScriptService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - timeout string - masterTimeout string -} - -// NewDeleteScriptService creates a new DeleteScriptService. -func NewDeleteScriptService(client *Client) *DeleteScriptService { - return &DeleteScriptService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *DeleteScriptService) Pretty(pretty bool) *DeleteScriptService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *DeleteScriptService) Human(human bool) *DeleteScriptService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *DeleteScriptService) ErrorTrace(errorTrace bool) *DeleteScriptService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *DeleteScriptService) FilterPath(filterPath ...string) *DeleteScriptService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *DeleteScriptService) Header(name string, value string) *DeleteScriptService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *DeleteScriptService) Headers(headers http.Header) *DeleteScriptService { - s.headers = headers - return s -} - -// Id is the script ID. 
-func (s *DeleteScriptService) Id(id string) *DeleteScriptService { - s.id = id - return s -} - -// Timeout is an explicit operation timeout. -func (s *DeleteScriptService) Timeout(timeout string) *DeleteScriptService { - s.timeout = timeout - return s -} - -// MasterTimeout is the timeout for connecting to master. -func (s *DeleteScriptService) MasterTimeout(masterTimeout string) *DeleteScriptService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *DeleteScriptService) buildURL() (string, string, url.Values, error) { - var ( - err error - method = "DELETE" - path string - ) - - path, err = uritemplates.Expand("/_scripts/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return method, path, params, nil -} - -// Validate checks if the operation is valid. -func (s *DeleteScriptService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *DeleteScriptService) Do(ctx context.Context) (*DeleteScriptResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - method, path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: method, - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(DeleteScriptResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DeleteScriptResponse is the result of deleting a stored script -// in Elasticsearch. -type DeleteScriptResponse struct { - AcknowledgedResponse -} diff --git a/vendor/github.com/olivere/elastic/v7/script_get.go b/vendor/github.com/olivere/elastic/v7/script_get.go deleted file mode 100644 index 81cedac..0000000 --- a/vendor/github.com/olivere/elastic/v7/script_get.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// GetScriptService reads a stored script in Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details.
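Deleting a stored script with the service above reduces to an ID plus Do; a sketch (the script ID is illustrative, and res.Acknowledged comes from the embedded AcknowledgedResponse):

res, err := elastic.NewDeleteScriptService(client).
	Id("my-stored-script").
	Do(context.Background())
if err != nil {
	log.Fatal(err)
}
log.Printf("acknowledged: %v", res.Acknowledged)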
-type GetScriptService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string -} - -// NewGetScriptService creates a new GetScriptService. -func NewGetScriptService(client *Client) *GetScriptService { - return &GetScriptService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *GetScriptService) Pretty(pretty bool) *GetScriptService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *GetScriptService) Human(human bool) *GetScriptService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *GetScriptService) ErrorTrace(errorTrace bool) *GetScriptService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *GetScriptService) FilterPath(filterPath ...string) *GetScriptService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *GetScriptService) Header(name string, value string) *GetScriptService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *GetScriptService) Headers(headers http.Header) *GetScriptService { - s.headers = headers - return s -} - -// Id is the script ID. -func (s *GetScriptService) Id(id string) *GetScriptService { - s.id = id - return s -} - -// buildURL builds the URL for the operation. -func (s *GetScriptService) buildURL() (string, string, url.Values, error) { - var ( - err error - method = "GET" - path string - ) - - path, err = uritemplates.Expand("/_scripts/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return method, path, params, nil -} - -// Validate checks if the operation is valid. -func (s *GetScriptService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *GetScriptService) Do(ctx context.Context) (*GetScriptResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - method, path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: method, - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(GetScriptResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// GetScriptResponse is the result of getting a stored script -// in Elasticsearch. -type GetScriptResponse struct { - Id string `json:"_id"` - Found bool `json:"found"` - Script json.RawMessage `json:"script"` -} diff --git a/vendor/github.com/olivere/elastic/v7/script_put.go b/vendor/github.com/olivere/elastic/v7/script_put.go deleted file mode 100644 index f8b831a..0000000 --- a/vendor/github.com/olivere/elastic/v7/script_put.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// PutScriptService adds or updates a stored script in Elasticsearch. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html -// for details. -type PutScriptService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - context string - timeout string - masterTimeout string - bodyJson interface{} - bodyString string -} - -// NewPutScriptService creates a new PutScriptService. -func NewPutScriptService(client *Client) *PutScriptService { - return &PutScriptService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *PutScriptService) Pretty(pretty bool) *PutScriptService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *PutScriptService) Human(human bool) *PutScriptService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *PutScriptService) ErrorTrace(errorTrace bool) *PutScriptService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *PutScriptService) FilterPath(filterPath ...string) *PutScriptService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *PutScriptService) Header(name string, value string) *PutScriptService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *PutScriptService) Headers(headers http.Header) *PutScriptService { - s.headers = headers - return s -} - -// Id is the script ID. 
-func (s *PutScriptService) Id(id string) *PutScriptService { - s.id = id - return s -} - -// Context specifies the script context (optional). -func (s *PutScriptService) Context(context string) *PutScriptService { - s.context = context - return s -} - -// Timeout is an explicit operation timeout. -func (s *PutScriptService) Timeout(timeout string) *PutScriptService { - s.timeout = timeout - return s -} - -// MasterTimeout is the timeout for connecting to master. -func (s *PutScriptService) MasterTimeout(masterTimeout string) *PutScriptService { - s.masterTimeout = masterTimeout - return s -} - -// BodyJson is the document as a serializable JSON interface. -func (s *PutScriptService) BodyJson(body interface{}) *PutScriptService { - s.bodyJson = body - return s -} - -// BodyString is the document encoded as a string. -func (s *PutScriptService) BodyString(body string) *PutScriptService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *PutScriptService) buildURL() (string, string, url.Values, error) { - var ( - err error - method = "PUT" - path string - ) - - if s.context != "" { - path, err = uritemplates.Expand("/_scripts/{id}/{context}", map[string]string{ - "id": s.id, - "context": s.context, - }) - } else { - path, err = uritemplates.Expand("/_scripts/{id}", map[string]string{ - "id": s.id, - }) - } - if err != nil { - return "", "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return method, path, params, nil -} - -// Validate checks if the operation is valid. -func (s *PutScriptService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *PutScriptService) Do(ctx context.Context) (*PutScriptResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - method, path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: method, - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(PutScriptResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// PutScriptResponse is the result of saving a stored script -// in Elasticsearch.
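Storing a script and reading it back combines the put and get services above; a sketch (ID and body are illustrative):

ctx := context.Background()
_, err := elastic.NewPutScriptService(client).
	Id("my-stored-script").
	BodyString(`{"script":{"lang":"painless","source":"ctx._source.likes += params.num"}}`).
	Do(ctx)
if err != nil {
	log.Fatal(err)
}
got, err := elastic.NewGetScriptService(client).
	Id("my-stored-script").
	Do(ctx)
// got.Script holds the raw JSON of the stored script definition.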
-type PutScriptResponse struct { - AcknowledgedResponse -} diff --git a/vendor/github.com/olivere/elastic/v7/scroll.go b/vendor/github.com/olivere/elastic/v7/scroll.go deleted file mode 100644 index 3819d91..0000000 --- a/vendor/github.com/olivere/elastic/v7/scroll.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "io" - "net/http" - "net/url" - "strings" - "sync" - - "github.com/olivere/elastic/v7/uritemplates" -) - -const ( - // DefaultScrollKeepAlive is the default time a scroll cursor will be kept alive. - DefaultScrollKeepAlive = "5m" -) - -// ScrollService iterates over pages of search results from Elasticsearch. -type ScrollService struct { - client *Client - retrier Retrier - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - indices []string - types []string - keepAlive string - body interface{} - ss *SearchSource - size *int - routing string - preference string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - maxResponseSize int64 - - mu sync.RWMutex - scrollId string -} - -// NewScrollService initializes and returns a new ScrollService. -func NewScrollService(client *Client) *ScrollService { - builder := &ScrollService{ - client: client, - ss: NewSearchSource(), - keepAlive: DefaultScrollKeepAlive, - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ScrollService) Pretty(pretty bool) *ScrollService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ScrollService) Human(human bool) *ScrollService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ScrollService) ErrorTrace(errorTrace bool) *ScrollService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ScrollService) FilterPath(filterPath ...string) *ScrollService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ScrollService) Header(name string, value string) *ScrollService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ScrollService) Headers(headers http.Header) *ScrollService { - s.headers = headers - return s -} - -// Retrier allows to set specific retry logic for this ScrollService. -// If not specified, it will use the client's default retrier. -func (s *ScrollService) Retrier(retrier Retrier) *ScrollService { - s.retrier = retrier - return s -} - -// Index sets the name of one or more indices to iterate over. -func (s *ScrollService) Index(indices ...string) *ScrollService { - if s.indices == nil { - s.indices = make([]string, 0) - } - s.indices = append(s.indices, indices...) - return s -} - -// Type sets the name of one or more types to iterate over. -// -// Deprecated: Types are in the process of being removed. 
Instead of using a type, prefer to
-// filter on a field on the document.
-func (s *ScrollService) Type(types ...string) *ScrollService {
-	if s.types == nil {
-		s.types = make([]string, 0)
-	}
-	s.types = append(s.types, types...)
-	return s
-}
-
-// Scroll is an alias for KeepAlive, the time to keep
-// the cursor alive (e.g. "5m" for 5 minutes).
-func (s *ScrollService) Scroll(keepAlive string) *ScrollService {
-	s.keepAlive = keepAlive
-	return s
-}
-
-// KeepAlive sets the maximum time after which the cursor will expire.
-// It is "5m" by default.
-func (s *ScrollService) KeepAlive(keepAlive string) *ScrollService {
-	s.keepAlive = keepAlive
-	return s
-}
-
-// Size specifies the number of documents Elasticsearch should return
-// from each shard, per page.
-func (s *ScrollService) Size(size int) *ScrollService {
-	s.size = &size
-	return s
-}
-
-// Highlight allows highlighting search results on one or more fields.
-func (s *ScrollService) Highlight(highlight *Highlight) *ScrollService {
-	s.ss = s.ss.Highlight(highlight)
-	return s
-}
-
-// Body sets the raw body to send to Elasticsearch. This can be e.g. a string,
-// a map[string]interface{} or anything that can be serialized into JSON.
-// Notice that setting the body disables the use of SearchSource and many
-// other properties of the ScrollService.
-func (s *ScrollService) Body(body interface{}) *ScrollService {
-	s.body = body
-	return s
-}
-
-// SearchSource sets the search source builder to use with this iterator.
-// Notice that only a certain number of properties can be used when scrolling,
-// e.g. query and sorting.
-func (s *ScrollService) SearchSource(searchSource *SearchSource) *ScrollService {
-	s.ss = searchSource
-	if s.ss == nil {
-		s.ss = NewSearchSource()
-	}
-	return s
-}
-
-// Query sets the query to perform, e.g. a MatchAllQuery.
-func (s *ScrollService) Query(query Query) *ScrollService {
-	s.ss = s.ss.Query(query)
-	return s
-}
-
-// PostFilter is executed as the last filter. It only affects the
-// search hits but not facets. See
-// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-post-filter.html
-// for details.
-func (s *ScrollService) PostFilter(postFilter Query) *ScrollService {
-	s.ss = s.ss.PostFilter(postFilter)
-	return s
-}
-
-// Slice allows slicing the scroll request into several batches.
-// This is supported in Elasticsearch 5.0 or later.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#sliced-scroll
-// for details.
-func (s *ScrollService) Slice(sliceQuery Query) *ScrollService {
-	s.ss = s.ss.Slice(sliceQuery)
-	return s
-}
-
-// FetchSource indicates whether the response should contain the stored
-// _source for every hit.
-func (s *ScrollService) FetchSource(fetchSource bool) *ScrollService {
-	s.ss = s.ss.FetchSource(fetchSource)
-	return s
-}
-
-// FetchSourceContext indicates how the _source should be fetched.
-func (s *ScrollService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *ScrollService {
-	s.ss = s.ss.FetchSourceContext(fetchSourceContext)
-	return s
-}
-
-// Version can be set to true to return a version for each search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-version.html.
-func (s *ScrollService) Version(version bool) *ScrollService {
-	s.ss = s.ss.Version(version)
-	return s
-}
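The Slice option above supports consuming a large result set in parallel. A hedged sketch, where the index name, the slice count, and the consume worker are illustrative assumptions:

// Hedged sketch: split one scroll into two independent slices.
for i := 0; i < 2; i++ {
	slice := elastic.NewSliceQuery().Id(i).Max(2) // slice i of 2
	svc := client.Scroll("my-index").Slice(slice).Size(500)
	go consume(svc) // consume is an assumed worker that loops over svc.Do
}

-
-// Sort adds a sort order. This can have negative effects on the performance
-// of the scroll operation as Elasticsearch needs to sort first.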
-func (s *ScrollService) Sort(field string, ascending bool) *ScrollService { - s.ss = s.ss.Sort(field, ascending) - return s -} - -// SortWithInfo specifies a sort order. Notice that sorting can have a -// negative impact on scroll performance. -func (s *ScrollService) SortWithInfo(info SortInfo) *ScrollService { - s.ss = s.ss.SortWithInfo(info) - return s -} - -// SortBy specifies a sort order. Notice that sorting can have a -// negative impact on scroll performance. -func (s *ScrollService) SortBy(sorter ...Sorter) *ScrollService { - s.ss = s.ss.SortBy(sorter...) - return s -} - -// TrackTotalHits controls if the total hit count for the query should be tracked. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.1/search-request-track-total-hits.html -// for details. -func (s *ScrollService) TrackTotalHits(trackTotalHits interface{}) *ScrollService { - s.ss = s.ss.TrackTotalHits(trackTotalHits) - return s -} - -// Routing is a list of specific routing values to control the shards -// the search will be executed on. -func (s *ScrollService) Routing(routings ...string) *ScrollService { - s.routing = strings.Join(routings, ",") - return s -} - -// Preference sets the preference to execute the search. Defaults to -// randomize across shards ("random"). Can be set to "_local" to prefer -// local shards, "_primary" to execute on primary shards only, -// or a custom value which guarantees that the same order will be used -// across different requests. -func (s *ScrollService) Preference(preference string) *ScrollService { - s.preference = preference - return s -} - -// IgnoreUnavailable indicates whether the specified concrete indices -// should be ignored when unavailable (missing or closed). -func (s *ScrollService) IgnoreUnavailable(ignoreUnavailable bool) *ScrollService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string -// or when no indices have been specified). -func (s *ScrollService) AllowNoIndices(allowNoIndices bool) *ScrollService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *ScrollService) ExpandWildcards(expandWildcards string) *ScrollService { - s.expandWildcards = expandWildcards - return s -} - -// MaxResponseSize sets an upper limit on the response body size that we accept, -// to guard against OOM situations. -func (s *ScrollService) MaxResponseSize(maxResponseSize int64) *ScrollService { - s.maxResponseSize = maxResponseSize - return s -} - -// ScrollId specifies the identifier of a scroll in action. -func (s *ScrollService) ScrollId(scrollId string) *ScrollService { - s.mu.Lock() - s.scrollId = scrollId - s.mu.Unlock() - return s -} - -// Do returns the next search result. It will return io.EOF as error if there -// are no more search results. -func (s *ScrollService) Do(ctx context.Context) (*SearchResult, error) { - s.mu.RLock() - nextScrollId := s.scrollId - s.mu.RUnlock() - if len(nextScrollId) == 0 { - return s.first(ctx) - } - return s.next(ctx) -} - -// Clear cancels the current scroll operation. If you don't do this manually, -// the scroll will be expired automatically by Elasticsearch. You can control -// how long a scroll cursor is kept alive with the KeepAlive func. 
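Taken together, Do and Clear support the usual consumption loop. A hedged sketch, assuming an already-connected client and an illustrative index name:

// Hedged sketch: page through every result, then release the cursor.
func drainScroll(ctx context.Context, client *elastic.Client) error {
	svc := client.Scroll("my-index").Size(1000).KeepAlive("2m")
	defer svc.Clear(ctx)
	for {
		res, err := svc.Do(ctx)
		if err == io.EOF {
			return nil // no more pages
		}
		if err != nil {
			return err
		}
		for _, hit := range res.Hits.Hits {
			_ = hit.Source // raw JSON of one document
		}
	}
}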
-func (s *ScrollService) Clear(ctx context.Context) error { - s.mu.RLock() - scrollId := s.scrollId - s.mu.RUnlock() - if len(scrollId) == 0 { - return nil - } - - path := "/_search/scroll" - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - body := struct { - ScrollId []string `json:"scroll_id,omitempty"` - }{ - ScrollId: []string{scrollId}, - } - - _, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Body: body, - Retrier: s.retrier, - }) - if err != nil { - return err - } - - return nil -} - -// -- First -- - -// first takes the first page of search results. -func (s *ScrollService) first(ctx context.Context) (*SearchResult, error) { - // Get URL and parameters for request - path, params, err := s.buildFirstURL() - if err != nil { - return nil, err - } - - // Get HTTP request body - body, err := s.bodyFirst() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Retrier: s.retrier, - Headers: s.headers, - MaxResponseSize: s.maxResponseSize, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SearchResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - s.mu.Lock() - s.scrollId = ret.ScrollId - s.mu.Unlock() - if ret.Hits == nil || len(ret.Hits.Hits) == 0 { - return ret, io.EOF - } - return ret, nil -} - -// buildFirstURL builds the URL for retrieving the first page. 
-func (s *ScrollService) buildFirstURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.indices) == 0 && len(s.types) == 0 { - path = "/_search" - } else if len(s.indices) > 0 && len(s.types) == 0 { - path, err = uritemplates.Expand("/{index}/_search", map[string]string{ - "index": strings.Join(s.indices, ","), - }) - } else if len(s.indices) == 0 && len(s.types) > 0 { - path, err = uritemplates.Expand("/_all/{typ}/_search", map[string]string{ - "typ": strings.Join(s.types, ","), - }) - } else { - path, err = uritemplates.Expand("/{index}/{typ}/_search", map[string]string{ - "index": strings.Join(s.indices, ","), - "typ": strings.Join(s.types, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - // Always add "hits._scroll_id", otherwise we cannot scroll - var found bool - for _, path := range s.filterPath { - if path == "_scroll_id" { - found = true - break - } - } - if !found { - s.filterPath = append(s.filterPath, "_scroll_id") - } - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.size != nil && *s.size > 0 { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - if len(s.keepAlive) > 0 { - params.Set("scroll", s.keepAlive) - } - if len(s.routing) > 0 { - params.Set("routing", s.routing) - } - if len(s.preference) > 0 { - params.Set("preference", s.preference) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if len(s.expandWildcards) > 0 { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - - return path, params, nil -} - -// bodyFirst returns the request to fetch the first batch of results. -func (s *ScrollService) bodyFirst() (interface{}, error) { - var err error - var body interface{} - - if s.body != nil { - body = s.body - } else { - // Use _doc sort by default if none is specified - if !s.ss.hasSort() { - // Use efficient sorting when no user-defined query/body is specified - s.ss = s.ss.SortBy(SortByDoc{}) - } - - // Body from search source - body, err = s.ss.Source() - if err != nil { - return nil, err - } - } - - return body, nil -} - -// -- Next -- - -func (s *ScrollService) next(ctx context.Context) (*SearchResult, error) { - // Get URL for request - path, params, err := s.buildNextURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.bodyNext() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Retrier: s.retrier, - Headers: s.headers, - MaxResponseSize: s.maxResponseSize, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SearchResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - s.mu.Lock() - s.scrollId = ret.ScrollId - s.mu.Unlock() - if ret.Hits == nil || len(ret.Hits.Hits) == 0 { - return ret, io.EOF - } - return ret, nil -} - -// buildNextURL builds the URL for the operation. 
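Two details above are easy to miss: buildFirstURL force-appends _scroll_id to any user-supplied filter_path so the cursor id survives response filtering, and when no body or sort is given, bodyFirst defaults to sorting by _doc, the most efficient order for scrolling. A hedged sketch of a filtered scroll (field names and client are illustrative):

// Hedged sketch: trim the response but keep scrolling working; the
// service adds _scroll_id to filter_path automatically.
svc := client.Scroll("my-index").
	FilterPath("hits.hits._source", "hits.total").
	Size(1000)
_ = svc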
-func (s *ScrollService) buildNextURL() (string, url.Values, error) {
-	path := "/_search/scroll"
-
-	// Add query string parameters
-	params := url.Values{}
-	if v := s.pretty; v != nil {
-		params.Set("pretty", fmt.Sprint(*v))
-	}
-	if v := s.human; v != nil {
-		params.Set("human", fmt.Sprint(*v))
-	}
-	if v := s.errorTrace; v != nil {
-		params.Set("error_trace", fmt.Sprint(*v))
-	}
-	if len(s.filterPath) > 0 {
-		// Always add "hits._scroll_id", otherwise we cannot scroll
-		var found bool
-		for _, path := range s.filterPath {
-			if path == "_scroll_id" {
-				found = true
-				break
-			}
-		}
-		if !found {
-			s.filterPath = append(s.filterPath, "_scroll_id")
-		}
-		params.Set("filter_path", strings.Join(s.filterPath, ","))
-	}
-
-	return path, params, nil
-}
-
-// bodyNext returns the request to fetch the next batch of results.
-func (s *ScrollService) bodyNext() (interface{}, error) {
-	s.mu.RLock()
-	body := struct {
-		Scroll   string `json:"scroll"`
-		ScrollId string `json:"scroll_id,omitempty"`
-	}{
-		Scroll:   s.keepAlive,
-		ScrollId: s.scrollId,
-	}
-	s.mu.RUnlock()
-	return body, nil
-}
-
-// DocvalueField adds a single field to load from the field data cache
-// and return as part of the search.
-func (s *ScrollService) DocvalueField(docvalueField string) *ScrollService {
-	s.ss = s.ss.DocvalueField(docvalueField)
-	return s
-}
-
-// DocvalueFieldWithFormat adds a single field to load from the field data cache
-// and return as part of the search.
-func (s *ScrollService) DocvalueFieldWithFormat(docvalueField DocvalueField) *ScrollService {
-	s.ss = s.ss.DocvalueFieldWithFormat(docvalueField)
-	return s
-}
-
-// DocvalueFields adds one or more fields to load from the field data cache
-// and return as part of the search.
-func (s *ScrollService) DocvalueFields(docvalueFields ...string) *ScrollService {
-	s.ss = s.ss.DocvalueFields(docvalueFields...)
-	return s
-}
-
-// DocvalueFieldsWithFormat adds one or more fields to load from the field data cache
-// and return as part of the search.
-func (s *ScrollService) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *ScrollService {
-	s.ss = s.ss.DocvalueFieldsWithFormat(docvalueFields...)
-	return s
-}
diff --git a/vendor/github.com/olivere/elastic/v7/search.go b/vendor/github.com/olivere/elastic/v7/search.go
deleted file mode 100644
index 801dff6..0000000
--- a/vendor/github.com/olivere/elastic/v7/search.go
+++ /dev/null
@@ -1,844 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/url"
-	"reflect"
-	"strings"
-
-	"github.com/olivere/elastic/v7/uritemplates"
-)
-
-// Search for documents in Elasticsearch.
-type SearchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - searchSource *SearchSource // q - source interface{} - searchType string // search_type - index []string - typ []string - routing string // routing - preference string // preference - requestCache *bool // request_cache - ignoreUnavailable *bool // ignore_unavailable - ignoreThrottled *bool // ignore_throttled - allowNoIndices *bool // allow_no_indices - expandWildcards string // expand_wildcards - lenient *bool // lenient - maxResponseSize int64 - allowPartialSearchResults *bool // allow_partial_search_results - typedKeys *bool // typed_keys - seqNoPrimaryTerm *bool // seq_no_primary_term - batchedReduceSize *int // batched_reduce_size - maxConcurrentShardRequests *int // max_concurrent_shard_requests - preFilterShardSize *int // pre_filter_shard_size - restTotalHitsAsInt *bool // rest_total_hits_as_int - - ccsMinimizeRoundtrips *bool // ccs_minimize_roundtrips - -} - -// NewSearchService creates a new service for searching in Elasticsearch. -func NewSearchService(client *Client) *SearchService { - builder := &SearchService{ - client: client, - searchSource: NewSearchSource(), - } - return builder -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SearchService) Pretty(pretty bool) *SearchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SearchService) Human(human bool) *SearchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SearchService) ErrorTrace(errorTrace bool) *SearchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SearchService) FilterPath(filterPath ...string) *SearchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SearchService) Header(name string, value string) *SearchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SearchService) Headers(headers http.Header) *SearchService { - s.headers = headers - return s -} - -// SearchSource sets the search source builder to use with this service. -func (s *SearchService) SearchSource(searchSource *SearchSource) *SearchService { - s.searchSource = searchSource - if s.searchSource == nil { - s.searchSource = NewSearchSource() - } - return s -} - -// Source allows the user to set the request body manually without using -// any of the structs and interfaces in Elastic. -func (s *SearchService) Source(source interface{}) *SearchService { - s.source = source - return s -} - -// Index sets the names of the indices to use for search. -func (s *SearchService) Index(index ...string) *SearchService { - s.index = append(s.index, index...) - return s -} - -// Type adds search restrictions for a list of types. -// -// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to -// filter on a field on the document. 
-func (s *SearchService) Type(typ ...string) *SearchService { - s.typ = append(s.typ, typ...) - return s -} - -// Timeout sets the timeout to use, e.g. "1s" or "1000ms". -func (s *SearchService) Timeout(timeout string) *SearchService { - s.searchSource = s.searchSource.Timeout(timeout) - return s -} - -// Profile sets the Profile API flag on the search source. -// When enabled, a search executed by this service will return query -// profiling data. -func (s *SearchService) Profile(profile bool) *SearchService { - s.searchSource = s.searchSource.Profile(profile) - return s -} - -// Collapse adds field collapsing. -func (s *SearchService) Collapse(collapse *CollapseBuilder) *SearchService { - s.searchSource = s.searchSource.Collapse(collapse) - return s -} - -// TimeoutInMillis sets the timeout in milliseconds. -func (s *SearchService) TimeoutInMillis(timeoutInMillis int) *SearchService { - s.searchSource = s.searchSource.TimeoutInMillis(timeoutInMillis) - return s -} - -// TerminateAfter specifies the maximum number of documents to collect for -// each shard, upon reaching which the query execution will terminate early. -func (s *SearchService) TerminateAfter(terminateAfter int) *SearchService { - s.searchSource = s.searchSource.TerminateAfter(terminateAfter) - return s -} - -// SearchType sets the search operation type. Valid values are: -// "dfs_query_then_fetch" and "query_then_fetch". -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-search-type.html -// for details. -func (s *SearchService) SearchType(searchType string) *SearchService { - s.searchType = searchType - return s -} - -// Routing is a list of specific routing values to control the shards -// the search will be executed on. -func (s *SearchService) Routing(routings ...string) *SearchService { - s.routing = strings.Join(routings, ",") - return s -} - -// Preference sets the preference to execute the search. Defaults to -// randomize across shards ("random"). Can be set to "_local" to prefer -// local shards, "_primary" to execute on primary shards only, -// or a custom value which guarantees that the same order will be used -// across different requests. -func (s *SearchService) Preference(preference string) *SearchService { - s.preference = preference - return s -} - -// RequestCache indicates whether the cache should be used for this -// request or not, defaults to index level setting. -func (s *SearchService) RequestCache(requestCache bool) *SearchService { - s.requestCache = &requestCache - return s -} - -// Query sets the query to perform, e.g. MatchAllQuery. -func (s *SearchService) Query(query Query) *SearchService { - s.searchSource = s.searchSource.Query(query) - return s -} - -// PostFilter will be executed after the query has been executed and -// only affects the search hits, not the aggregations. -// This filter is always executed as the last filtering mechanism. -func (s *SearchService) PostFilter(postFilter Query) *SearchService { - s.searchSource = s.searchSource.PostFilter(postFilter) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *SearchService) FetchSource(fetchSource bool) *SearchService { - s.searchSource = s.searchSource.FetchSource(fetchSource) - return s -} - -// FetchSourceContext indicates how the _source should be fetched. 
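The builder methods above compose into a single request. A hedged sketch, where the index name, field names, and client are illustrative assumptions:

// Hedged sketch: a term query plus a post filter that only trims hits.
res, err := client.Search("my-index").
	Query(elastic.NewTermQuery("user.name", "alice")).
	PostFilter(elastic.NewRangeQuery("timestamp").Gte("now-1d")).
	FetchSource(true).
	Size(50).
	Do(context.Background())
if err != nil {
	// handle error
}
_ = res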
-func (s *SearchService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchService {
-	s.searchSource = s.searchSource.FetchSourceContext(fetchSourceContext)
-	return s
-}
-
-// Highlight adds highlighting to the search.
-func (s *SearchService) Highlight(highlight *Highlight) *SearchService {
-	s.searchSource = s.searchSource.Highlight(highlight)
-	return s
-}
-
-// GlobalSuggestText defines the global text to use with all suggesters.
-// This avoids repetition.
-func (s *SearchService) GlobalSuggestText(globalText string) *SearchService {
-	s.searchSource = s.searchSource.GlobalSuggestText(globalText)
-	return s
-}
-
-// Suggester adds a suggester to the search.
-func (s *SearchService) Suggester(suggester Suggester) *SearchService {
-	s.searchSource = s.searchSource.Suggester(suggester)
-	return s
-}
-
-// Aggregation adds an aggregation to perform as part of the search.
-func (s *SearchService) Aggregation(name string, aggregation Aggregation) *SearchService {
-	s.searchSource = s.searchSource.Aggregation(name, aggregation)
-	return s
-}
-
-// MinScore sets the minimum score below which docs will be filtered out.
-func (s *SearchService) MinScore(minScore float64) *SearchService {
-	s.searchSource = s.searchSource.MinScore(minScore)
-	return s
-}
-
-// From index to start the search from. Defaults to 0.
-func (s *SearchService) From(from int) *SearchService {
-	s.searchSource = s.searchSource.From(from)
-	return s
-}
-
-// Size is the number of search hits to return. Defaults to 10.
-func (s *SearchService) Size(size int) *SearchService {
-	s.searchSource = s.searchSource.Size(size)
-	return s
-}
-
-// Explain indicates whether each search hit should be returned with
-// an explanation of the hit (ranking).
-func (s *SearchService) Explain(explain bool) *SearchService {
-	s.searchSource = s.searchSource.Explain(explain)
-	return s
-}
-
-// Version indicates whether each search hit should be returned with
-// a version associated with it.
-func (s *SearchService) Version(version bool) *SearchService {
-	s.searchSource = s.searchSource.Version(version)
-	return s
-}
-
-// Sort adds a sort order.
-func (s *SearchService) Sort(field string, ascending bool) *SearchService {
-	s.searchSource = s.searchSource.Sort(field, ascending)
-	return s
-}
-
-// SortWithInfo adds a sort order.
-func (s *SearchService) SortWithInfo(info SortInfo) *SearchService {
-	s.searchSource = s.searchSource.SortWithInfo(info)
-	return s
-}
-
-// SortBy adds a sort order.
-func (s *SearchService) SortBy(sorter ...Sorter) *SearchService {
-	s.searchSource = s.searchSource.SortBy(sorter...)
-	return s
-}
-
-// DocvalueField adds a single field to load from the field data cache
-// and return as part of the search.
-func (s *SearchService) DocvalueField(docvalueField string) *SearchService {
-	s.searchSource = s.searchSource.DocvalueField(docvalueField)
-	return s
-}
-
-// DocvalueFieldWithFormat adds a single field to load from the field data cache
-// and return as part of the search.
-func (s *SearchService) DocvalueFieldWithFormat(docvalueField DocvalueField) *SearchService {
-	s.searchSource = s.searchSource.DocvalueFieldWithFormat(docvalueField)
-	return s
-}
-
-// DocvalueFields adds one or more fields to load from the field data cache
-// and return as part of the search.
-func (s *SearchService) DocvalueFields(docvalueFields ...string) *SearchService {
-	s.searchSource = s.searchSource.DocvalueFields(docvalueFields...)
- return s -} - -// DocvalueFieldsWithFormat adds one or more fields to load from the field data cache -// and return as part of the search. -func (s *SearchService) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *SearchService { - s.searchSource = s.searchSource.DocvalueFieldsWithFormat(docvalueFields...) - return s -} - -// NoStoredFields indicates that no stored fields should be loaded, resulting in only -// id and type to be returned per field. -func (s *SearchService) NoStoredFields() *SearchService { - s.searchSource = s.searchSource.NoStoredFields() - return s -} - -// StoredField adds a single field to load and return (note, must be stored) as -// part of the search request. If none are specified, the source of the -// document will be returned. -func (s *SearchService) StoredField(fieldName string) *SearchService { - s.searchSource = s.searchSource.StoredField(fieldName) - return s -} - -// StoredFields sets the fields to load and return as part of the search request. -// If none are specified, the source of the document will be returned. -func (s *SearchService) StoredFields(fields ...string) *SearchService { - s.searchSource = s.searchSource.StoredFields(fields...) - return s -} - -// TrackScores is applied when sorting and controls if scores will be -// tracked as well. Defaults to false. -func (s *SearchService) TrackScores(trackScores bool) *SearchService { - s.searchSource = s.searchSource.TrackScores(trackScores) - return s -} - -// TrackTotalHits controls if the total hit count for the query should be tracked. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.1/search-request-track-total-hits.html -// for details. -func (s *SearchService) TrackTotalHits(trackTotalHits interface{}) *SearchService { - s.searchSource = s.searchSource.TrackTotalHits(trackTotalHits) - return s -} - -// SearchAfter allows a different form of pagination by using a live cursor, -// using the results of the previous page to help the retrieval of the next. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-search-after.html -func (s *SearchService) SearchAfter(sortValues ...interface{}) *SearchService { - s.searchSource = s.searchSource.SearchAfter(sortValues...) - return s -} - -// DefaultRescoreWindowSize sets the rescore window size for rescores -// that don't specify their window. -func (s *SearchService) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchService { - s.searchSource = s.searchSource.DefaultRescoreWindowSize(defaultRescoreWindowSize) - return s -} - -// Rescorer adds a rescorer to the search. -func (s *SearchService) Rescorer(rescore *Rescore) *SearchService { - s.searchSource = s.searchSource.Rescorer(rescore) - return s -} - -// IgnoreUnavailable indicates whether the specified concrete indices -// should be ignored when unavailable (missing or closed). -func (s *SearchService) IgnoreUnavailable(ignoreUnavailable bool) *SearchService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// IgnoreThrottled indicates whether specified concrete, expanded or aliased -// indices should be ignored when throttled. -func (s *SearchService) IgnoreThrottled(ignoreThrottled bool) *SearchService { - s.ignoreThrottled = &ignoreThrottled - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string -// or when no indices have been specified). 
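SearchAfter (above) replaces deep from/size paging with a live cursor. A hedged sketch of fetching the next page, where the index name and sort field are illustrative:

// Hedged sketch: resume after the last hit of the previous page.
page1, err := client.Search("my-index").Sort("event.id", true).Size(100).Do(context.Background())
if err == nil && len(page1.Hits.Hits) > 0 {
	last := page1.Hits.Hits[len(page1.Hits.Hits)-1]
	page2 := client.Search("my-index").
		Sort("event.id", true).
		Size(100).
		SearchAfter(last.Sort...) // sort values of the last hit
	_ = page2
}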
-func (s *SearchService) AllowNoIndices(allowNoIndices bool) *SearchService {
-	s.allowNoIndices = &allowNoIndices
-	return s
-}
-
-// ExpandWildcards indicates whether to expand wildcard expression to
-// concrete indices that are open, closed or both.
-func (s *SearchService) ExpandWildcards(expandWildcards string) *SearchService {
-	s.expandWildcards = expandWildcards
-	return s
-}
-
-// Lenient specifies whether format-based query failures (such as providing
-// text to a numeric field) should be ignored.
-func (s *SearchService) Lenient(lenient bool) *SearchService {
-	s.lenient = &lenient
-	return s
-}
-
-// MaxResponseSize sets an upper limit on the response body size that we accept,
-// to guard against OOM situations.
-func (s *SearchService) MaxResponseSize(maxResponseSize int64) *SearchService {
-	s.maxResponseSize = maxResponseSize
-	return s
-}
-
-// AllowPartialSearchResults indicates if an error should be returned if
-// there is a partial search failure or timeout.
-func (s *SearchService) AllowPartialSearchResults(enabled bool) *SearchService {
-	s.allowPartialSearchResults = &enabled
-	return s
-}
-
-// TypedKeys specifies whether aggregation and suggester names should be
-// prefixed by their respective types in the response.
-func (s *SearchService) TypedKeys(enabled bool) *SearchService {
-	s.typedKeys = &enabled
-	return s
-}
-
-// SeqNoPrimaryTerm specifies whether to return sequence number and
-// primary term of the last modification of each hit.
-func (s *SearchService) SeqNoPrimaryTerm(enabled bool) *SearchService {
-	s.seqNoPrimaryTerm = &enabled
-	return s
-}
-
-// BatchedReduceSize specifies the number of shard results that should be reduced
-// at once on the coordinating node. This value should be used as a protection
-// mechanism to reduce the memory overhead per search request if the potential
-// number of shards in the request can be large.
-func (s *SearchService) BatchedReduceSize(size int) *SearchService {
-	s.batchedReduceSize = &size
-	return s
-}
-
-// MaxConcurrentShardRequests specifies the number of shard requests that this
-// search executes concurrently. This value should be used to limit the impact
-// of the search on the cluster by capping the number of concurrent shard
-// requests.
-func (s *SearchService) MaxConcurrentShardRequests(max int) *SearchService {
-	s.maxConcurrentShardRequests = &max
-	return s
-}
-
-// PreFilterShardSize specifies a threshold that enforces a pre-filter roundtrip
-// to prefilter search shards based on query rewriting if the number of shards
-// the search request expands to exceeds the threshold. This filter roundtrip
-// can limit the number of shards significantly if for instance a shard can
-// not match any documents based on its rewrite method, i.e. if date filters are
-// mandatory to match but the shard bounds and the query are disjoint.
-func (s *SearchService) PreFilterShardSize(threshold int) *SearchService {
-	s.preFilterShardSize = &threshold
-	return s
-}
-
-// RestTotalHitsAsInt indicates whether hits.total should be rendered as an
-// integer or an object in the rest search response.
-func (s *SearchService) RestTotalHitsAsInt(enabled bool) *SearchService {
-	s.restTotalHitsAsInt = &enabled
-	return s
-}
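Most of the options above exist to protect the cluster rather than shape results. A hedged sketch of a defensively configured search (the values are illustrative, not recommendations):

// Hedged sketch: cap memory and fan-out for a potentially heavy search.
svc := client.Search("my-index").
	MaxResponseSize(64 << 20).       // refuse response bodies over ~64 MiB
	BatchedReduceSize(256).          // reduce shard results in smaller batches
	MaxConcurrentShardRequests(5).   // limit concurrent shard fan-out
	AllowPartialSearchResults(false) // error out instead of returning partial data
_ = svc

-
-// CCSMinimizeRoundtrips indicates whether network round-trips should be minimized
-// as part of cross-cluster search requests execution.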
-func (s *SearchService) CCSMinimizeRoundtrips(enabled bool) *SearchService { - s.ccsMinimizeRoundtrips = &enabled - return s -} - -// buildURL builds the URL for the operation. -func (s *SearchService) buildURL() (string, url.Values, error) { - var err error - var path string - - if len(s.index) > 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_search", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_search", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else if len(s.typ) > 0 { - path, err = uritemplates.Expand("/_all/{type}/_search", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } else { - path = "/_search" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.searchType != "" { - params.Set("search_type", s.searchType) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if v := s.requestCache; v != nil { - params.Set("request_cache", fmt.Sprint(*v)) - } - if v := s.allowNoIndices; v != nil { - params.Set("allow_no_indices", fmt.Sprint(*v)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if v := s.lenient; v != nil { - params.Set("lenient", fmt.Sprint(*v)) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - if v := s.ignoreThrottled; v != nil { - params.Set("ignore_throttled", fmt.Sprint(*v)) - } - if s.seqNoPrimaryTerm != nil { - params.Set("seq_no_primary_term", fmt.Sprint(*s.seqNoPrimaryTerm)) - } - if v := s.allowPartialSearchResults; v != nil { - params.Set("allow_partial_search_results", fmt.Sprint(*v)) - } - if v := s.typedKeys; v != nil { - params.Set("typed_keys", fmt.Sprint(*v)) - } - if v := s.batchedReduceSize; v != nil { - params.Set("batched_reduce_size", fmt.Sprint(*v)) - } - if v := s.maxConcurrentShardRequests; v != nil { - params.Set("max_concurrent_shard_requests", fmt.Sprint(*v)) - } - if v := s.preFilterShardSize; v != nil { - params.Set("pre_filter_shard_size", fmt.Sprint(*v)) - } - if v := s.restTotalHitsAsInt; v != nil { - params.Set("rest_total_hits_as_int", fmt.Sprint(*v)) - } - if v := s.ccsMinimizeRoundtrips; v != nil { - params.Set("ccs_minimize_roundtrips", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SearchService) Validate() error { - return nil -} - -// Do executes the search and returns a SearchResult. 
-func (s *SearchService) Do(ctx context.Context) (*SearchResult, error) {
-	// Check pre-conditions
-	if err := s.Validate(); err != nil {
-		return nil, err
-	}
-
-	// Get URL for request
-	path, params, err := s.buildURL()
-	if err != nil {
-		return nil, err
-	}
-
-	// Perform request
-	var body interface{}
-	if s.source != nil {
-		body = s.source
-	} else {
-		src, err := s.searchSource.Source()
-		if err != nil {
-			return nil, err
-		}
-		body = src
-	}
-	res, err := s.client.PerformRequest(ctx, PerformRequestOptions{
-		Method:          "POST",
-		Path:            path,
-		Params:          params,
-		Body:            body,
-		Headers:         s.headers,
-		MaxResponseSize: s.maxResponseSize,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	// Return search results
-	ret := new(SearchResult)
-	if err := s.client.decoder.Decode(res.Body, ret); err != nil {
-		ret.Header = res.Header
-		return nil, err
-	}
-	ret.Header = res.Header
-	return ret, nil
-}
-
-// SearchResult is the result of a search in Elasticsearch.
-type SearchResult struct {
-	Header          http.Header            `json:"-"`
-	TookInMillis    int64                  `json:"took,omitempty"`             // search time in milliseconds
-	TerminatedEarly bool                   `json:"terminated_early,omitempty"` // request terminated early
-	NumReducePhases int                    `json:"num_reduce_phases,omitempty"`
-	Clusters        []*SearchResultCluster `json:"_clusters,omitempty"`    // 6.1.0+
-	ScrollId        string                 `json:"_scroll_id,omitempty"`   // only used with Scroll and Scan operations
-	Hits            *SearchHits            `json:"hits,omitempty"`         // the actual search hits
-	Suggest         SearchSuggest          `json:"suggest,omitempty"`      // results from suggesters
-	Aggregations    Aggregations           `json:"aggregations,omitempty"` // results from aggregations
-	TimedOut        bool                   `json:"timed_out,omitempty"`    // true if the search timed out
-	Error           *ErrorDetails          `json:"error,omitempty"`        // only used in MultiGet
-	Profile         *SearchProfile         `json:"profile,omitempty"`      // profiling results, if optional Profile API was active for this search
-	Shards          *ShardsInfo            `json:"_shards,omitempty"`      // shard information
-	Status          int                    `json:"status,omitempty"`       // used in MultiSearch
-}
-
-// SearchResultCluster holds information about a search response
-// from a cluster.
-type SearchResultCluster struct {
-	Successful int `json:"successful,omitempty"`
-	Total      int `json:"total,omitempty"`
-	Skipped    int `json:"skipped,omitempty"`
-}
-
-// TotalHits is a convenience function to return the number of hits for
-// a search result. The return value might not be accurate, unless the
-// track_total_hits parameter has been set to true.
-func (r *SearchResult) TotalHits() int64 {
-	if r.Hits != nil && r.Hits.TotalHits != nil {
-		return r.Hits.TotalHits.Value
-	}
-	return 0
-}
-
-// Each is a utility function to iterate over all hits. It saves you from
-// checking for nil values. Notice that Each will ignore errors in
-// unmarshalling JSON, and hits with an empty/nil _source will get an
-// empty value.
-func (r *SearchResult) Each(typ reflect.Type) []interface{} {
-	if r.Hits == nil || r.Hits.Hits == nil || len(r.Hits.Hits) == 0 {
-		return nil
-	}
-	var slice []interface{}
-	for _, hit := range r.Hits.Hits {
-		v := reflect.New(typ).Elem()
-		if hit.Source == nil {
-			slice = append(slice, v.Interface())
-			continue
-		}
-		if err := json.Unmarshal(hit.Source, v.Addr().Interface()); err == nil {
-			slice = append(slice, v.Interface())
-		}
-	}
-	return slice
-}
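Each (above) trades error visibility for convenience: hits that fail to unmarshal are silently skipped. A hedged sketch of decoding hits, where res is a *SearchResult from a previous search and the event type is illustrative:

// Hedged sketch: decode every hit into an illustrative struct type.
type fileEvent struct {
	FileName string `json:"file_name"`
}
for _, item := range res.Each(reflect.TypeOf(fileEvent{})) {
	if ev, ok := item.(fileEvent); ok {
		fmt.Println(ev.FileName)
	}
}

-
-// SearchHits specifies the list of search hits.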
-type SearchHits struct { - TotalHits *TotalHits `json:"total,omitempty"` // total number of hits found - MaxScore *float64 `json:"max_score,omitempty"` // maximum score of all hits - Hits []*SearchHit `json:"hits,omitempty"` // the actual hits returned -} - -// NestedHit is a nested innerhit -type NestedHit struct { - Field string `json:"field"` - Offset int `json:"offset,omitempty"` - Child *NestedHit `json:"_nested,omitempty"` -} - -// TotalHits specifies total number of hits and its relation -type TotalHits struct { - Value int64 `json:"value"` // value of the total hit count - Relation string `json:"relation"` // how the value should be interpreted: accurate ("eq") or a lower bound ("gte") -} - -// SearchHit is a single hit. -type SearchHit struct { - Score *float64 `json:"_score,omitempty"` // computed score - Index string `json:"_index,omitempty"` // index name - Type string `json:"_type,omitempty"` // type meta field - Id string `json:"_id,omitempty"` // external or internal - Uid string `json:"_uid,omitempty"` // uid meta field (see MapperService.java for all meta fields) - Routing string `json:"_routing,omitempty"` // routing meta field - Parent string `json:"_parent,omitempty"` // parent meta field - Version *int64 `json:"_version,omitempty"` // version number, when Version is set to true in SearchService - SeqNo *int64 `json:"_seq_no"` - PrimaryTerm *int64 `json:"_primary_term"` - Sort []interface{} `json:"sort,omitempty"` // sort information - Highlight SearchHitHighlight `json:"highlight,omitempty"` // highlighter information - Source json.RawMessage `json:"_source,omitempty"` // stored document source - Fields map[string]interface{} `json:"fields,omitempty"` // returned (stored) fields - Explanation *SearchExplanation `json:"_explanation,omitempty"` // explains how the score was computed - MatchedQueries []string `json:"matched_queries,omitempty"` // matched queries - InnerHits map[string]*SearchHitInnerHits `json:"inner_hits,omitempty"` // inner hits with ES >= 1.5.0 - Nested *NestedHit `json:"_nested,omitempty"` // for nested inner hits - Shard string `json:"_shard,omitempty"` // used e.g. in Search Explain - Node string `json:"_node,omitempty"` // used e.g. in Search Explain - - // HighlightFields - // SortValues - // MatchedFilters -} - -// SearchHitInnerHits is used for inner hits. -type SearchHitInnerHits struct { - Hits *SearchHits `json:"hits,omitempty"` -} - -// SearchExplanation explains how the score for a hit was computed. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-explain.html. -type SearchExplanation struct { - Value float64 `json:"value"` // e.g. 1.0 - Description string `json:"description"` // e.g. "boost" or "ConstantScore(*:*), product of:" - Details []SearchExplanation `json:"details,omitempty"` // recursive details -} - -// Suggest - -// SearchSuggest is a map of suggestions. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters.html. -type SearchSuggest map[string][]SearchSuggestion - -// SearchSuggestion is a single search suggestion. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters.html. -type SearchSuggestion struct { - Text string `json:"text"` - Offset int `json:"offset"` - Length int `json:"length"` - Options []SearchSuggestionOption `json:"options"` -} - -// SearchSuggestionOption is an option of a SearchSuggestion. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters.html. 
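Note the Relation field on TotalHits above: since 7.0 the reported total may be only a lower bound. A hedged sketch of interpreting it, assuming res is a *SearchResult:

// Hedged sketch: distinguish an exact count from a lower bound.
if res.Hits != nil && res.Hits.TotalHits != nil {
	t := res.Hits.TotalHits
	if t.Relation == "gte" {
		fmt.Printf("at least %d hits\n", t.Value)
	} else { // "eq"
		fmt.Printf("exactly %d hits\n", t.Value)
	}
}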
-type SearchSuggestionOption struct {
-	Text            string              `json:"text"`
-	Index           string              `json:"_index"`
-	Type            string              `json:"_type"`
-	Id              string              `json:"_id"`
-	Score           float64             `json:"score"`  // term and phrase suggesters use "score" as of 6.2.4
-	ScoreUnderscore float64             `json:"_score"` // completion and context suggesters use "_score" as of 6.2.4
-	Highlighted     string              `json:"highlighted"`
-	CollateMatch    bool                `json:"collate_match"`
-	Freq            int                 `json:"freq"` // from TermSuggestion.Option in Java API
-	Source          json.RawMessage     `json:"_source"`
-	Contexts        map[string][]string `json:"contexts,omitempty"`
-}
-
-// SearchProfile is a list of shard profiling data collected during
-// query execution in the "profile" section of a SearchResult.
-type SearchProfile struct {
-	Shards []SearchProfileShardResult `json:"shards"`
-}
-
-// SearchProfileShardResult returns the profiling data for a single shard
-// accessed during the search query or aggregation.
-type SearchProfileShardResult struct {
-	ID           string                    `json:"id"`
-	Searches     []QueryProfileShardResult `json:"searches"`
-	Aggregations []ProfileResult           `json:"aggregations"`
-}
-
-// QueryProfileShardResult is a container class to hold the profile results
-// for a single shard in the request. It contains a list of query profiles,
-// a collector tree and a total rewrite tree.
-type QueryProfileShardResult struct {
-	Query       []ProfileResult `json:"query,omitempty"`
-	RewriteTime int64           `json:"rewrite_time,omitempty"`
-	Collector   []interface{}   `json:"collector,omitempty"`
-}
-
-// CollectorResult holds the profile timings of the collectors used in the
-// search. Children's CollectorResults may be embedded inside of a parent
-// CollectorResult.
-type CollectorResult struct {
-	Name      string            `json:"name,omitempty"`
-	Reason    string            `json:"reason,omitempty"`
-	Time      string            `json:"time,omitempty"`
-	TimeNanos int64             `json:"time_in_nanos,omitempty"`
-	Children  []CollectorResult `json:"children,omitempty"`
-}
-
-// ProfileResult is the internal representation of a profiled query,
-// corresponding to a single node in the query tree.
-type ProfileResult struct {
-	Type          string           `json:"type"`
-	Description   string           `json:"description,omitempty"`
-	NodeTime      string           `json:"time,omitempty"`
-	NodeTimeNanos int64            `json:"time_in_nanos,omitempty"`
-	Breakdown     map[string]int64 `json:"breakdown,omitempty"`
-	Children      []ProfileResult  `json:"children,omitempty"`
-}
-
-// Aggregations (see search_aggs.go)
-
-// Highlighting
-
-// SearchHitHighlight is the highlight information of a search hit.
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-highlighting.html
-// for a general discussion of highlighting.
-type SearchHitHighlight map[string][]string
diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs.go b/vendor/github.com/olivere/elastic/v7/search_aggs.go
deleted file mode 100644
index 9080d12..0000000
--- a/vendor/github.com/olivere/elastic/v7/search_aggs.go
+++ /dev/null
@@ -1,1764 +0,0 @@
-// Copyright 2012-present Oliver Eilhard. All rights reserved.
-// Use of this source code is governed by a MIT-license.
-// See http://olivere.mit-license.org/license.txt for details.
-
-package elastic
-
-import (
-	"bytes"
-	"encoding/json"
-)
-
-// Aggregations can be seen as a unit-of-work that builds
-// analytic information over a set of documents. It is
-// (in many senses) the follow-up of facets in Elasticsearch.
-// For more details about aggregations, visit: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations.html -type Aggregation interface { - // Source returns a JSON-serializable aggregation that is a fragment - // of the request sent to Elasticsearch. - Source() (interface{}, error) -} - -// Aggregations is a list of aggregations that are part of a search result. -type Aggregations map[string]json.RawMessage - -// Min returns min aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-min-aggregation.html -func (a Aggregations) Min(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Max returns max aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-max-aggregation.html -func (a Aggregations) Max(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Sum returns sum aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-sum-aggregation.html -func (a Aggregations) Sum(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Avg returns average aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-avg-aggregation.html -func (a Aggregations) Avg(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// WeightedAvg computes the weighted average of numeric values that are extracted from the aggregated documents. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-weight-avg-aggregation.html -func (a Aggregations) WeightedAvg(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ValueCount returns value-count aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-valuecount-aggregation.html -func (a Aggregations) ValueCount(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Cardinality returns cardinality aggregation results. 
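All of these accessors follow one pattern: look up the named raw JSON fragment and unmarshal it into the matching result type. A hedged sketch of a round trip through Avg, where the index, field, and aggregation names are illustrative:

// Hedged sketch: request an avg metric, then read it back by name.
res, err := client.Search("my-index").
	Aggregation("avg_size", elastic.NewAvgAggregation().Field("file.size")).
	Size(0). // aggregations only, no hits
	Do(context.Background())
if err == nil {
	if avg, found := res.Aggregations.Avg("avg_size"); found && avg.Value != nil {
		fmt.Printf("average file size: %.0f bytes\n", *avg.Value)
	}
}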
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-cardinality-aggregation.html -func (a Aggregations) Cardinality(name string) (*AggregationValueMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationValueMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Stats returns stats aggregation results. -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-stats-aggregation.html -func (a Aggregations) Stats(name string) (*AggregationStatsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationStatsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ExtendedStats returns extended stats aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-extendedstats-aggregation.html -func (a Aggregations) ExtendedStats(name string) (*AggregationExtendedStatsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationExtendedStatsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MatrixStats returns matrix stats aggregation results. -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-matrix-stats-aggregation.html -func (a Aggregations) MatrixStats(name string) (*AggregationMatrixStats, bool) { - if raw, found := a[name]; found { - agg := new(AggregationMatrixStats) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Percentiles returns percentiles results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-aggregation.html -func (a Aggregations) Percentiles(name string) (*AggregationPercentilesMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPercentilesMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// PercentileRanks returns percentile ranks results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-rank-aggregation.html -func (a Aggregations) PercentileRanks(name string) (*AggregationPercentilesMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPercentilesMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// TopHits returns top-hits aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-top-hits-aggregation.html -func (a Aggregations) TopHits(name string) (*AggregationTopHitsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationTopHitsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Global returns global results. 
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-global-aggregation.html
-func (a Aggregations) Global(name string) (*AggregationSingleBucket, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationSingleBucket)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// Filter returns filter results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filter-aggregation.html
-func (a Aggregations) Filter(name string) (*AggregationSingleBucket, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationSingleBucket)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// Filters returns filters results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filters-aggregation.html
-func (a Aggregations) Filters(name string) (*AggregationBucketFilters, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationBucketFilters)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// AdjacencyMatrix returns adjacency matrix aggregation results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-adjacency-matrix-aggregation.html
-func (a Aggregations) AdjacencyMatrix(name string) (*AggregationBucketAdjacencyMatrix, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationBucketAdjacencyMatrix)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// Missing returns missing results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-missing-aggregation.html
-func (a Aggregations) Missing(name string) (*AggregationSingleBucket, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationSingleBucket)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// Nested returns nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-nested-aggregation.html
-func (a Aggregations) Nested(name string) (*AggregationSingleBucket, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationSingleBucket)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// ReverseNested returns reverse-nested results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-reverse-nested-aggregation.html
-func (a Aggregations) ReverseNested(name string) (*AggregationSingleBucket, bool) {
-	if raw, found := a[name]; found {
-		agg := new(AggregationSingleBucket)
-		if raw == nil {
-			return agg, true
-		}
-		if err := json.Unmarshal(raw, agg); err == nil {
-			return agg, true
-		}
-	}
-	return nil, false
-}
-
-// Children returns children results.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-children-aggregation.html -func (a Aggregations) Children(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Terms returns terms aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-terms-aggregation.html -func (a Aggregations) Terms(name string) (*AggregationBucketKeyItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SignificantTerms returns significant terms aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html -func (a Aggregations) SignificantTerms(name string) (*AggregationBucketSignificantTerms, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketSignificantTerms) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Sampler returns sampler aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-sampler-aggregation.html -func (a Aggregations) Sampler(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// DiversifiedSampler returns diversified_sampler aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-diversified-sampler-aggregation.html -func (a Aggregations) DiversifiedSampler(name string) (*AggregationSingleBucket, bool) { - if raw, found := a[name]; found { - agg := new(AggregationSingleBucket) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Range returns range aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-range-aggregation.html -func (a Aggregations) Range(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// KeyedRange returns keyed range aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-range-aggregation.html. -func (a Aggregations) KeyedRange(name string) (*AggregationBucketKeyedRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyedRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// DateRange returns date range aggregation results. 
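Bucket accessors such as Terms (above) return bucket lists rather than a single value. A hedged sketch, where res is a *SearchResult and the aggregation name is illustrative:

// Hedged sketch: iterate the buckets of a terms aggregation.
if terms, found := res.Aggregations.Terms("by_user"); found {
	for _, bucket := range terms.Buckets {
		fmt.Printf("%v: %d docs\n", bucket.Key, bucket.DocCount)
	}
}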
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-daterange-aggregation.html -func (a Aggregations) DateRange(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// IPRange returns IP range aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-iprange-aggregation.html -func (a Aggregations) IPRange(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Histogram returns histogram aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-histogram-aggregation.html -func (a Aggregations) Histogram(name string) (*AggregationBucketHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// AutoDateHistogram returns auto date histogram aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html -func (a Aggregations) AutoDateHistogram(name string) (*AggregationBucketHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// DateHistogram returns date histogram aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html -func (a Aggregations) DateHistogram(name string) (*AggregationBucketHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// KeyedDateHistogram returns date histogram aggregation results for keyed responses. -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html#_keyed_response_3 -func (a Aggregations) KeyedDateHistogram(name string) (*AggregationBucketKeyedHistogramItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyedHistogramItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoBounds returns geo-bounds aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geobounds-aggregation.html -func (a Aggregations) GeoBounds(name string) (*AggregationGeoBoundsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationGeoBoundsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoHash returns geo-hash aggregation results. 
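The three histogram accessors all decode into AggregationBucketHistogramItems, so reading buckets looks the same for each. A sketch for a date histogram registered as "per_day"; the request side is assumed to be something like elastic.NewDateHistogramAggregation().Field("@timestamp").CalendarInterval("1d").Format("yyyy-MM-dd"), where CalendarInterval is assumed available in this client version (older code used Interval):

func perDay(res *elastic.SearchResult) {
	histo, ok := res.Aggregations.DateHistogram("per_day")
	if !ok {
		return
	}
	for _, b := range histo.Buckets {
		// Key is the bucket timestamp in epoch millis; KeyAsString holds the
		// formatted date when the request specified a format.
		if b.KeyAsString != nil {
			fmt.Printf("%s: %d docs\n", *b.KeyAsString, b.DocCount)
		}
	}
}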
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geohashgrid-aggregation.html -func (a Aggregations) GeoHash(name string) (*AggregationBucketKeyItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketKeyItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoCentroid returns geo-centroid aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geocentroid-aggregation.html -func (a Aggregations) GeoCentroid(name string) (*AggregationGeoCentroidMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationGeoCentroidMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// GeoDistance returns geo distance aggregation results. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geodistance-aggregation.html -func (a Aggregations) GeoDistance(name string) (*AggregationBucketRangeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketRangeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// AvgBucket returns average bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-avg-bucket-aggregation.html -func (a Aggregations) AvgBucket(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SumBucket returns sum bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-sum-bucket-aggregation.html -func (a Aggregations) SumBucket(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// StatsBucket returns stats bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-stats-bucket-aggregation.html -func (a Aggregations) StatsBucket(name string) (*AggregationPipelineStatsMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineStatsMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// PercentilesBucket returns percentiles bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html -func (a Aggregations) PercentilesBucket(name string) (*AggregationPipelinePercentilesMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelinePercentilesMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MaxBucket returns maximum bucket pipeline aggregation results.
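The geo accessors differ from the bucket accessors in that they decode into fixed structs rather than bucket lists. A sketch of reading a bounding box, assuming elastic.NewGeoBoundsAggregation().Field("location") was registered as "viewport" (both names illustrative):

func viewport(res *elastic.SearchResult) {
	vp, ok := res.Aggregations.GeoBounds("viewport")
	if !ok {
		return
	}
	fmt.Printf("top-left %f,%f / bottom-right %f,%f\n",
		vp.Bounds.TopLeft.Latitude, vp.Bounds.TopLeft.Longitude,
		vp.Bounds.BottomRight.Latitude, vp.Bounds.BottomRight.Longitude)
}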
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-max-bucket-aggregation.html -func (a Aggregations) MaxBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineBucketMetricValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MinBucket returns minimum bucket pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-min-bucket-aggregation.html -func (a Aggregations) MinBucket(name string) (*AggregationPipelineBucketMetricValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineBucketMetricValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MovAvg returns moving average pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html -// -// Deprecated: The MovAvgAggregation has been deprecated in 6.4.0. Use the more general MovFnAggregation instead. -func (a Aggregations) MovAvg(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// MovFn returns moving function pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movfn-aggregation.html -func (a Aggregations) MovFn(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Derivative returns derivative pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-derivative-aggregation.html -func (a Aggregations) Derivative(name string) (*AggregationPipelineDerivative, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineDerivative) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// CumulativeSum returns cumulative sum pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-cumulative-sum-aggregation.html -func (a Aggregations) CumulativeSum(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// BucketScript returns bucket script pipeline aggregation results.
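The pipeline accessors pair with sibling pipeline builders and are addressed by name just like bucket aggregations. A sketch for a maximum-bucket pipeline; the aggregation names and the buckets_path are illustrative and assume a date histogram "sales_per_month" containing a sum sub-aggregation "sales":

// Request side (registered as "best_month"):
//   elastic.NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")
func bestMonth(res *elastic.SearchResult) {
	best, ok := res.Aggregations.MaxBucket("best_month")
	if !ok || best.Value == nil {
		return
	}
	// Keys lists the bucket key(s) at which the maximum occurred.
	fmt.Printf("best month(s) %v, sales %.2f\n", best.Keys, *best.Value)
}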
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-script-aggregation.html -func (a Aggregations) BucketScript(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// SerialDiff returns serial differencing pipeline aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-serialdiff-aggregation.html -func (a Aggregations) SerialDiff(name string) (*AggregationPipelineSimpleValue, bool) { - if raw, found := a[name]; found { - agg := new(AggregationPipelineSimpleValue) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// Composite returns composite bucket aggregation results. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html -// for details. -func (a Aggregations) Composite(name string) (*AggregationBucketCompositeItems, bool) { - if raw, found := a[name]; found { - agg := new(AggregationBucketCompositeItems) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// ScriptedMetric returns scripted metric aggregation results. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.2/search-aggregations-metrics-scripted-metric-aggregation.html -// for details. -func (a Aggregations) ScriptedMetric(name string) (*AggregationScriptedMetric, bool) { - if raw, found := a[name]; found { - agg := new(AggregationScriptedMetric) - if raw == nil { - return agg, true - } - if err := json.Unmarshal(raw, agg); err == nil { - return agg, true - } - } - return nil, false -} - -// -- Single value metric -- - -// AggregationValueMetric is a single-value metric, returned e.g. by a -// Min or Max aggregation. -type AggregationValueMetric struct { - Aggregations - - Value *float64 //`json:"value"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationValueMetric structure. -func (a *AggregationValueMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["value"]; ok && v != nil { - json.Unmarshal(v, &a.Value) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Stats metric -- - -// AggregationStatsMetric is a multi-value metric, returned by a Stats aggregation. -type AggregationStatsMetric struct { - Aggregations - - Count int64 // `json:"count"` - Min *float64 //`json:"min,omitempty"` - Max *float64 //`json:"max,omitempty"` - Avg *float64 //`json:"avg,omitempty"` - Sum *float64 //`json:"sum,omitempty"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationStatsMetric structure. 
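Because each result type carries its own UnmarshalJSON, the decoding logic can be exercised standalone, without a cluster. A sketch that feeds a canned single-value metric payload through AggregationValueMetric; the payload contents are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// The shape Elasticsearch returns for avg/min/max/sum metrics.
	raw := []byte(`{"value": 42.5, "meta": {"source": "example"}}`)
	var m elastic.AggregationValueMetric
	if err := json.Unmarshal(raw, &m); err != nil {
		panic(err)
	}
	fmt.Println(*m.Value, m.Meta["source"]) // 42.5 example
}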
-func (a *AggregationStatsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(v, &a.Count) - } - if v, ok := aggs["min"]; ok && v != nil { - json.Unmarshal(v, &a.Min) - } - if v, ok := aggs["max"]; ok && v != nil { - json.Unmarshal(v, &a.Max) - } - if v, ok := aggs["avg"]; ok && v != nil { - json.Unmarshal(v, &a.Avg) - } - if v, ok := aggs["sum"]; ok && v != nil { - json.Unmarshal(v, &a.Sum) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Extended stats metric -- - -// AggregationExtendedStatsMetric is a multi-value metric, returned by an ExtendedStats aggregation. -type AggregationExtendedStatsMetric struct { - Aggregations - - Count int64 // `json:"count"` - Min *float64 //`json:"min,omitempty"` - Max *float64 //`json:"max,omitempty"` - Avg *float64 //`json:"avg,omitempty"` - Sum *float64 //`json:"sum,omitempty"` - SumOfSquares *float64 //`json:"sum_of_squares,omitempty"` - Variance *float64 //`json:"variance,omitempty"` - StdDeviation *float64 //`json:"std_deviation,omitempty"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationExtendedStatsMetric structure. -func (a *AggregationExtendedStatsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(v, &a.Count) - } - if v, ok := aggs["min"]; ok && v != nil { - json.Unmarshal(v, &a.Min) - } - if v, ok := aggs["max"]; ok && v != nil { - json.Unmarshal(v, &a.Max) - } - if v, ok := aggs["avg"]; ok && v != nil { - json.Unmarshal(v, &a.Avg) - } - if v, ok := aggs["sum"]; ok && v != nil { - json.Unmarshal(v, &a.Sum) - } - if v, ok := aggs["sum_of_squares"]; ok && v != nil { - json.Unmarshal(v, &a.SumOfSquares) - } - if v, ok := aggs["variance"]; ok && v != nil { - json.Unmarshal(v, &a.Variance) - } - if v, ok := aggs["std_deviation"]; ok && v != nil { - json.Unmarshal(v, &a.StdDeviation) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Matrix Stats -- - -// AggregationMatrixStats is returned by a MatrixStats aggregation. -type AggregationMatrixStats struct { - Aggregations - - Fields []*AggregationMatrixStatsField // `json:"field,omitempty"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// AggregationMatrixStatsField represents running stats of a single field -// returned from MatrixStats aggregation. -type AggregationMatrixStatsField struct { - Name string `json:"name"` - Count int64 `json:"count"` - Mean float64 `json:"mean,omitempty"` - Variance float64 `json:"variance,omitempty"` - Skewness float64 `json:"skewness,omitempty"` - Kurtosis float64 `json:"kurtosis,omitempty"` - Covariance map[string]float64 `json:"covariance,omitempty"` - Correlation map[string]float64 `json:"correlation,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationMatrixStats structure. 
-func (a *AggregationMatrixStats) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["fields"]; ok && v != nil { - // RunningStats for every field - json.Unmarshal(v, &a.Fields) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Percentiles metric -- - -// AggregationPercentilesMetric is a multi-value metric, returned by a Percentiles aggregation. -type AggregationPercentilesMetric struct { - Aggregations - - Values map[string]float64 // `json:"values"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPercentilesMetric structure. -func (a *AggregationPercentilesMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["values"]; ok && v != nil { - json.Unmarshal(v, &a.Values) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Top-hits metric -- - -// AggregationTopHitsMetric is a metric returned by a TopHits aggregation. -type AggregationTopHitsMetric struct { - Aggregations - - Hits *SearchHits //`json:"hits"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationTopHitsMetric structure. -func (a *AggregationTopHitsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - a.Aggregations = aggs - a.Hits = new(SearchHits) - if v, ok := aggs["hits"]; ok && v != nil { - json.Unmarshal(v, &a.Hits) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - return nil -} - -// -- Geo-bounds metric -- - -// AggregationGeoBoundsMetric is a metric as returned by a GeoBounds aggregation. -type AggregationGeoBoundsMetric struct { - Aggregations - - Bounds struct { - TopLeft struct { - Latitude float64 `json:"lat"` - Longitude float64 `json:"lon"` - } `json:"top_left"` - BottomRight struct { - Latitude float64 `json:"lat"` - Longitude float64 `json:"lon"` - } `json:"bottom_right"` - } `json:"bounds"` - - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationGeoBoundsMetric structure. -func (a *AggregationGeoBoundsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["bounds"]; ok && v != nil { - json.Unmarshal(v, &a.Bounds) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationGeoCentroidMetric is a metric as returned by a GeoCentroid aggregation. -type AggregationGeoCentroidMetric struct { - Aggregations - - Location struct { - Latitude float64 `json:"lat"` - Longitude float64 `json:"lon"` - } `json:"location"` - - Count int // `json:"count,omitempty"` - - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationGeoCentroidMetric structure. 
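AggregationTopHitsMetric embeds a full SearchHits, which makes it the usual way to pull representative documents out of each bucket. A sketch, assuming a terms aggregation "by_user" carrying an elastic.NewTopHitsAggregation().Size(1) sub-aggregation registered as "latest"; the TopHits accessor itself is defined earlier in this file:

func latestPerUser(res *elastic.SearchResult) {
	terms, ok := res.Aggregations.Terms("by_user")
	if !ok {
		return
	}
	for _, bucket := range terms.Buckets {
		// Buckets embed Aggregations, so sub-aggregations are read the same way.
		if top, ok := bucket.TopHits("latest"); ok && top.Hits != nil {
			for _, hit := range top.Hits.Hits {
				fmt.Printf("%v: %s\n", bucket.Key, hit.Source) // Source is raw JSON
			}
		}
	}
}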
-func (a *AggregationGeoCentroidMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["location"]; ok && v != nil { - json.Unmarshal(v, &a.Location) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(v, &a.Count) - } - a.Aggregations = aggs - return nil -} - -// -- Single bucket -- - -// AggregationSingleBucket is a single bucket, returned e.g. via an aggregation of type Global. -type AggregationSingleBucket struct { - Aggregations - - DocCount int64 // `json:"doc_count"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationSingleBucket structure. -func (a *AggregationSingleBucket) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket range items -- - -// AggregationBucketRangeItems is a bucket aggregation that is e.g. returned -// with a range aggregation. -type AggregationBucketRangeItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets []*AggregationBucketRangeItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItems structure. -func (a *AggregationBucketRangeItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketKeyedRangeItems is a bucket aggregation that is e.g. returned -// with a keyed range aggregation. -type AggregationBucketKeyedRangeItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets map[string]*AggregationBucketRangeItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedRangeItems structure.
-func (a *AggregationBucketKeyedRangeItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketRangeItem is a single bucket of an AggregationBucketRangeItems structure. -type AggregationBucketRangeItem struct { - Aggregations - - Key string //`json:"key"` - DocCount int64 //`json:"doc_count"` - From *float64 //`json:"from"` - FromAsString string //`json:"from_as_string"` - To *float64 //`json:"to"` - ToAsString string //`json:"to_as_string"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketRangeItem structure. -func (a *AggregationBucketRangeItem) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(v, &a.Key) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - if v, ok := aggs["from"]; ok && v != nil { - json.Unmarshal(v, &a.From) - } - if v, ok := aggs["from_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.FromAsString) - } - if v, ok := aggs["to"]; ok && v != nil { - json.Unmarshal(v, &a.To) - } - if v, ok := aggs["to_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.ToAsString) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket key items -- - -// AggregationBucketKeyItems is a bucket aggregation that is e.g. returned -// with a terms aggregation. -type AggregationBucketKeyItems struct { - Aggregations - - DocCountErrorUpperBound int64 //`json:"doc_count_error_upper_bound"` - SumOfOtherDocCount int64 //`json:"sum_other_doc_count"` - Buckets []*AggregationBucketKeyItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItems structure. -func (a *AggregationBucketKeyItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count_error_upper_bound"]; ok && v != nil { - json.Unmarshal(v, &a.DocCountErrorUpperBound) - } - if v, ok := aggs["sum_other_doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.SumOfOtherDocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketKeyItem is a single bucket of an AggregationBucketKeyItems structure. -type AggregationBucketKeyItem struct { - Aggregations - - Key interface{} //`json:"key"` - KeyAsString *string //`json:"key_as_string"` - KeyNumber json.Number - DocCount int64 //`json:"doc_count"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyItem structure. 
-func (a *AggregationBucketKeyItem) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - if err := dec.Decode(&aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(v, &a.Key) - json.Unmarshal(v, &a.KeyNumber) - } - if v, ok := aggs["key_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.KeyAsString) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket types for significant terms -- - -// AggregationBucketSignificantTerms is a bucket aggregation returned -// with a significant terms aggregation. -type AggregationBucketSignificantTerms struct { - Aggregations - - DocCount int64 //`json:"doc_count"` - Buckets []*AggregationBucketSignificantTerm //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerms structure. -func (a *AggregationBucketSignificantTerms) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketSignificantTerm is a single bucket of an AggregationBucketSignificantTerms structure. -type AggregationBucketSignificantTerm struct { - Aggregations - - Key string //`json:"key"` - DocCount int64 //`json:"doc_count"` - BgCount int64 //`json:"bg_count"` - Score float64 //`json:"score"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketSignificantTerm structure. -func (a *AggregationBucketSignificantTerm) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(v, &a.Key) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - if v, ok := aggs["bg_count"]; ok && v != nil { - json.Unmarshal(v, &a.BgCount) - } - if v, ok := aggs["score"]; ok && v != nil { - json.Unmarshal(v, &a.Score) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket filters -- - -// AggregationBucketFilters is a multi-bucket aggregation that is returned -// with a filters aggregation. -type AggregationBucketFilters struct { - Aggregations - - Buckets []*AggregationBucketKeyItem //`json:"buckets"` - NamedBuckets map[string]*AggregationBucketKeyItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketFilters structure. 
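The UseNumber decoder above is what makes KeyNumber trustworthy for large integer keys, where a plain float64 round-trip would lose precision. A standalone sketch with a canned terms payload:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// 2^53+1 cannot be represented exactly as a float64.
	raw := []byte(`{"buckets":[{"key":9007199254740993,"doc_count":3}]}`)
	var terms elastic.AggregationBucketKeyItems
	if err := json.Unmarshal(raw, &terms); err != nil {
		panic(err)
	}
	b := terms.Buckets[0]
	n, _ := b.KeyNumber.Int64()
	fmt.Println(n, b.DocCount) // 9007199254740993 3 (exact via KeyNumber)
}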
-func (a *AggregationBucketFilters) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - json.Unmarshal(v, &a.NamedBuckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket AdjacencyMatrix -- - -// AggregationBucketAdjacencyMatrix is a multi-bucket aggregation that is returned -// with an AdjacencyMatrix aggregation. -type AggregationBucketAdjacencyMatrix struct { - Aggregations - - Buckets []*AggregationBucketKeyItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketAdjacencyMatrix structure. -func (a *AggregationBucketAdjacencyMatrix) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Bucket histogram items -- - -// AggregationBucketHistogramItems is a bucket aggregation that is returned -// with a histogram or date histogram aggregation. -type AggregationBucketHistogramItems struct { - Aggregations - - Buckets []*AggregationBucketHistogramItem //`json:"buckets"` - Interval interface{} // `json:"interval"` // can be numeric or a string - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItems structure. -func (a *AggregationBucketHistogramItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["interval"]; ok && v != nil { - json.Unmarshal(v, &a.Interval) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketKeyedHistogramItems is a bucket aggregation that is returned -// with a (keyed) date histogram aggregation. -type AggregationBucketKeyedHistogramItems struct { - Aggregations - - Buckets map[string]*AggregationBucketHistogramItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketKeyedHistogramItems structure. -func (a *AggregationBucketKeyedHistogramItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketHistogramItem is a single bucket of an AggregationBucketHistogramItems structure. -type AggregationBucketHistogramItem struct { - Aggregations - - Key float64 //`json:"key"` - KeyAsString *string //`json:"key_as_string"` - DocCount int64 //`json:"doc_count"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketHistogramItem structure.
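AggregationBucketFilters is unusual in that it decodes the same "buckets" payload twice, once as a slice and once as a map, so both the anonymous and the keyed response shapes work. A sketch of the keyed form, assuming a filters aggregation built with elastic.NewFiltersAggregation().FilterWithName(...) and registered as "status" (names illustrative):

func statusCounts(res *elastic.SearchResult) {
	f, ok := res.Aggregations.Filters("status")
	if !ok {
		return
	}
	// Keyed responses land in NamedBuckets; anonymous ones in Buckets.
	for name, bucket := range f.NamedBuckets {
		fmt.Printf("%s: %d docs\n", name, bucket.DocCount)
	}
}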
-func (a *AggregationBucketHistogramItem) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(v, &a.Key) - } - if v, ok := aggs["key_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.KeyAsString) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline simple value -- - -// AggregationPipelineSimpleValue is a simple value, returned e.g. by a -// MovAvg aggregation. -type AggregationPipelineSimpleValue struct { - Aggregations - - Value *float64 // `json:"value"` - ValueAsString string // `json:"value_as_string"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineSimpleValue structure. -func (a *AggregationPipelineSimpleValue) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["value"]; ok && v != nil { - json.Unmarshal(v, &a.Value) - } - if v, ok := aggs["value_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.ValueAsString) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline bucket metric value -- - -// AggregationPipelineBucketMetricValue is a value returned e.g. by a -// MaxBucket aggregation. -type AggregationPipelineBucketMetricValue struct { - Aggregations - - Keys []interface{} // `json:"keys"` - Value *float64 // `json:"value"` - ValueAsString string // `json:"value_as_string"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineBucketMetricValue structure. -func (a *AggregationPipelineBucketMetricValue) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["keys"]; ok && v != nil { - json.Unmarshal(v, &a.Keys) - } - if v, ok := aggs["value"]; ok && v != nil { - json.Unmarshal(v, &a.Value) - } - if v, ok := aggs["value_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.ValueAsString) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline derivative -- - -// AggregationPipelineDerivative is the value returned by a -// Derivative aggregation. -type AggregationPipelineDerivative struct { - Aggregations - - Value *float64 // `json:"value"` - ValueAsString string // `json:"value_as_string"` - NormalizedValue *float64 // `json:"normalized_value"` - NormalizedValueAsString string // `json:"normalized_value_as_string"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineDerivative structure.
-func (a *AggregationPipelineDerivative) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["value"]; ok && v != nil { - json.Unmarshal(v, &a.Value) - } - if v, ok := aggs["value_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.ValueAsString) - } - if v, ok := aggs["normalized_value"]; ok && v != nil { - json.Unmarshal(v, &a.NormalizedValue) - } - if v, ok := aggs["normalized_value_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.NormalizedValueAsString) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline stats metric -- - -// AggregationPipelineStatsMetric is a multi-value metric, returned e.g. by a -// StatsBucket aggregation. -type AggregationPipelineStatsMetric struct { - Aggregations - - Count int64 // `json:"count"` - CountAsString string // `json:"count_as_string"` - Min *float64 // `json:"min"` - MinAsString string // `json:"min_as_string"` - Max *float64 // `json:"max"` - MaxAsString string // `json:"max_as_string"` - Avg *float64 // `json:"avg"` - AvgAsString string // `json:"avg_as_string"` - Sum *float64 // `json:"sum"` - SumAsString string // `json:"sum_as_string"` - - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPipelineStatsMetric structure. -func (a *AggregationPipelineStatsMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["count"]; ok && v != nil { - json.Unmarshal(v, &a.Count) - } - if v, ok := aggs["count_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.CountAsString) - } - if v, ok := aggs["min"]; ok && v != nil { - json.Unmarshal(v, &a.Min) - } - if v, ok := aggs["min_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.MinAsString) - } - if v, ok := aggs["max"]; ok && v != nil { - json.Unmarshal(v, &a.Max) - } - if v, ok := aggs["max_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.MaxAsString) - } - if v, ok := aggs["avg"]; ok && v != nil { - json.Unmarshal(v, &a.Avg) - } - if v, ok := aggs["avg_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.AvgAsString) - } - if v, ok := aggs["sum"]; ok && v != nil { - json.Unmarshal(v, &a.Sum) - } - if v, ok := aggs["sum_as_string"]; ok && v != nil { - json.Unmarshal(v, &a.SumAsString) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Pipeline percentiles -- - -// AggregationPipelinePercentilesMetric is the value returned by a pipeline -// percentiles metric aggregation. -type AggregationPipelinePercentilesMetric struct { - Aggregations - - Values map[string]float64 // `json:"values"` - Meta map[string]interface{} // `json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationPipelinePercentilesMetric structure.
-func (a *AggregationPipelinePercentilesMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["values"]; ok && v != nil { - json.Unmarshal(v, &a.Values) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} - -// -- Composite key items -- - -// AggregationBucketCompositeItems implements the response structure -// for a bucket aggregation of type composite. -type AggregationBucketCompositeItems struct { - Aggregations - - Buckets []*AggregationBucketCompositeItem //`json:"buckets"` - Meta map[string]interface{} // `json:"meta,omitempty"` - AfterKey map[string]interface{} // `json:"after_key,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItems structure. -func (a *AggregationBucketCompositeItems) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["buckets"]; ok && v != nil { - json.Unmarshal(v, &a.Buckets) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - if v, ok := aggs["after_key"]; ok && v != nil { - json.Unmarshal(v, &a.AfterKey) - } - a.Aggregations = aggs - return nil -} - -// AggregationBucketCompositeItem is a single bucket of an AggregationBucketCompositeItems structure. -type AggregationBucketCompositeItem struct { - Aggregations - - Key map[string]interface{} //`json:"key"` - DocCount int64 //`json:"doc_count"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationBucketCompositeItem structure. -func (a *AggregationBucketCompositeItem) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - dec := json.NewDecoder(bytes.NewReader(data)) - dec.UseNumber() - if err := dec.Decode(&aggs); err != nil { - return err - } - if v, ok := aggs["key"]; ok && v != nil { - json.Unmarshal(v, &a.Key) - } - if v, ok := aggs["doc_count"]; ok && v != nil { - json.Unmarshal(v, &a.DocCount) - } - a.Aggregations = aggs - return nil -} - -// AggregationScriptedMetric is the value returned by a scripted metric aggregation. -// Value may be one of map[string]interface{}, []interface{}, string, bool, or json.Number. -type AggregationScriptedMetric struct { - Aggregations - - Value interface{} //`json:"value"` - Meta map[string]interface{} //`json:"meta,omitempty"` -} - -// UnmarshalJSON decodes JSON data and initializes an AggregationScriptedMetric structure. -func (a *AggregationScriptedMetric) UnmarshalJSON(data []byte) error { - var aggs map[string]json.RawMessage - if err := json.Unmarshal(data, &aggs); err != nil { - return err - } - if v, ok := aggs["value"]; ok && v != nil { - decoder := json.NewDecoder(bytes.NewReader(v)) - decoder.UseNumber() - decoder.Decode(&a.Value) - } - if v, ok := aggs["meta"]; ok && v != nil { - json.Unmarshal(v, &a.Meta) - } - a.Aggregations = aggs - return nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_adjacency_matrix.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_adjacency_matrix.go deleted file mode 100644 index 7dff0e0..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_adjacency_matrix.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details.
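The file removed below defined the AdjacencyMatrixAggregation builder. For context, a sketch of how it was typically wired up and read back through the accessor above; index, field, and group names are illustrative:

func interactions(client *elastic.Client) error {
	agg := elastic.NewAdjacencyMatrixAggregation().
		Filters("grpA", elastic.NewTermsQuery("accounts", "hillary", "sidney")).
		Filters("grpB", elastic.NewTermsQuery("accounts", "donald", "mitt"))
	res, err := client.Search().
		Index("emails").
		Size(0).
		Aggregation("interactions", agg).
		Do(context.Background())
	if err != nil {
		return err
	}
	if m, ok := res.Aggregations.AdjacencyMatrix("interactions"); ok {
		for _, cell := range m.Buckets {
			// Keys look like "grpA" or "grpA&grpB", one per non-empty cell.
			fmt.Printf("%v: %d docs\n", cell.Key, cell.DocCount)
		}
	}
	return nil
}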
- -package elastic - -// AdjacencyMatrixAggregation returns a form of adjacency matrix. -// The request provides a collection of named filter expressions, -// similar to the filters aggregation request. Each bucket in the -// response represents a non-empty cell in the matrix of intersecting filters. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-adjacency-matrix-aggregation.html -type AdjacencyMatrixAggregation struct { - filters map[string]Query - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -// NewAdjacencyMatrixAggregation initializes a new AdjacencyMatrixAggregation. -func NewAdjacencyMatrixAggregation() *AdjacencyMatrixAggregation { - return &AdjacencyMatrixAggregation{ - filters: make(map[string]Query), - subAggregations: make(map[string]Aggregation), - } -} - -// Filters adds the named filter. -func (a *AdjacencyMatrixAggregation) Filters(name string, filter Query) *AdjacencyMatrixAggregation { - a.filters[name] = filter - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *AdjacencyMatrixAggregation) SubAggregation(name string, subAggregation Aggregation) *AdjacencyMatrixAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *AdjacencyMatrixAggregation) Meta(metaData map[string]interface{}) *AdjacencyMatrixAggregation { - a.meta = metaData - return a -} - -// Source returns a JSON-serializable interface. -func (a *AdjacencyMatrixAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "interactions" : { - // "adjacency_matrix" : { - // "filters" : { - // "grpA" : { "terms" : { "accounts" : ["hillary", "sidney"] }}, - // "grpB" : { "terms" : { "accounts" : ["donald", "mitt"] }}, - // "grpC" : { "terms" : { "accounts" : ["vladimir", "nigel"] }} - // } - // } - // } - // } - // This method returns only the (outer) { "adjacency_matrix" : {} } part. - - source := make(map[string]interface{}) - adjacencyMatrix := make(map[string]interface{}) - source["adjacency_matrix"] = adjacencyMatrix - - dict := make(map[string]interface{}) - for key, filter := range a.filters { - src, err := filter.Source() - if err != nil { - return nil, err - } - dict[key] = src - } - adjacencyMatrix["filters"] = dict - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_auto_date_histogram.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_auto_date_histogram.go deleted file mode 100644 index 281ee23..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_auto_date_histogram.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AutoDateHistogramAggregation is a multi-bucket aggregation similar to the -// histogram, except it can only be applied to date values and a target number of buckets can be specified.
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.3/search-aggregations-bucket-autodatehistogram-aggregation.html -type AutoDateHistogramAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - buckets int - minDocCount *int64 - timeZone string - format string - minimumInterval string -} - -// NewAutoDateHistogramAggregation creates a new AutoDateHistogramAggregation. -func NewAutoDateHistogramAggregation() *AutoDateHistogramAggregation { - return &AutoDateHistogramAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -// Field on which the aggregation is processed. -func (a *AutoDateHistogramAggregation) Field(field string) *AutoDateHistogramAggregation { - a.field = field - return a -} - -// Script to use for this aggregation. -func (a *AutoDateHistogramAggregation) Script(script *Script) *AutoDateHistogramAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *AutoDateHistogramAggregation) Missing(missing interface{}) *AutoDateHistogramAggregation { - a.missing = missing - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *AutoDateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *AutoDateHistogramAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *AutoDateHistogramAggregation) Meta(metaData map[string]interface{}) *AutoDateHistogramAggregation { - a.meta = metaData - return a -} - -// Buckets sets the target number of buckets by which the aggregation gets processed. -func (a *AutoDateHistogramAggregation) Buckets(buckets int) *AutoDateHistogramAggregation { - a.buckets = buckets - return a -} - -// MinDocCount sets the minimum document count per bucket. -// Buckets with fewer documents than this min value will not be returned. -func (a *AutoDateHistogramAggregation) MinDocCount(minDocCount int64) *AutoDateHistogramAggregation { - a.minDocCount = &minDocCount - return a -} - -// TimeZone sets the timezone in which to translate dates before computing buckets. -func (a *AutoDateHistogramAggregation) TimeZone(timeZone string) *AutoDateHistogramAggregation { - a.timeZone = timeZone - return a -} - -// Format sets the format to use for dates. -func (a *AutoDateHistogramAggregation) Format(format string) *AutoDateHistogramAggregation { - a.format = format - return a -} - -// MinimumInterval sets the minimum rounding interval. Accepted units are: year/month/day/hour/minute/second. -func (a *AutoDateHistogramAggregation) MinimumInterval(interval string) *AutoDateHistogramAggregation { - a.minimumInterval = interval - return a -} - -// Source returns the JSON-serializable source of the AutoDateHistogramAggregation. -func (a *AutoDateHistogramAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "articles_over_time" : { - // "auto_date_histogram" : { - // "field" : "date", - // "buckets" : 10 - // } - // } - // } - // } - // - // This method returns only the { "auto_date_histogram" : { ... } } part.
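A sketch of this builder in use: Elasticsearch picks the interval itself and reports it back next to the buckets. The index and field names are illustrative:

func overTime(client *elastic.Client) error {
	agg := elastic.NewAutoDateHistogramAggregation().
		Field("@timestamp").
		Buckets(10).            // target number of buckets
		MinimumInterval("hour") // never round below one hour
	res, err := client.Search().
		Index("logs").
		Size(0).
		Aggregation("over_time", agg).
		Do(context.Background())
	if err != nil {
		return err
	}
	if histo, ok := res.Aggregations.AutoDateHistogram("over_time"); ok {
		fmt.Println("chosen interval:", histo.Interval, "buckets:", len(histo.Buckets))
	}
	return nil
}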
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["auto_date_histogram"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - if a.buckets > 0 { - opts["buckets"] = a.buckets - } - - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.timeZone != "" { - opts["time_zone"] = a.timeZone - } - if a.format != "" { - opts["format"] = a.format - } - if a.minimumInterval != "" { - opts["minimum_interval"] = a.minimumInterval - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_children.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_children.go deleted file mode 100644 index 4078b1d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_children.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ChildrenAggregation is a special single bucket aggregation that enables -// aggregating from buckets on parent document types to buckets on child documents. -// It is available from 1.4.0.Beta1 upwards. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-children-aggregation.html -type ChildrenAggregation struct { - typ string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewChildrenAggregation() *ChildrenAggregation { - return &ChildrenAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ChildrenAggregation) Type(typ string) *ChildrenAggregation { - a.typ = typ - return a -} - -func (a *ChildrenAggregation) SubAggregation(name string, subAggregation Aggregation) *ChildrenAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *ChildrenAggregation) Meta(metaData map[string]interface{}) *ChildrenAggregation { - a.meta = metaData - return a -} - -func (a *ChildrenAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "to-answers" : { - // "children": { - // "type" : "answer" - // } - // } - // } - // } - // This method returns only the { "type" : ... } part. 
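A sketch of ChildrenAggregation in use; it assumes an index with a join field whose child relation is named "answer", and the index and aggregation names are illustrative:

func countAnswers(client *elastic.Client) error {
	agg := elastic.NewChildrenAggregation().Type("answer")
	res, err := client.Search().
		Index("qa").
		Size(0).
		Aggregation("to-answers", agg).
		Do(context.Background())
	if err != nil {
		return err
	}
	if answers, ok := res.Aggregations.Children("to-answers"); ok {
		fmt.Println("child answer docs:", answers.DocCount)
	}
	return nil
}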
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["children"] = opts - opts["type"] = a.typ - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_composite.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_composite.go deleted file mode 100644 index 859c9e4..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_composite.go +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CompositeAggregation is a multi-bucket values source based aggregation -// that can be used to calculate unique composite values from source documents. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html -// for details. -type CompositeAggregation struct { - after map[string]interface{} - size *int - sources []CompositeAggregationValuesSource - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -// NewCompositeAggregation creates a new CompositeAggregation. -func NewCompositeAggregation() *CompositeAggregation { - return &CompositeAggregation{ - sources: make([]CompositeAggregationValuesSource, 0), - subAggregations: make(map[string]Aggregation), - } -} - -// Size represents the number of composite buckets to return. -// Defaults to 10 as of Elasticsearch 6.1. -func (a *CompositeAggregation) Size(size int) *CompositeAggregation { - a.size = &size - return a -} - -// AggregateAfter sets the values that indicate which composite bucket this -// request should "aggregate after". -func (a *CompositeAggregation) AggregateAfter(after map[string]interface{}) *CompositeAggregation { - a.after = after - return a -} - -// Sources specifies the list of CompositeAggregationValuesSource instances to -// use in the aggregation. -func (a *CompositeAggregation) Sources(sources ...CompositeAggregationValuesSource) *CompositeAggregation { - a.sources = append(a.sources, sources...) - return a -} - -// SubAggregations of this aggregation. -func (a *CompositeAggregation) SubAggregation(name string, subAggregation Aggregation) *CompositeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *CompositeAggregation) Meta(metaData map[string]interface{}) *CompositeAggregation { - a.meta = metaData - return a -} - -// Source returns the serializable JSON for this aggregation. 
-func (a *CompositeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "my_composite_agg" : { - // "composite" : { - // "sources": [ - // {"my_term": { "terms": { "field": "product" }}}, - // {"my_histo": { "histogram": { "field": "price", "interval": 5 }}}, - // {"my_date": { "date_histogram": { "field": "timestamp", "interval": "1d" }}}, - // ], - // "size" : 10, - // "after" : { "my_term": "a", "my_histo": 2, "my_date": "c" } - // } - // } - // } - // } - // - // This method returns only the { "composite" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["composite"] = opts - - sources := make([]interface{}, len(a.sources)) - for i, s := range a.sources { - src, err := s.Source() - if err != nil { - return nil, err - } - sources[i] = src - } - opts["sources"] = sources - - if a.size != nil { - opts["size"] = *a.size - } - - if a.after != nil { - opts["after"] = a.after - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// -- Generic interface for CompositeAggregationValues -- - -// CompositeAggregationValuesSource specifies the interface that -// all implementations for CompositeAggregation's Sources method -// need to implement. -// -// The different implementations are described in -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html#_values_source_2. -type CompositeAggregationValuesSource interface { - Source() (interface{}, error) -} - -// -- CompositeAggregationTermsValuesSource -- - -// CompositeAggregationTermsValuesSource is a source for the CompositeAggregation that handles terms. -// It works very similarly to a terms aggregation, with slightly different syntax. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html#_terms -// for details. -type CompositeAggregationTermsValuesSource struct { - name string - field string - script *Script - valueType string - missing interface{} - missingBucket *bool - order string -} - -// NewCompositeAggregationTermsValuesSource creates and initializes -// a new CompositeAggregationTermsValuesSource. -func NewCompositeAggregationTermsValuesSource(name string) *CompositeAggregationTermsValuesSource { - return &CompositeAggregationTermsValuesSource{ - name: name, - } -} - -// Field to use for this source. -func (a *CompositeAggregationTermsValuesSource) Field(field string) *CompositeAggregationTermsValuesSource { - a.field = field - return a -} - -// Script to use for this source. -func (a *CompositeAggregationTermsValuesSource) Script(script *Script) *CompositeAggregationTermsValuesSource { - a.script = script - return a -} - -// ValueType specifies the type of values produced by this source, -// e.g. "string" or "date". -func (a *CompositeAggregationTermsValuesSource) ValueType(valueType string) *CompositeAggregationTermsValuesSource { - a.valueType = valueType - return a -} - -// Order specifies the order of the values produced by this source. -// It can be either "asc" or "desc".
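The main reason to reach for composite over terms is pagination via after_key. A sketch of draining all buckets page by page with the builder above; index and field names are illustrative, and the final request may come back with no buckets and no after_key, which ends the loop:

func allUsers(client *elastic.Client) error {
	comp := elastic.NewCompositeAggregation().
		Size(100).
		Sources(elastic.NewCompositeAggregationTermsValuesSource("by_user").Field("user.keyword"))
	for {
		res, err := client.Search().
			Index("logs").
			Size(0).
			Aggregation("page", comp).
			Do(context.Background())
		if err != nil {
			return err
		}
		page, ok := res.Aggregations.Composite("page")
		if !ok {
			return nil
		}
		for _, b := range page.Buckets {
			fmt.Printf("%v: %d docs\n", b.Key["by_user"], b.DocCount)
		}
		if page.AfterKey == nil {
			return nil // no more pages
		}
		comp = comp.AggregateAfter(page.AfterKey) // resume after the last bucket
	}
}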
-func (a *CompositeAggregationTermsValuesSource) Order(order string) *CompositeAggregationTermsValuesSource { - a.order = order - return a -} - -// Asc ensures the order of the values produced is ascending. -func (a *CompositeAggregationTermsValuesSource) Asc() *CompositeAggregationTermsValuesSource { - a.order = "asc" - return a -} - -// Desc ensures the order of the values produced is descending. -func (a *CompositeAggregationTermsValuesSource) Desc() *CompositeAggregationTermsValuesSource { - a.order = "desc" - return a -} - -// Missing specifies the value to use when the source finds a missing -// value in a document. -// -// Deprecated: Use MissingBucket instead. -func (a *CompositeAggregationTermsValuesSource) Missing(missing interface{}) *CompositeAggregationTermsValuesSource { - a.missing = missing - return a -} - -// MissingBucket, if true, will create an explicit null bucket which represents -// documents with missing values. -func (a *CompositeAggregationTermsValuesSource) MissingBucket(missingBucket bool) *CompositeAggregationTermsValuesSource { - a.missingBucket = &missingBucket - return a -} - -// Source returns the serializable JSON for this values source. -func (a *CompositeAggregationTermsValuesSource) Source() (interface{}, error) { - source := make(map[string]interface{}) - name := make(map[string]interface{}) - source[a.name] = name - values := make(map[string]interface{}) - name["terms"] = values - - // field - if a.field != "" { - values["field"] = a.field - } - - // script - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - values["script"] = src - } - - // missing - if a.missing != nil { - values["missing"] = a.missing - } - - // missing_bucket - if a.missingBucket != nil { - values["missing_bucket"] = *a.missingBucket - } - - // value_type - if a.valueType != "" { - values["value_type"] = a.valueType - } - - // order - if a.order != "" { - values["order"] = a.order - } - - return source, nil - -} - -// -- CompositeAggregationHistogramValuesSource -- - -// CompositeAggregationHistogramValuesSource is a source for the CompositeAggregation that handles histograms. -// It works very similarly to a histogram aggregation, with slightly different syntax. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-composite-aggregation.html#_histogram -// for details. -type CompositeAggregationHistogramValuesSource struct { - name string - field string - script *Script - valueType string - missing interface{} - missingBucket *bool - order string - interval float64 -} - -// NewCompositeAggregationHistogramValuesSource creates and initializes -// a new CompositeAggregationHistogramValuesSource. -func NewCompositeAggregationHistogramValuesSource(name string, interval float64) *CompositeAggregationHistogramValuesSource { - return &CompositeAggregationHistogramValuesSource{ - name: name, - interval: interval, - } -} - -// Field to use for this source. -func (a *CompositeAggregationHistogramValuesSource) Field(field string) *CompositeAggregationHistogramValuesSource { - a.field = field - return a -} - -// Script to use for this source. -func (a *CompositeAggregationHistogramValuesSource) Script(script *Script) *CompositeAggregationHistogramValuesSource { - a.script = script - return a -} - -// ValueType specifies the type of values produced by this source, -// e.g. "string" or "date".
-func (a *CompositeAggregationHistogramValuesSource) ValueType(valueType string) *CompositeAggregationHistogramValuesSource { - a.valueType = valueType - return a -} - -// Missing specifies the value to use when the source finds a missing -// value in a document. -// -// Deprecated: Use MissingBucket instead. -func (a *CompositeAggregationHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationHistogramValuesSource { - a.missing = missing - return a -} - -// MissingBucket, if true, will create an explicit null bucket which represents -// documents with missing values. -func (a *CompositeAggregationHistogramValuesSource) MissingBucket(missingBucket bool) *CompositeAggregationHistogramValuesSource { - a.missingBucket = &missingBucket - return a -} - -// Order specifies the order in the values produced by this source. -// It can be either "asc" or "desc". -func (a *CompositeAggregationHistogramValuesSource) Order(order string) *CompositeAggregationHistogramValuesSource { - a.order = order - return a -} - -// Asc ensures the order of the values produced is ascending. -func (a *CompositeAggregationHistogramValuesSource) Asc() *CompositeAggregationHistogramValuesSource { - a.order = "asc" - return a -} - -// Desc ensures the order of the values produced is descending. -func (a *CompositeAggregationHistogramValuesSource) Desc() *CompositeAggregationHistogramValuesSource { - a.order = "desc" - return a -} - -// Interval specifies the interval to use. -func (a *CompositeAggregationHistogramValuesSource) Interval(interval float64) *CompositeAggregationHistogramValuesSource { - a.interval = interval - return a -} - -// Source returns the serializable JSON for this values source. -func (a *CompositeAggregationHistogramValuesSource) Source() (interface{}, error) { - source := make(map[string]interface{}) - name := make(map[string]interface{}) - source[a.name] = name - values := make(map[string]interface{}) - name["histogram"] = values - - // field - if a.field != "" { - values["field"] = a.field - } - - // script - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - values["script"] = src - } - - // missing - if a.missing != nil { - values["missing"] = a.missing - } - - // missing_bucket - if a.missingBucket != nil { - values["missing_bucket"] = *a.missingBucket - } - - // value_type - if a.valueType != "" { - values["value_type"] = a.valueType - } - - // order - if a.order != "" { - values["order"] = a.order - } - - // Histogram-related properties - values["interval"] = a.interval - - return source, nil - -} - -// -- CompositeAggregationDateHistogramValuesSource -- - -// CompositeAggregationDateHistogramValuesSource is a source for the CompositeAggregation that handles date histograms. -// It works very similarly to a date histogram aggregation, with slightly different syntax. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/search-aggregations-bucket-composite-aggregation.html#_date_histogram -// for details. -type CompositeAggregationDateHistogramValuesSource struct { - name string - field string - script *Script - valueType string - missing interface{} - missingBucket *bool - order string - interval interface{} - fixedInterval interface{} - calendarInterval interface{} - format string - timeZone string -} - -// NewCompositeAggregationDateHistogramValuesSource creates and initializes -// a new CompositeAggregationDateHistogramValuesSource.
-func NewCompositeAggregationDateHistogramValuesSource(name string) *CompositeAggregationDateHistogramValuesSource { - return &CompositeAggregationDateHistogramValuesSource{ - name: name, - } -} - -// Field to use for this source. -func (a *CompositeAggregationDateHistogramValuesSource) Field(field string) *CompositeAggregationDateHistogramValuesSource { - a.field = field - return a -} - -// Script to use for this source. -func (a *CompositeAggregationDateHistogramValuesSource) Script(script *Script) *CompositeAggregationDateHistogramValuesSource { - a.script = script - return a -} - -// ValueType specifies the type of values produced by this source, -// e.g. "string" or "date". -func (a *CompositeAggregationDateHistogramValuesSource) ValueType(valueType string) *CompositeAggregationDateHistogramValuesSource { - a.valueType = valueType - return a -} - -// Missing specifies the value to use when the source finds a missing -// value in a document. -// -// Deprecated: Use MissingBucket instead. -func (a *CompositeAggregationDateHistogramValuesSource) Missing(missing interface{}) *CompositeAggregationDateHistogramValuesSource { - a.missing = missing - return a -} - -// MissingBucket, if true, will create an explicit null bucket which represents -// documents with missing values. -func (a *CompositeAggregationDateHistogramValuesSource) MissingBucket(missingBucket bool) *CompositeAggregationDateHistogramValuesSource { - a.missingBucket = &missingBucket - return a -} - -// Order specifies the order in the values produced by this source. -// It can be either "asc" or "desc". -func (a *CompositeAggregationDateHistogramValuesSource) Order(order string) *CompositeAggregationDateHistogramValuesSource { - a.order = order - return a -} - -// Asc ensures the order of the values produced is ascending. -func (a *CompositeAggregationDateHistogramValuesSource) Asc() *CompositeAggregationDateHistogramValuesSource { - a.order = "asc" - return a -} - -// Desc ensures the order of the values produced is descending. -func (a *CompositeAggregationDateHistogramValuesSource) Desc() *CompositeAggregationDateHistogramValuesSource { - a.order = "desc" - return a -} - -// Interval to use for the date histogram, e.g. "1d" or a numeric value like "60". -// -// Deprecated: Use FixedInterval or CalendarInterval instead. -func (a *CompositeAggregationDateHistogramValuesSource) Interval(interval interface{}) *CompositeAggregationDateHistogramValuesSource { - a.interval = interval - return a -} - -// FixedInterval to use for the date histogram, e.g. "1d" or a numeric value like "60". -func (a *CompositeAggregationDateHistogramValuesSource) FixedInterval(fixedInterval interface{}) *CompositeAggregationDateHistogramValuesSource { - a.fixedInterval = fixedInterval - return a -} - -// CalendarInterval to use for the date histogram, e.g. "1d" or a numeric value like "60". -func (a *CompositeAggregationDateHistogramValuesSource) CalendarInterval(calendarInterval interface{}) *CompositeAggregationDateHistogramValuesSource { - a.calendarInterval = calendarInterval - return a -} - -// Format to use for the date histogram, e.g. "strict_date_optional_time" -func (a *CompositeAggregationDateHistogramValuesSource) Format(format string) *CompositeAggregationDateHistogramValuesSource { - a.format = format - return a -} - -// TimeZone to use for the dates. 
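A quick sketch of configuring the date-histogram values source shown above; the field and format strings are illustrative, and FixedInterval supersedes the deprecated Interval:

// dailySource buckets documents by day and keeps an explicit null bucket
// for documents that have no timestamp at all.
func dailySource() elastic.CompositeAggregationValuesSource {
	return elastic.NewCompositeAggregationDateHistogramValuesSource("by_day").
		Field("timestamp").
		FixedInterval("1d").
		Format("strict_date_optional_time").
		MissingBucket(true)
}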
-func (a *CompositeAggregationDateHistogramValuesSource) TimeZone(timeZone string) *CompositeAggregationDateHistogramValuesSource { - a.timeZone = timeZone - return a -} - -// Source returns the serializable JSON for this values source. -func (a *CompositeAggregationDateHistogramValuesSource) Source() (interface{}, error) { - source := make(map[string]interface{}) - name := make(map[string]interface{}) - source[a.name] = name - values := make(map[string]interface{}) - name["date_histogram"] = values - - // field - if a.field != "" { - values["field"] = a.field - } - - // script - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - values["script"] = src - } - - // missing - if a.missing != nil { - values["missing"] = a.missing - } - - // missing_bucket - if a.missingBucket != nil { - values["missing_bucket"] = *a.missingBucket - } - - // value_type - if a.valueType != "" { - values["value_type"] = a.valueType - } - - // order - if a.order != "" { - values["order"] = a.order - } - - if a.format != "" { - values["format"] = a.format - } - - // DateHistogram-related properties - if v := a.interval; v != nil { - values["interval"] = v - } - if v := a.fixedInterval; v != nil { - values["fixed_interval"] = v - } - if v := a.calendarInterval; v != nil { - values["calendar_interval"] = v - } - - // timeZone - if a.timeZone != "" { - values["time_zone"] = a.timeZone - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_count_thresholds.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_count_thresholds.go deleted file mode 100644 index 53efdaf..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_count_thresholds.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketCountThresholds is used in e.g. terms and significant text aggregations. -type BucketCountThresholds struct { - MinDocCount *int64 - ShardMinDocCount *int64 - RequiredSize *int - ShardSize *int -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_histogram.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_histogram.go deleted file mode 100644 index 4ab567c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_histogram.go +++ /dev/null @@ -1,341 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DateHistogramAggregation is a multi-bucket aggregation similar to the -// histogram except it can only be applied on date values. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html -type DateHistogramAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - interval string - fixedInterval string - calendarInterval string - order string - orderAsc bool - minDocCount *int64 - extendedBoundsMin interface{} - extendedBoundsMax interface{} - timeZone string - format string - offset string - keyed *bool -} - -// NewDateHistogramAggregation creates a new DateHistogramAggregation. 
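With the values sources above in hand, the usual consumption pattern pages through composite buckets until no after_key comes back. A sketch reusing the hypothetical buildCompositeAgg helper from earlier; the index name is invented, and the Composite/AfterKey response accessors reflect my reading of the v7 client and should be double-checked against the pinned release:

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func printAllUserBuckets(ctx context.Context, client *elastic.Client) error {
	var after map[string]interface{}
	for {
		res, err := client.Search("file-events").
			Size(0). // hits are irrelevant; only the aggregation matters
			Aggregation("users", buildCompositeAgg(after)).
			Do(ctx)
		if err != nil {
			return err
		}
		comp, ok := res.Aggregations.Composite("users")
		if !ok || len(comp.Buckets) == 0 {
			return nil // nothing (more) to page through
		}
		for _, bucket := range comp.Buckets {
			fmt.Printf("%v: %d docs\n", bucket.Key, bucket.DocCount)
		}
		if after = comp.AfterKey; after == nil {
			return nil // final page carried no after_key
		}
	}
}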
-func NewDateHistogramAggregation() *DateHistogramAggregation { - return &DateHistogramAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -// Field on which the aggregation is processed. -func (a *DateHistogramAggregation) Field(field string) *DateHistogramAggregation { - a.field = field - return a -} - -func (a *DateHistogramAggregation) Script(script *Script) *DateHistogramAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *DateHistogramAggregation) Missing(missing interface{}) *DateHistogramAggregation { - a.missing = missing - return a -} - -func (a *DateHistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *DateHistogramAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DateHistogramAggregation) Meta(metaData map[string]interface{}) *DateHistogramAggregation { - a.meta = metaData - return a -} - -// Interval by which the aggregation gets processed. This field -// will be replaced by the two FixedInterval and CalendarInterval -// fields (see below). -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/search-aggregations-bucket-datehistogram-aggregation.html -// -// Deprecated: This field will be removed in the future. -func (a *DateHistogramAggregation) Interval(interval string) *DateHistogramAggregation { - a.interval = interval - return a -} - -// FixedInterval by which the aggregation gets processed. -// -// Allowed values are: "year", "1y", "quarter", "1q", "month", "1M", -// "week", "1w", "day", "1d", "hour", "1h", "minute", "1m", "second", -// or "1s". It also supports time settings like "1.5h". -// -// These units are not calendar-aware and are simply multiples of -// fixed, SI units. This is mutually exclusive with CalendarInterval. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/search-aggregations-bucket-datehistogram-aggregation.html -func (a *DateHistogramAggregation) FixedInterval(fixedInterval string) *DateHistogramAggregation { - a.fixedInterval = fixedInterval - return a -} - -// CalendarInterval by which the aggregation gets processed. -// -// Allowed values are: "year" ("1y", "y"), "quarter" ("1q", "q"), -// "month" ("1M", "M"), "week" ("1w", "w"), "day" ("d", "1d") -// -// These units are calendar-aware, meaning they respect leap -// additions, variable days per month etc. This is mutually -// exclusive with FixedInterval. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/search-aggregations-bucket-datehistogram-aggregation.html -func (a *DateHistogramAggregation) CalendarInterval(calendarInterval string) *DateHistogramAggregation { - a.calendarInterval = calendarInterval - return a -} - -// Order specifies the sort order. Valid values for order are: -// "_key", "_count", a sub-aggregation name, or a sub-aggregation name -// with a metric. 
-func (a *DateHistogramAggregation) Order(order string, asc bool) *DateHistogramAggregation { - a.order = order - a.orderAsc = asc - return a -} - -func (a *DateHistogramAggregation) OrderByCount(asc bool) *DateHistogramAggregation { - // "order" : { "_count" : "asc" } - a.order = "_count" - a.orderAsc = asc - return a -} - -func (a *DateHistogramAggregation) OrderByCountAsc() *DateHistogramAggregation { - return a.OrderByCount(true) -} - -func (a *DateHistogramAggregation) OrderByCountDesc() *DateHistogramAggregation { - return a.OrderByCount(false) -} - -func (a *DateHistogramAggregation) OrderByKey(asc bool) *DateHistogramAggregation { - // "order" : { "_key" : "asc" } - a.order = "_key" - a.orderAsc = asc - return a -} - -func (a *DateHistogramAggregation) OrderByKeyAsc() *DateHistogramAggregation { - return a.OrderByKey(true) -} - -func (a *DateHistogramAggregation) OrderByKeyDesc() *DateHistogramAggregation { - return a.OrderByKey(false) -} - -// OrderByAggregation creates a bucket ordering strategy which sorts buckets -// based on a single-valued calc get. -func (a *DateHistogramAggregation) OrderByAggregation(aggName string, asc bool) *DateHistogramAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "avg_height" : "desc" } - // }, - // "aggs" : { - // "avg_height" : { "avg" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName - a.orderAsc = asc - return a -} - -// OrderByAggregationAndMetric creates a bucket ordering strategy which -// sorts buckets based on a multi-valued calc get. -func (a *DateHistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *DateHistogramAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "height_stats.avg" : "desc" } - // }, - // "aggs" : { - // "height_stats" : { "stats" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName + "." + metric - a.orderAsc = asc - return a -} - -// MinDocCount sets the minimum document count per bucket. -// Buckets with less documents than this min value will not be returned. -func (a *DateHistogramAggregation) MinDocCount(minDocCount int64) *DateHistogramAggregation { - a.minDocCount = &minDocCount - return a -} - -// TimeZone sets the timezone in which to translate dates before computing buckets. -func (a *DateHistogramAggregation) TimeZone(timeZone string) *DateHistogramAggregation { - a.timeZone = timeZone - return a -} - -// Format sets the format to use for dates. -func (a *DateHistogramAggregation) Format(format string) *DateHistogramAggregation { - a.format = format - return a -} - -// Offset sets the offset of time intervals in the histogram, e.g. "+6h". -func (a *DateHistogramAggregation) Offset(offset string) *DateHistogramAggregation { - a.offset = offset - return a -} - -// ExtendedBounds accepts int, int64, string, or time.Time values. -// In case the lower value in the histogram would be greater than min or the -// upper value would be less than max, empty buckets will be generated. -func (a *DateHistogramAggregation) ExtendedBounds(min, max interface{}) *DateHistogramAggregation { - a.extendedBoundsMin = min - a.extendedBoundsMax = max - return a -} - -// ExtendedBoundsMin accepts int, int64, string, or time.Time values. 
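As a usage note for the date-histogram builder being removed here (the API is unchanged in the upstream module), a small sketch with invented field names:

// eventsPerDay counts documents per calendar day and averages a numeric
// field inside each bucket; MinDocCount(0) keeps empty days visible.
func eventsPerDay() *elastic.DateHistogramAggregation {
	return elastic.NewDateHistogramAggregation().
		Field("event_timestamp").
		CalendarInterval("1d").
		TimeZone("UTC").
		MinDocCount(0).
		SubAggregation("avg_size", elastic.NewAvgAggregation().Field("file_size"))
}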
-func (a *DateHistogramAggregation) ExtendedBoundsMin(min interface{}) *DateHistogramAggregation { - a.extendedBoundsMin = min - return a -} - -// ExtendedBoundsMax accepts int, int64, string, or time.Time values. -func (a *DateHistogramAggregation) ExtendedBoundsMax(max interface{}) *DateHistogramAggregation { - a.extendedBoundsMax = max - return a -} - -// Keyed specifies whether to return the results with a keyed response (or not). -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-datehistogram-aggregation.html#_keyed_response_3. -func (a *DateHistogramAggregation) Keyed(keyed bool) *DateHistogramAggregation { - a.keyed = &keyed - return a -} - -func (a *DateHistogramAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "articles_over_time" : { - // "date_histogram" : { - // "field" : "date", - // "fixed_interval" : "month" - // } - // } - // } - // } - // - // This method returns only the { "date_histogram" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["date_histogram"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - if s := a.interval; s != "" { - opts["interval"] = s - } - if s := a.fixedInterval; s != "" { - opts["fixed_interval"] = s - } - if s := a.calendarInterval; s != "" { - opts["calendar_interval"] = s - } - - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.order != "" { - o := make(map[string]interface{}) - if a.orderAsc { - o[a.order] = "asc" - } else { - o[a.order] = "desc" - } - opts["order"] = o - } - if a.timeZone != "" { - opts["time_zone"] = a.timeZone - } - if a.offset != "" { - opts["offset"] = a.offset - } - if a.format != "" { - opts["format"] = a.format - } - if a.extendedBoundsMin != nil || a.extendedBoundsMax != nil { - bounds := make(map[string]interface{}) - if a.extendedBoundsMin != nil { - bounds["min"] = a.extendedBoundsMin - } - if a.extendedBoundsMax != nil { - bounds["max"] = a.extendedBoundsMax - } - opts["extended_bounds"] = bounds - } - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_range.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_range.go deleted file mode 100644 index a4becaa..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_date_range.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "time" -) - -// DateRangeAggregation is a range aggregation that is dedicated for -// date values. 
The main difference between this aggregation and the -// normal range aggregation is that the from and to values can be expressed -// in Date Math expressions, and it is also possible to specify a -// date format by which the from and to response fields will be returned. -// Note that this aggregation includes the from value and excludes the to -// value for each range. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-daterange-aggregation.html -type DateRangeAggregation struct { - field string - script *Script - subAggregations map[string]Aggregation - meta map[string]interface{} - keyed *bool - unmapped *bool - timeZone string - format string - entries []DateRangeAggregationEntry -} - -type DateRangeAggregationEntry struct { - Key string - From interface{} - To interface{} -} - -func NewDateRangeAggregation() *DateRangeAggregation { - return &DateRangeAggregation{ - subAggregations: make(map[string]Aggregation), - entries: make([]DateRangeAggregationEntry, 0), - } -} - -func (a *DateRangeAggregation) Field(field string) *DateRangeAggregation { - a.field = field - return a -} - -func (a *DateRangeAggregation) Script(script *Script) *DateRangeAggregation { - a.script = script - return a -} - -func (a *DateRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *DateRangeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DateRangeAggregation) Meta(metaData map[string]interface{}) *DateRangeAggregation { - a.meta = metaData - return a -} - -func (a *DateRangeAggregation) Keyed(keyed bool) *DateRangeAggregation { - a.keyed = &keyed - return a -} - -func (a *DateRangeAggregation) Unmapped(unmapped bool) *DateRangeAggregation { - a.unmapped = &unmapped - return a -} - -func (a *DateRangeAggregation) TimeZone(timeZone string) *DateRangeAggregation { - a.timeZone = timeZone - return a -} - -func (a *DateRangeAggregation) Format(format string) *DateRangeAggregation { - a.format = format - return a -} - -func (a *DateRangeAggregation) AddRange(from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) AddRangeWithKey(key string, from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedTo(from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedFrom(to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) Lt(to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) LtWithKey(key string, to interface{}) *DateRangeAggregation { -
a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *DateRangeAggregation) Between(from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) BetweenWithKey(key string, from, to interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *DateRangeAggregation) Gt(from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) GtWithKey(key string, from interface{}) *DateRangeAggregation { - a.entries = append(a.entries, DateRangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *DateRangeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "range" : { - // "date_range": { - // "field": "date", - // "format": "MM-yyy", - // "ranges": [ - // { "to": "now-10M/M" }, - // { "from": "now-10M/M" } - // ] - // } - // } - // } - // } - // } - // - // This method returns only the { "date_range" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["date_range"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - if a.unmapped != nil { - opts["unmapped"] = *a.unmapped - } - if a.timeZone != "" { - opts["time_zone"] = a.timeZone - } - if a.format != "" { - opts["format"] = a.format - } - - var ranges []interface{} - for _, ent := range a.entries { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case *int, *int16, *int32, *int64, *float32, *float64: - r["from"] = from - case time.Time: - r["from"] = from.Format(time.RFC3339) - case *time.Time: - r["from"] = from.Format(time.RFC3339) - case string: - r["from"] = from - case *string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case *int, *int16, *int32, *int64, *float32, *float64: - r["to"] = to - case time.Time: - r["to"] = to.Format(time.RFC3339) - case *time.Time: - r["to"] = to.Format(time.RFC3339) - case string: - r["to"] = to - case *string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_diversified_sampler.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_diversified_sampler.go deleted file mode 100644 index 6d3a448..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_diversified_sampler.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2012-present 
Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DiversifiedSamplerAggregation is, like the `sampler` aggregation, a filtering aggregation used to limit any -// sub-aggregations' processing to a sample of the top-scoring documents. The diversified_sampler aggregation adds -// the ability to limit the number of matches that share a common value such as an "author". -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-diversified-sampler-aggregation.html -type DiversifiedSamplerAggregation struct { - subAggregations map[string]Aggregation - meta map[string]interface{} - field string - script *Script - shardSize int - maxDocsPerValue int - executionHint string -} - -func NewDiversifiedSamplerAggregation() *DiversifiedSamplerAggregation { - return &DiversifiedSamplerAggregation{ - shardSize: -1, - maxDocsPerValue: -1, - subAggregations: make(map[string]Aggregation), - } -} - -func (a *DiversifiedSamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *DiversifiedSamplerAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DiversifiedSamplerAggregation) Meta(metaData map[string]interface{}) *DiversifiedSamplerAggregation { - a.meta = metaData - return a -} - -// Field on which the aggregation is processed. -func (a *DiversifiedSamplerAggregation) Field(field string) *DiversifiedSamplerAggregation { - a.field = field - return a -} - -func (a *DiversifiedSamplerAggregation) Script(script *Script) *DiversifiedSamplerAggregation { - a.script = script - return a -} - -// ShardSize sets the maximum number of docs returned from each shard. -func (a *DiversifiedSamplerAggregation) ShardSize(shardSize int) *DiversifiedSamplerAggregation { - a.shardSize = shardSize - return a -} - -func (a *DiversifiedSamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *DiversifiedSamplerAggregation { - a.maxDocsPerValue = maxDocsPerValue - return a -} - -func (a *DiversifiedSamplerAggregation) ExecutionHint(hint string) *DiversifiedSamplerAggregation { - a.executionHint = hint - return a -} - -func (a *DiversifiedSamplerAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs": { - // "my_unbiased_sample": { - // "diversified_sampler": { - // "shard_size": 200, - // "field" : "author" - // } - // } - // } - // } - // - // This method returns only the { "diversified_sampler" : { ... } } part.
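Driving this sampler from client code looks roughly like the following; the field names are invented:

// unbiasedSample caps the analyzed set at 200 docs per shard and at most
// 3 docs per distinct author before running the nested terms aggregation.
func unbiasedSample() *elastic.DiversifiedSamplerAggregation {
	return elastic.NewDiversifiedSamplerAggregation().
		Field("author").
		ShardSize(200).
		MaxDocsPerValue(3).
		SubAggregation("keywords", elastic.NewTermsAggregation().Field("tags"))
}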
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["diversified_sampler"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.shardSize >= 0 { - opts["shard_size"] = a.shardSize - } - if a.maxDocsPerValue >= 0 { - opts["max_docs_per_value"] = a.maxDocsPerValue - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filter.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filter.go deleted file mode 100644 index c002c81..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filter.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// FilterAggregation defines a single bucket of all the documents -// in the current document set context that match a specified filter. -// Often this will be used to narrow down the current aggregation context -// to a specific set of documents. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filter-aggregation.html -type FilterAggregation struct { - filter Query - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewFilterAggregation() *FilterAggregation { - return &FilterAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *FilterAggregation) SubAggregation(name string, subAggregation Aggregation) *FilterAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *FilterAggregation) Meta(metaData map[string]interface{}) *FilterAggregation { - a.meta = metaData - return a -} - -func (a *FilterAggregation) Filter(filter Query) *FilterAggregation { - a.filter = filter - return a -} - -func (a *FilterAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "in_stock_products" : { - // "filter" : { "range" : { "stock" : { "gt" : 0 } } } - // } - // } - // } - // This method returns only the { "filter" : {} } part. 
- - src, err := a.filter.Source() - if err != nil { - return nil, err - } - source := make(map[string]interface{}) - source["filter"] = src - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filters.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filters.go deleted file mode 100644 index d2ea5ea..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_filters.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// FiltersAggregation defines a multi-bucket aggregation where each bucket -// is associated with a filter. Each bucket will collect all documents that -// match its associated filter. - -// Notice that the caller has to decide whether to add filters by name -// (using FilterWithName) or unnamed filters (using Filter or Filters). One cannot -// use both named and unnamed filters. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-filters-aggregation.html -type FiltersAggregation struct { - unnamedFilters []Query - namedFilters map[string]Query - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -// NewFiltersAggregation initializes a new FiltersAggregation. -func NewFiltersAggregation() *FiltersAggregation { - return &FiltersAggregation{ - unnamedFilters: make([]Query, 0), - namedFilters: make(map[string]Query), - subAggregations: make(map[string]Aggregation), - } -} - -// Filter adds an unnamed filter. Notice that you can -// either use named or unnamed filters, but not both. -func (a *FiltersAggregation) Filter(filter Query) *FiltersAggregation { - a.unnamedFilters = append(a.unnamedFilters, filter) - return a -} - -// Filters adds one or more unnamed filters. Notice that you can -// either use named or unnamed filters, but not both. -func (a *FiltersAggregation) Filters(filters ...Query) *FiltersAggregation { - if len(filters) > 0 { - a.unnamedFilters = append(a.unnamedFilters, filters...) - } - return a -} - -// FilterWithName adds a filter with a specific name. Notice that you can -// either use named or unnamed filters, but not both. -func (a *FiltersAggregation) FilterWithName(name string, filter Query) *FiltersAggregation { - a.namedFilters[name] = filter - return a -} - -// SubAggregation adds a sub-aggregation to this aggregation. -func (a *FiltersAggregation) SubAggregation(name string, subAggregation Aggregation) *FiltersAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *FiltersAggregation) Meta(metaData map[string]interface{}) *FiltersAggregation { - a.meta = metaData - return a -} - -// Source returns a JSON-serializable interface. -// If the aggregation is invalid, an error is returned. This may e.g. happen -// if you mixed named and unnamed filters.
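Both filter-style builders share the same shape: wrap queries, then hang sub-aggregations off the resulting bucket(s). A sketch with invented field names; note that mixing named and unnamed filters is rejected by the Source implementation just below:

// inStock narrows the aggregation context to documents with stock > 0.
func inStock() elastic.Aggregation {
	return elastic.NewFilterAggregation().
		Filter(elastic.NewRangeQuery("stock").Gt(0)).
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))
}

// messageBreakdown buckets documents into named error/warning filters.
func messageBreakdown() elastic.Aggregation {
	return elastic.NewFiltersAggregation().
		FilterWithName("errors", elastic.NewTermQuery("body", "error")).
		FilterWithName("warnings", elastic.NewTermQuery("body", "warning"))
}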
-func (a *FiltersAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "messages" : { - // "filters" : { - // "filters" : { - // "errors" : { "term" : { "body" : "error" }}, - // "warnings" : { "term" : { "body" : "warning" }} - // } - // } - // } - // } - // } - // This method returns only the (outer) { "filters" : {} } part. - - source := make(map[string]interface{}) - filters := make(map[string]interface{}) - source["filters"] = filters - - if len(a.unnamedFilters) > 0 && len(a.namedFilters) > 0 { - return nil, errors.New("elastic: use either named or unnamed filters with FiltersAggregation but not both") - } - - if len(a.unnamedFilters) > 0 { - arr := make([]interface{}, len(a.unnamedFilters)) - for i, filter := range a.unnamedFilters { - src, err := filter.Source() - if err != nil { - return nil, err - } - arr[i] = src - } - filters["filters"] = arr - } else { - dict := make(map[string]interface{}) - for key, filter := range a.namedFilters { - src, err := filter.Source() - if err != nil { - return nil, err - } - dict[key] = src - } - filters["filters"] = dict - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geo_distance.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geo_distance.go deleted file mode 100644 index ee8dd0c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geo_distance.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoDistanceAggregation is a multi-bucket aggregation that works on geo_point fields -// and conceptually works very similarly to the range aggregation. -// The user can define a point of origin and a set of distance range buckets. -// The aggregation evaluates the distance of each document value from -// the origin point and determines the buckets it belongs to based on -// the ranges (a document belongs to a bucket if the distance between the -// document and the origin falls within the distance range of the bucket).
-// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geodistance-aggregation.html -type GeoDistanceAggregation struct { - field string - unit string - distanceType string - point string - ranges []geoDistAggRange - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -type geoDistAggRange struct { - Key string - From interface{} - To interface{} -} - -func NewGeoDistanceAggregation() *GeoDistanceAggregation { - return &GeoDistanceAggregation{ - subAggregations: make(map[string]Aggregation), - ranges: make([]geoDistAggRange, 0), - } -} - -func (a *GeoDistanceAggregation) Field(field string) *GeoDistanceAggregation { - a.field = field - return a -} - -func (a *GeoDistanceAggregation) Unit(unit string) *GeoDistanceAggregation { - a.unit = unit - return a -} - -func (a *GeoDistanceAggregation) DistanceType(distanceType string) *GeoDistanceAggregation { - a.distanceType = distanceType - return a -} - -func (a *GeoDistanceAggregation) Point(latLon string) *GeoDistanceAggregation { - a.point = latLon - return a -} - -func (a *GeoDistanceAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoDistanceAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *GeoDistanceAggregation) Meta(metaData map[string]interface{}) *GeoDistanceAggregation { - a.meta = metaData - return a -} -func (a *GeoDistanceAggregation) AddRange(from, to interface{}) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) - return a -} - -func (a *GeoDistanceAggregation) AddRangeWithKey(key string, from, to interface{}) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) - return a -} - -func (a *GeoDistanceAggregation) AddUnboundedTo(from float64) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{From: from, To: nil}) - return a -} - -func (a *GeoDistanceAggregation) AddUnboundedToWithKey(key string, from float64) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: nil}) - return a -} - -func (a *GeoDistanceAggregation) AddUnboundedFrom(to float64) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{From: nil, To: to}) - return a -} - -func (a *GeoDistanceAggregation) AddUnboundedFromWithKey(key string, to float64) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: nil, To: to}) - return a -} - -func (a *GeoDistanceAggregation) Between(from, to interface{}) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{From: from, To: to}) - return a -} - -func (a *GeoDistanceAggregation) BetweenWithKey(key string, from, to interface{}) *GeoDistanceAggregation { - a.ranges = append(a.ranges, geoDistAggRange{Key: key, From: from, To: to}) - return a -} - -func (a *GeoDistanceAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "rings_around_amsterdam" : { - // "geo_distance" : { - // "field" : "location", - // "origin" : "52.3760, 4.894", - // "ranges" : [ - // { "to" : 100 }, - // { "from" : 100, "to" : 300 }, - // { "from" : 300 } - // ] - // } - // } - // } - // } - // - // This method returns only the { "geo_distance" : { ... } } part.
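The rings_around_amsterdam example in the comment above maps onto the builder as follows. Note the slightly counterintuitive naming: AddUnboundedTo leaves the upper bound open and AddUnboundedFrom leaves the lower bound open, as the range structs above show:

func ringsAroundAmsterdam() *elastic.GeoDistanceAggregation {
	return elastic.NewGeoDistanceAggregation().
		Field("location").
		Point("52.3760, 4.894"). // origin as "lat, lon"
		Unit("km").
		AddUnboundedFrom(100). // { "to": 100 }
		AddRange(100, 300).    // { "from": 100, "to": 300 }
		AddUnboundedTo(300)    // { "from": 300 }
}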
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geo_distance"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.unit != "" { - opts["unit"] = a.unit - } - if a.distanceType != "" { - opts["distance_type"] = a.distanceType - } - if a.point != "" { - opts["origin"] = a.point - } - - var ranges []interface{} - for _, ent := range a.ranges { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case *int, *int16, *int32, *int64, *float32, *float64: - r["from"] = from - case string: - r["from"] = from - case *string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case *int, *int16, *int32, *int64, *float32, *float64: - r["to"] = to - case string: - r["to"] = to - case *string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geohash_grid.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geohash_grid.go deleted file mode 100644 index 1a56b57..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_geohash_grid.go +++ /dev/null @@ -1,104 +0,0 @@ -package elastic - -type GeoHashGridAggregation struct { - field string - precision interface{} - size int - shardSize int - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGeoHashGridAggregation() *GeoHashGridAggregation { - return &GeoHashGridAggregation{ - subAggregations: make(map[string]Aggregation), - size: -1, - shardSize: -1, - } -} - -func (a *GeoHashGridAggregation) Field(field string) *GeoHashGridAggregation { - a.field = field - return a -} - -// Precision accepts the level as int value between 1 and 12 or Distance Units like "2km", "5mi" as described at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/common-options.html#distance-units and -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-geohashgrid-aggregation.html -func (a *GeoHashGridAggregation) Precision(precision interface{}) *GeoHashGridAggregation { - a.precision = precision - return a -} - -func (a *GeoHashGridAggregation) Size(size int) *GeoHashGridAggregation { - a.size = size - return a -} - -func (a *GeoHashGridAggregation) ShardSize(shardSize int) *GeoHashGridAggregation { - a.shardSize = shardSize - return a -} - -func (a *GeoHashGridAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoHashGridAggregation { - a.subAggregations[name] = subAggregation - return a -} - -func (a *GeoHashGridAggregation) Meta(metaData map[string]interface{}) *GeoHashGridAggregation { - a.meta = metaData - return a -} - -func (a *GeoHashGridAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs": { - // "new_york": { - // "geohash_grid": { - // "field": "location", - // "precision": 5 - // } - // } - // } - // } - - 
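Geohash-grid usage is a near one-liner by comparison; precision 5 matches the new_york example above, and a distance string such as "2km" would also be accepted:

func newYorkGrid() *elastic.GeoHashGridAggregation {
	return elastic.NewGeoHashGridAggregation().
		Field("location"). // hypothetical geo_point field
		Precision(5)       // int level 1-12, or a distance unit string
}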
source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geohash_grid"] = opts - - if a.field != "" { - opts["field"] = a.field - } - - if a.precision != nil { - opts["precision"] = a.precision - } - - if a.size != -1 { - opts["size"] = a.size - } - - if a.shardSize != -1 { - opts["shard_size"] = a.shardSize - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_global.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_global.go deleted file mode 100644 index 2e07f5d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_global.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GlobalAggregation defines a single bucket of all the documents within -// the search execution context. This context is defined by the indices -// and the document types you’re searching on, but is not influenced -// by the search query itself. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-global-aggregation.html -type GlobalAggregation struct { - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGlobalAggregation() *GlobalAggregation { - return &GlobalAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *GlobalAggregation) SubAggregation(name string, subAggregation Aggregation) *GlobalAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *GlobalAggregation) Meta(metaData map[string]interface{}) *GlobalAggregation { - a.meta = metaData - return a -} - -func (a *GlobalAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "all_products" : { - // "global" : {}, - // "aggs" : { - // "avg_price" : { "avg" : { "field" : "price" } } - // } - // } - // } - // } - // This method returns only the { "global" : {} } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["global"] = opts - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_histogram.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_histogram.go deleted file mode 100644 index 69021e6..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_histogram.go +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
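The global bucket is typically paired with a metric sub-aggregation so that a query-filtered search can still report an unfiltered figure, as in the avg_price example in the deleted comment:

func allProductsAvgPrice() *elastic.GlobalAggregation {
	return elastic.NewGlobalAggregation().
		SubAggregation("avg_price", elastic.NewAvgAggregation().Field("price"))
}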
- -package elastic - -// HistogramAggregation is a multi-bucket values source based aggregation -// that can be applied on numeric values extracted from the documents. -// It dynamically builds fixed size (a.k.a. interval) buckets over the -// values. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-histogram-aggregation.html -type HistogramAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - interval float64 - order string - orderAsc bool - minDocCount *int64 - minBounds *float64 - maxBounds *float64 - offset *float64 -} - -func NewHistogramAggregation() *HistogramAggregation { - return &HistogramAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *HistogramAggregation) Field(field string) *HistogramAggregation { - a.field = field - return a -} - -func (a *HistogramAggregation) Script(script *Script) *HistogramAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *HistogramAggregation) Missing(missing interface{}) *HistogramAggregation { - a.missing = missing - return a -} - -func (a *HistogramAggregation) SubAggregation(name string, subAggregation Aggregation) *HistogramAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *HistogramAggregation) Meta(metaData map[string]interface{}) *HistogramAggregation { - a.meta = metaData - return a -} - -// Interval for this builder, must be greater than 0. -func (a *HistogramAggregation) Interval(interval float64) *HistogramAggregation { - a.interval = interval - return a -} - -// Order specifies the sort order. Valid values for order are: -// "_key", "_count", a sub-aggregation name, or a sub-aggregation name -// with a metric. -func (a *HistogramAggregation) Order(order string, asc bool) *HistogramAggregation { - a.order = order - a.orderAsc = asc - return a -} - -func (a *HistogramAggregation) OrderByCount(asc bool) *HistogramAggregation { - // "order" : { "_count" : "asc" } - a.order = "_count" - a.orderAsc = asc - return a -} - -func (a *HistogramAggregation) OrderByCountAsc() *HistogramAggregation { - return a.OrderByCount(true) -} - -func (a *HistogramAggregation) OrderByCountDesc() *HistogramAggregation { - return a.OrderByCount(false) -} - -func (a *HistogramAggregation) OrderByKey(asc bool) *HistogramAggregation { - // "order" : { "_key" : "asc" } - a.order = "_key" - a.orderAsc = asc - return a -} - -func (a *HistogramAggregation) OrderByKeyAsc() *HistogramAggregation { - return a.OrderByKey(true) -} - -func (a *HistogramAggregation) OrderByKeyDesc() *HistogramAggregation { - return a.OrderByKey(false) -} - -// OrderByAggregation creates a bucket ordering strategy which sorts buckets -// based on a single-valued calc get. -func (a *HistogramAggregation) OrderByAggregation(aggName string, asc bool) *HistogramAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "avg_height" : "desc" } - // }, - // "aggs" : { - // "avg_height" : { "avg" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName - a.orderAsc = asc - return a -} - -// OrderByAggregationAndMetric creates a bucket ordering strategy which -// sorts buckets based on a multi-valued calc get. 
-func (a *HistogramAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *HistogramAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "height_stats.avg" : "desc" } - // }, - // "aggs" : { - // "height_stats" : { "stats" : { "field" : "height" } } - // } - // } - // } - // } - a.order = aggName + "." + metric - a.orderAsc = asc - return a -} - -func (a *HistogramAggregation) MinDocCount(minDocCount int64) *HistogramAggregation { - a.minDocCount = &minDocCount - return a -} - -func (a *HistogramAggregation) ExtendedBounds(min, max float64) *HistogramAggregation { - a.minBounds = &min - a.maxBounds = &max - return a -} - -func (a *HistogramAggregation) ExtendedBoundsMin(min float64) *HistogramAggregation { - a.minBounds = &min - return a -} - -func (a *HistogramAggregation) MinBounds(min float64) *HistogramAggregation { - a.minBounds = &min - return a -} - -func (a *HistogramAggregation) ExtendedBoundsMax(max float64) *HistogramAggregation { - a.maxBounds = &max - return a -} - -func (a *HistogramAggregation) MaxBounds(max float64) *HistogramAggregation { - a.maxBounds = &max - return a -} - -// Offset into the histogram -func (a *HistogramAggregation) Offset(offset float64) *HistogramAggregation { - a.offset = &offset - return a -} - -func (a *HistogramAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "prices" : { - // "histogram" : { - // "field" : "price", - // "interval" : 50 - // } - // } - // } - // } - // - // This method returns only the { "histogram" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["histogram"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - opts["interval"] = a.interval - if a.order != "" { - o := make(map[string]interface{}) - if a.orderAsc { - o[a.order] = "asc" - } else { - o[a.order] = "desc" - } - opts["order"] = o - } - if a.offset != nil { - opts["offset"] = *a.offset - } - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.minBounds != nil || a.maxBounds != nil { - bounds := make(map[string]interface{}) - if a.minBounds != nil { - bounds["min"] = a.minBounds - } - if a.maxBounds != nil { - bounds["max"] = a.maxBounds - } - opts["extended_bounds"] = bounds - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_ip_range.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_ip_range.go deleted file mode 100644 index 76ebe0d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_ip_range.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
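A sketch of the numeric histogram builder removed above, with an invented field, interval, and bounds:

// priceHistogram buckets prices into width-50 intervals and forces the
// 0..500 range to appear even where buckets would otherwise be empty.
func priceHistogram() *elastic.HistogramAggregation {
	return elastic.NewHistogramAggregation().
		Field("price").
		Interval(50). // bucket width; must be greater than 0
		MinDocCount(0).
		ExtendedBounds(0, 500)
}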
- -package elastic - -// IPRangeAggregation is a range aggregation that is dedicated for -// IP addresses. -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-iprange-aggregation.html -type IPRangeAggregation struct { - field string - subAggregations map[string]Aggregation - meta map[string]interface{} - keyed *bool - entries []IPRangeAggregationEntry -} - -type IPRangeAggregationEntry struct { - Key string - Mask string - From string - To string -} - -func NewIPRangeAggregation() *IPRangeAggregation { - return &IPRangeAggregation{ - subAggregations: make(map[string]Aggregation), - entries: make([]IPRangeAggregationEntry, 0), - } -} - -func (a *IPRangeAggregation) Field(field string) *IPRangeAggregation { - a.field = field - return a -} - -func (a *IPRangeAggregation) SubAggregation(name string, subAggregation Aggregation) *IPRangeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *IPRangeAggregation) Meta(metaData map[string]interface{}) *IPRangeAggregation { - a.meta = metaData - return a -} - -func (a *IPRangeAggregation) Keyed(keyed bool) *IPRangeAggregation { - a.keyed = &keyed - return a -} - -func (a *IPRangeAggregation) AddMaskRange(mask string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Mask: mask}) - return a -} - -func (a *IPRangeAggregation) AddMaskRangeWithKey(key, mask string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, Mask: mask}) - return a -} - -func (a *IPRangeAggregation) AddRange(from, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *IPRangeAggregation) AddRangeWithKey(key, from, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *IPRangeAggregation) AddUnboundedTo(from string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""}) - return a -} - -func (a *IPRangeAggregation) AddUnboundedToWithKey(key, from string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""}) - return a -} - -func (a *IPRangeAggregation) AddUnboundedFrom(to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to}) - return a -} - -func (a *IPRangeAggregation) AddUnboundedFromWithKey(key, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to}) - return a -} - -func (a *IPRangeAggregation) Lt(to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: "", To: to}) - return a -} - -func (a *IPRangeAggregation) LtWithKey(key, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: "", To: to}) - return a -} - -func (a *IPRangeAggregation) Between(from, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *IPRangeAggregation) BetweenWithKey(key, from, to string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *IPRangeAggregation) Gt(from string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{From: from, To: ""}) 
- return a -} - -func (a *IPRangeAggregation) GtWithKey(key, from string) *IPRangeAggregation { - a.entries = append(a.entries, IPRangeAggregationEntry{Key: key, From: from, To: ""}) - return a -} - -func (a *IPRangeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "range" : { - // "ip_range": { - // "field": "ip", - // "ranges": [ - // { "to": "10.0.0.5" }, - // { "from": "10.0.0.5" } - // ] - // } - // } - // } - // } - // } - // - // This method returns only the { "ip_range" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["ip_range"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - - var ranges []interface{} - for _, ent := range a.entries { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.Mask != "" { - r["mask"] = ent.Mask - } else { - if ent.From != "" { - r["from"] = ent.From - } - if ent.To != "" { - r["to"] = ent.To - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_missing.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_missing.go deleted file mode 100644 index 68eba98..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_missing.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MissingAggregation is a field data based single bucket aggregation, -// that creates a bucket of all documents in the current document set context -// that are missing a field value (effectively, missing a field or having -// the configured NULL value set). This aggregator will often be used in -// conjunction with other field data bucket aggregators (such as ranges) -// to return information for all the documents that could not be placed -// in any of the other buckets due to missing field data values. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-missing-aggregation.html -type MissingAggregation struct { - field string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMissingAggregation() *MissingAggregation { - return &MissingAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MissingAggregation) Field(field string) *MissingAggregation { - a.field = field - return a -} - -func (a *MissingAggregation) SubAggregation(name string, subAggregation Aggregation) *MissingAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
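Same pattern for the IP-range builder deleted here. `Lt`/`Gt` are the open-ended helpers defined above; the `ip` field is from the doc comment, while the CIDR mask and its key are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	agg := elastic.NewIPRangeAggregation().
		Field("ip").
		Lt("10.0.0.5").                              // {"to":"10.0.0.5"}
		Gt("10.0.0.5").                              // {"from":"10.0.0.5"}
		AddMaskRangeWithKey("office", "10.0.0.0/25") // keyed CIDR bucket

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"ip_range":{"field":"ip","ranges":[{"to":"10.0.0.5"},{"from":"10.0.0.5"},
	//              {"key":"office","mask":"10.0.0.0/25"}]}}
}
```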
-func (a *MissingAggregation) Meta(metaData map[string]interface{}) *MissingAggregation { - a.meta = metaData - return a -} - -func (a *MissingAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "products_without_a_price" : { - // "missing" : { "field" : "price" } - // } - // } - // } - // This method returns only the { "missing" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["missing"] = opts - - if a.field != "" { - opts["field"] = a.field - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_nested.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_nested.go deleted file mode 100644 index 5481f92..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_nested.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NestedAggregation is a special single bucket aggregation that enables -// aggregating nested documents. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-nested-aggregation.html -type NestedAggregation struct { - path string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewNestedAggregation() *NestedAggregation { - return &NestedAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *NestedAggregation) SubAggregation(name string, subAggregation Aggregation) *NestedAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *NestedAggregation) Meta(metaData map[string]interface{}) *NestedAggregation { - a.meta = metaData - return a -} - -func (a *NestedAggregation) Path(path string) *NestedAggregation { - a.path = path - return a -} - -func (a *NestedAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : { "name" : "led tv" } - // } - // "aggs" : { - // "resellers" : { - // "nested" : { - // "path" : "resellers" - // }, - // "aggs" : { - // "min_price" : { "min" : { "field" : "resellers.price" } } - // } - // } - // } - // } - // This method returns only the { "nested" : {} } part. 
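The missing-bucket builder removed above is the simplest of the set; a sketch, with `price` again taken from its doc comment:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// One bucket holding every document without a price value.
	agg := elastic.NewMissingAggregation().Field("price")

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // {"missing":{"field":"price"}}
}
```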
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["nested"] = opts - - opts["path"] = a.path - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_range.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_range.go deleted file mode 100644 index 7e786d9..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_range.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "time" -) - -// RangeAggregation is a multi-bucket value source based aggregation that -// enables the user to define a set of ranges - each representing a bucket. -// During the aggregation process, the values extracted from each document -// will be checked against each bucket range and "bucket" the -// relevant/matching document. Note that this aggregration includes the -// from value and excludes the to value for each range. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-range-aggregation.html -type RangeAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - keyed *bool - unmapped *bool - entries []rangeAggregationEntry -} - -type rangeAggregationEntry struct { - Key string - From interface{} - To interface{} -} - -func NewRangeAggregation() *RangeAggregation { - return &RangeAggregation{ - subAggregations: make(map[string]Aggregation), - entries: make([]rangeAggregationEntry, 0), - } -} - -func (a *RangeAggregation) Field(field string) *RangeAggregation { - a.field = field - return a -} - -func (a *RangeAggregation) Script(script *Script) *RangeAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *RangeAggregation) Missing(missing interface{}) *RangeAggregation { - a.missing = missing - return a -} - -func (a *RangeAggregation) SubAggregation(name string, subAggregation Aggregation) *RangeAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
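A sketch of the nested builder whose Source method appears above, reproducing its resellers/min_price doc-comment example; `NewMinAggregation` is the metric builder from the same package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Aggregate inside the nested "resellers" objects,
	// computing the minimum reseller price per bucket.
	agg := elastic.NewNestedAggregation().
		Path("resellers").
		SubAggregation("min_price",
			elastic.NewMinAggregation().Field("resellers.price"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out)) // nested body plus an "aggregations" map, as built above
}
```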
-func (a *RangeAggregation) Meta(metaData map[string]interface{}) *RangeAggregation { - a.meta = metaData - return a -} - -func (a *RangeAggregation) Keyed(keyed bool) *RangeAggregation { - a.keyed = &keyed - return a -} - -func (a *RangeAggregation) Unmapped(unmapped bool) *RangeAggregation { - a.unmapped = &unmapped - return a -} - -func (a *RangeAggregation) AddRange(from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *RangeAggregation) AddRangeWithKey(key string, from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *RangeAggregation) AddUnboundedTo(from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *RangeAggregation) AddUnboundedToWithKey(key string, from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *RangeAggregation) AddUnboundedFrom(to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *RangeAggregation) AddUnboundedFromWithKey(key string, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *RangeAggregation) Lt(to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: nil, To: to}) - return a -} - -func (a *RangeAggregation) LtWithKey(key string, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: nil, To: to}) - return a -} - -func (a *RangeAggregation) Between(from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: to}) - return a -} - -func (a *RangeAggregation) BetweenWithKey(key string, from, to interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: to}) - return a -} - -func (a *RangeAggregation) Gt(from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{From: from, To: nil}) - return a -} - -func (a *RangeAggregation) GtWithKey(key string, from interface{}) *RangeAggregation { - a.entries = append(a.entries, rangeAggregationEntry{Key: key, From: from, To: nil}) - return a -} - -func (a *RangeAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "price_ranges" : { - // "range" : { - // "field" : "price", - // "ranges" : [ - // { "to" : 50 }, - // { "from" : 50, "to" : 100 }, - // { "from" : 100 } - // ] - // } - // } - // } - // } - // - // This method returns only the { "range" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["range"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - if a.keyed != nil { - opts["keyed"] = *a.keyed - } - if a.unmapped != nil { - opts["unmapped"] = *a.unmapped - } - - var ranges []interface{} - for _, ent := range a.entries { - r := make(map[string]interface{}) - if ent.Key != "" { - r["key"] = ent.Key - } - if ent.From != nil { - switch from := ent.From.(type) { - case int, int16, int32, int64, float32, float64: - r["from"] = from - case *int, *int16, *int32, *int64, *float32, *float64: - r["from"] = from - case time.Time: - r["from"] = from.Format(time.RFC3339) - case *time.Time: - r["from"] = from.Format(time.RFC3339) - case string: - r["from"] = from - case *string: - r["from"] = from - } - } - if ent.To != nil { - switch to := ent.To.(type) { - case int, int16, int32, int64, float32, float64: - r["to"] = to - case *int, *int16, *int32, *int64, *float32, *float64: - r["to"] = to - case time.Time: - r["to"] = to.Format(time.RFC3339) - case *time.Time: - r["to"] = to.Format(time.RFC3339) - case string: - r["to"] = to - case *string: - r["to"] = to - } - } - ranges = append(ranges, r) - } - opts["ranges"] = ranges - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_reverse_nested.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_reverse_nested.go deleted file mode 100644 index 8f454f8..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_reverse_nested.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ReverseNestedAggregation defines a special single bucket aggregation -// that enables aggregating on parent docs from nested documents. -// Effectively this aggregation can break out of the nested block -// structure and link to other nested structures or the root document, -// which allows nesting other aggregations that aren’t part of -// the nested object in a nested aggregation. -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-reverse-nested-aggregation.html -type ReverseNestedAggregation struct { - path string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -// NewReverseNestedAggregation initializes a new ReverseNestedAggregation -// bucket aggregation. -func NewReverseNestedAggregation() *ReverseNestedAggregation { - return &ReverseNestedAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -// Path set the path to use for this nested aggregation. The path must match -// the path to a nested object in the mappings. If it is not specified -// then this aggregation will go back to the root document. 
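The range builder removed here accepts numeric, string, and time.Time endpoints (the latter serialized as RFC3339, per the type switch above). A sketch of its price_ranges doc-comment example:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	agg := elastic.NewRangeAggregation().
		Field("price").
		AddUnboundedFrom(50). // { "to": 50 }
		AddRange(50, 100).    // { "from": 50, "to": 100 }
		AddUnboundedTo(100)   // { "from": 100 }

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"range":{"field":"price","ranges":[{"to":50},{"from":50,"to":100},{"from":100}]}}
}
```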
-func (a *ReverseNestedAggregation) Path(path string) *ReverseNestedAggregation { - a.path = path - return a -} - -func (a *ReverseNestedAggregation) SubAggregation(name string, subAggregation Aggregation) *ReverseNestedAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *ReverseNestedAggregation) Meta(metaData map[string]interface{}) *ReverseNestedAggregation { - a.meta = metaData - return a -} - -func (a *ReverseNestedAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "reverse_nested" : { - // "path": "..." - // } - // } - // } - // This method returns only the { "reverse_nested" : {} } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["reverse_nested"] = opts - - if a.path != "" { - opts["path"] = a.path - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_sampler.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_sampler.go deleted file mode 100644 index d64e6b3..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_sampler.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SamplerAggregation is a filtering aggregation used to limit any -// sub aggregations' processing to a sample of the top-scoring documents. -// Optionally, diversity settings can be used to limit the number of matches -// that share a common value such as an "author". -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-sampler-aggregation.html -type SamplerAggregation struct { - subAggregations map[string]Aggregation - meta map[string]interface{} - - shardSize int - maxDocsPerValue int - executionHint string -} - -func NewSamplerAggregation() *SamplerAggregation { - return &SamplerAggregation{ - shardSize: -1, - maxDocsPerValue: -1, - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SamplerAggregation) SubAggregation(name string, subAggregation Aggregation) *SamplerAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SamplerAggregation) Meta(metaData map[string]interface{}) *SamplerAggregation { - a.meta = metaData - return a -} - -// ShardSize sets the maximum number of docs returned from each shard. 
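The reverse_nested builder deleted above only makes sense inside a nested aggregation. A sketch that counts parent documents per nested comment author; the field names are hypothetical, in the spirit of the Elasticsearch reference example:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Terms over nested comment authors, then jump back to the
	// root documents so counts refer to parents, not comments.
	byAuthor := elastic.NewTermsAggregation().
		Field("comments.username").
		SubAggregation("back_to_parents", elastic.NewReverseNestedAggregation())

	agg := elastic.NewNestedAggregation().
		Path("comments").
		SubAggregation("by_author", byAuthor)

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
}
```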
-func (a *SamplerAggregation) ShardSize(shardSize int) *SamplerAggregation { - a.shardSize = shardSize - return a -} - -func (a *SamplerAggregation) MaxDocsPerValue(maxDocsPerValue int) *SamplerAggregation { - a.maxDocsPerValue = maxDocsPerValue - return a -} - -func (a *SamplerAggregation) ExecutionHint(hint string) *SamplerAggregation { - a.executionHint = hint - return a -} - -func (a *SamplerAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "sample" : { - // "sampler" : { - // "shard_size" : 200 - // }, - // "aggs": { - // "keywords": { - // "significant_terms": { - // "field": "text" - // } - // } - // } - // } - // } - // } - // - // This method returns only the { "sampler" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["sampler"] = opts - - if a.shardSize >= 0 { - opts["shard_size"] = a.shardSize - } - if a.maxDocsPerValue >= 0 { - opts["max_docs_per_value"] = a.maxDocsPerValue - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_terms.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_terms.go deleted file mode 100644 index e30cb22..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_terms.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SignificantTermsAggregation is an aggregation that returns interesting -// or unusual occurrences of terms in a set. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html -type SignificantTermsAggregation struct { - field string - subAggregations map[string]Aggregation - meta map[string]interface{} - - minDocCount *int - shardMinDocCount *int - requiredSize *int - shardSize *int - filter Query - executionHint string - significanceHeuristic SignificanceHeuristic - includeExclude *TermsAggregationIncludeExclude -} - -func NewSignificantTermsAggregation() *SignificantTermsAggregation { - return &SignificantTermsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SignificantTermsAggregation) Field(field string) *SignificantTermsAggregation { - a.field = field - return a -} - -func (a *SignificantTermsAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTermsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
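A sketch reproducing the doc-comment example from the sampler Source method above; significant_terms is the builder whose removal begins in the same hunk:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Limit sub-aggregation work to the 200 top-scoring docs per shard,
	// then look for significant keywords within that sample.
	agg := elastic.NewSamplerAggregation().
		ShardSize(200).
		SubAggregation("keywords",
			elastic.NewSignificantTermsAggregation().Field("text"))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
}
```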
-func (a *SignificantTermsAggregation) Meta(metaData map[string]interface{}) *SignificantTermsAggregation { - a.meta = metaData - return a -} - -func (a *SignificantTermsAggregation) MinDocCount(minDocCount int) *SignificantTermsAggregation { - a.minDocCount = &minDocCount - return a -} - -func (a *SignificantTermsAggregation) ShardMinDocCount(shardMinDocCount int) *SignificantTermsAggregation { - a.shardMinDocCount = &shardMinDocCount - return a -} - -func (a *SignificantTermsAggregation) RequiredSize(requiredSize int) *SignificantTermsAggregation { - a.requiredSize = &requiredSize - return a -} - -func (a *SignificantTermsAggregation) ShardSize(shardSize int) *SignificantTermsAggregation { - a.shardSize = &shardSize - return a -} - -func (a *SignificantTermsAggregation) BackgroundFilter(filter Query) *SignificantTermsAggregation { - a.filter = filter - return a -} - -func (a *SignificantTermsAggregation) ExecutionHint(hint string) *SignificantTermsAggregation { - a.executionHint = hint - return a -} - -func (a *SignificantTermsAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTermsAggregation { - a.significanceHeuristic = heuristic - return a -} - -func (a *SignificantTermsAggregation) Include(regexp string) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Include = regexp - return a -} - -func (a *SignificantTermsAggregation) IncludeValues(values ...interface{}) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...) - return a -} - -func (a *SignificantTermsAggregation) Exclude(regexp string) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Exclude = regexp - return a -} - -func (a *SignificantTermsAggregation) ExcludeValues(values ...interface{}) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...) - return a -} - -func (a *SignificantTermsAggregation) Partition(p int) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Partition = p - return a -} - -func (a *SignificantTermsAggregation) NumPartitions(n int) *SignificantTermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.NumPartitions = n - return a -} - -func (a *SignificantTermsAggregation) IncludeExclude(includeExclude *TermsAggregationIncludeExclude) *SignificantTermsAggregation { - a.includeExclude = includeExclude - return a -} - -func (a *SignificantTermsAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "terms" : {"force" : [ "British Transport Police" ]} - // }, - // "aggregations" : { - // "significantCrimeTypes" : { - // "significant_terms" : { "field" : "crime_type" } - // } - // } - // } - // - // This method returns only the - // { "significant_terms" : { "field" : "crime_type" } - // part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["significant_terms"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.requiredSize != nil { - opts["size"] = *a.requiredSize // not a typo! - } - if a.shardSize != nil { - opts["shard_size"] = *a.shardSize - } - if a.minDocCount != nil { - opts["min_doc_count"] = *a.minDocCount - } - if a.shardMinDocCount != nil { - opts["shard_min_doc_count"] = *a.shardMinDocCount - } - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - if a.filter != nil { - src, err := a.filter.Source() - if err != nil { - return nil, err - } - opts["background_filter"] = src - } - if a.significanceHeuristic != nil { - name := a.significanceHeuristic.Name() - src, err := a.significanceHeuristic.Source() - if err != nil { - return nil, err - } - opts[name] = src - } - - // Include/Exclude - if ie := a.includeExclude; ie != nil { - if err := ie.MergeInto(opts); err != nil { - return nil, err - } - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// -- Significance heuristics -- - -type SignificanceHeuristic interface { - Name() string - Source() (interface{}, error) -} - -// -- Chi Square -- - -// ChiSquareSignificanceHeuristic implements Chi square as described -// in "Information Retrieval", Manning et al., Chapter 13.5.2. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_chi_square -// for details. -type ChiSquareSignificanceHeuristic struct { - backgroundIsSuperset *bool - includeNegatives *bool -} - -// NewChiSquareSignificanceHeuristic initializes a new ChiSquareSignificanceHeuristic. -func NewChiSquareSignificanceHeuristic() *ChiSquareSignificanceHeuristic { - return &ChiSquareSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. -func (sh *ChiSquareSignificanceHeuristic) Name() string { - return "chi_square" -} - -// BackgroundIsSuperset indicates whether you defined a custom background -// filter that represents a difference set of documents that you want to -// compare to. -func (sh *ChiSquareSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *ChiSquareSignificanceHeuristic { - sh.backgroundIsSuperset = &backgroundIsSuperset - return sh -} - -// IncludeNegatives indicates whether to filter out the terms that appear -// much less in the subset than in the background without the subset. -func (sh *ChiSquareSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *ChiSquareSignificanceHeuristic { - sh.includeNegatives = &includeNegatives - return sh -} - -// Source returns the parameters that need to be added to the REST parameters. 
-func (sh *ChiSquareSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - if sh.backgroundIsSuperset != nil { - source["background_is_superset"] = *sh.backgroundIsSuperset - } - if sh.includeNegatives != nil { - source["include_negatives"] = *sh.includeNegatives - } - return source, nil -} - -// -- GND -- - -// GNDSignificanceHeuristic implements the "Google Normalized Distance" -// as described in "The Google Similarity Distance", Cilibrasi and Vitanyi, -// 2007. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_google_normalized_distance -// for details. -type GNDSignificanceHeuristic struct { - backgroundIsSuperset *bool -} - -// NewGNDSignificanceHeuristic implements a new GNDSignificanceHeuristic. -func NewGNDSignificanceHeuristic() *GNDSignificanceHeuristic { - return &GNDSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. -func (sh *GNDSignificanceHeuristic) Name() string { - return "gnd" -} - -// BackgroundIsSuperset indicates whether you defined a custom background -// filter that represents a difference set of documents that you want to -// compare to. -func (sh *GNDSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *GNDSignificanceHeuristic { - sh.backgroundIsSuperset = &backgroundIsSuperset - return sh -} - -// Source returns the parameters that need to be added to the REST parameters. -func (sh *GNDSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - if sh.backgroundIsSuperset != nil { - source["background_is_superset"] = *sh.backgroundIsSuperset - } - return source, nil -} - -// -- JLH Score -- - -// JLHScoreSignificanceHeuristic implements the JLH score as described in -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_jlh_score. -type JLHScoreSignificanceHeuristic struct{} - -// NewJLHScoreSignificanceHeuristic initializes a new JLHScoreSignificanceHeuristic. -func NewJLHScoreSignificanceHeuristic() *JLHScoreSignificanceHeuristic { - return &JLHScoreSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. -func (sh *JLHScoreSignificanceHeuristic) Name() string { - return "jlh" -} - -// Source returns the parameters that need to be added to the REST parameters. -func (sh *JLHScoreSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - return source, nil -} - -// -- Mutual Information -- - -// MutualInformationSignificanceHeuristic implements Mutual information -// as described in "Information Retrieval", Manning et al., Chapter 13.5.1. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_mutual_information -// for details. -type MutualInformationSignificanceHeuristic struct { - backgroundIsSuperset *bool - includeNegatives *bool -} - -// NewMutualInformationSignificanceHeuristic initializes a new instance of -// MutualInformationSignificanceHeuristic. -func NewMutualInformationSignificanceHeuristic() *MutualInformationSignificanceHeuristic { - return &MutualInformationSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. 
-func (sh *MutualInformationSignificanceHeuristic) Name() string { - return "mutual_information" -} - -// BackgroundIsSuperset indicates whether you defined a custom background -// filter that represents a difference set of documents that you want to -// compare to. -func (sh *MutualInformationSignificanceHeuristic) BackgroundIsSuperset(backgroundIsSuperset bool) *MutualInformationSignificanceHeuristic { - sh.backgroundIsSuperset = &backgroundIsSuperset - return sh -} - -// IncludeNegatives indicates whether to filter out the terms that appear -// much less in the subset than in the background without the subset. -func (sh *MutualInformationSignificanceHeuristic) IncludeNegatives(includeNegatives bool) *MutualInformationSignificanceHeuristic { - sh.includeNegatives = &includeNegatives - return sh -} - -// Source returns the parameters that need to be added to the REST parameters. -func (sh *MutualInformationSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - if sh.backgroundIsSuperset != nil { - source["background_is_superset"] = *sh.backgroundIsSuperset - } - if sh.includeNegatives != nil { - source["include_negatives"] = *sh.includeNegatives - } - return source, nil -} - -// -- Percentage Score -- - -// PercentageScoreSignificanceHeuristic implements the algorithm described -// in https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_percentage. -type PercentageScoreSignificanceHeuristic struct{} - -// NewPercentageScoreSignificanceHeuristic initializes a new instance of -// PercentageScoreSignificanceHeuristic. -func NewPercentageScoreSignificanceHeuristic() *PercentageScoreSignificanceHeuristic { - return &PercentageScoreSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. -func (sh *PercentageScoreSignificanceHeuristic) Name() string { - return "percentage" -} - -// Source returns the parameters that need to be added to the REST parameters. -func (sh *PercentageScoreSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - return source, nil -} - -// -- Script -- - -// ScriptSignificanceHeuristic implements a scripted significance heuristic. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted -// for details. -type ScriptSignificanceHeuristic struct { - script *Script -} - -// NewScriptSignificanceHeuristic initializes a new instance of -// ScriptSignificanceHeuristic. -func NewScriptSignificanceHeuristic() *ScriptSignificanceHeuristic { - return &ScriptSignificanceHeuristic{} -} - -// Name returns the name of the heuristic in the REST interface. -func (sh *ScriptSignificanceHeuristic) Name() string { - return "script_heuristic" -} - -// Script specifies the script to use to get custom scores. The following -// parameters are available in the script: `_subset_freq`, `_superset_freq`, -// `_subset_size`, and `_superset_size`. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significantterms-aggregation.html#_scripted -// for details. -func (sh *ScriptSignificanceHeuristic) Script(script *Script) *ScriptSignificanceHeuristic { - sh.script = script - return sh -} - -// Source returns the parameters that need to be added to the REST parameters. 
-func (sh *ScriptSignificanceHeuristic) Source() (interface{}, error) { - source := make(map[string]interface{}) - if sh.script != nil { - src, err := sh.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_text.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_text.go deleted file mode 100644 index 2602df9..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_significant_text.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SignificantTextAggregation returns interesting or unusual occurrences -// of free-text terms in a set. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-significanttext-aggregation.html -type SignificantTextAggregation struct { - field string - subAggregations map[string]Aggregation - meta map[string]interface{} - - sourceFieldNames []string - filterDuplicateText *bool - includeExclude *TermsAggregationIncludeExclude - filter Query - bucketCountThresholds *BucketCountThresholds - significanceHeuristic SignificanceHeuristic -} - -func NewSignificantTextAggregation() *SignificantTextAggregation { - return &SignificantTextAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SignificantTextAggregation) Field(field string) *SignificantTextAggregation { - a.field = field - return a -} - -func (a *SignificantTextAggregation) SubAggregation(name string, subAggregation Aggregation) *SignificantTextAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
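Pulling the deleted pieces together: a significant_terms request with an explicit chi_square heuristic. Note that RequiredSize is emitted as "size" (the "not a typo!" comment in the Source method above):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	agg := elastic.NewSignificantTermsAggregation().
		Field("crime_type").
		RequiredSize(5). // serialized as "size", per Source above
		SignificanceHeuristic(
			elastic.NewChiSquareSignificanceHeuristic().
				BackgroundIsSuperset(true))

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"significant_terms":{"chi_square":{"background_is_superset":true},
	//                       "field":"crime_type","size":5}}
}
```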
-func (a *SignificantTextAggregation) Meta(metaData map[string]interface{}) *SignificantTextAggregation { - a.meta = metaData - return a -} - -func (a *SignificantTextAggregation) SourceFieldNames(names ...string) *SignificantTextAggregation { - a.sourceFieldNames = names - return a -} - -func (a *SignificantTextAggregation) FilterDuplicateText(filter bool) *SignificantTextAggregation { - a.filterDuplicateText = &filter - return a -} - -func (a *SignificantTextAggregation) MinDocCount(minDocCount int64) *SignificantTextAggregation { - if a.bucketCountThresholds == nil { - a.bucketCountThresholds = &BucketCountThresholds{} - } - a.bucketCountThresholds.MinDocCount = &minDocCount - return a -} - -func (a *SignificantTextAggregation) ShardMinDocCount(shardMinDocCount int64) *SignificantTextAggregation { - if a.bucketCountThresholds == nil { - a.bucketCountThresholds = &BucketCountThresholds{} - } - a.bucketCountThresholds.ShardMinDocCount = &shardMinDocCount - return a -} - -func (a *SignificantTextAggregation) Size(size int) *SignificantTextAggregation { - if a.bucketCountThresholds == nil { - a.bucketCountThresholds = &BucketCountThresholds{} - } - a.bucketCountThresholds.RequiredSize = &size - return a -} - -func (a *SignificantTextAggregation) ShardSize(shardSize int) *SignificantTextAggregation { - if a.bucketCountThresholds == nil { - a.bucketCountThresholds = &BucketCountThresholds{} - } - a.bucketCountThresholds.ShardSize = &shardSize - return a -} - -func (a *SignificantTextAggregation) BackgroundFilter(filter Query) *SignificantTextAggregation { - a.filter = filter - return a -} - -func (a *SignificantTextAggregation) SignificanceHeuristic(heuristic SignificanceHeuristic) *SignificantTextAggregation { - a.significanceHeuristic = heuristic - return a -} - -func (a *SignificantTextAggregation) Include(regexp string) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Include = regexp - return a -} - -func (a *SignificantTextAggregation) IncludeValues(values ...interface{}) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...) - return a -} - -func (a *SignificantTextAggregation) Exclude(regexp string) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Exclude = regexp - return a -} - -func (a *SignificantTextAggregation) ExcludeValues(values ...interface{}) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...) 
- return a -} - -func (a *SignificantTextAggregation) Partition(p int) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Partition = p - return a -} - -func (a *SignificantTextAggregation) NumPartitions(n int) *SignificantTextAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.NumPartitions = n - return a -} - -func (a *SignificantTextAggregation) IncludeExclude(includeExclude *TermsAggregationIncludeExclude) *SignificantTextAggregation { - a.includeExclude = includeExclude - return a -} - -func (a *SignificantTextAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : {"content" : "Bird flu"} - // }, - // "aggregations" : { - // "my_sample" : { - // "sampler": { - // "shard_size" : 100 - // }, - // "aggregations": { - // "keywords" : { - // "significant_text" : { "field" : "content" } - // } - // } - // } - // } - // } - // - // This method returns only the - // { "significant_text" : { "field" : "content" } - // part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["significant_text"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.bucketCountThresholds != nil { - if a.bucketCountThresholds.RequiredSize != nil { - opts["size"] = (*a.bucketCountThresholds).RequiredSize - } - if a.bucketCountThresholds.ShardSize != nil { - opts["shard_size"] = (*a.bucketCountThresholds).ShardSize - } - if a.bucketCountThresholds.MinDocCount != nil { - opts["min_doc_count"] = (*a.bucketCountThresholds).MinDocCount - } - if a.bucketCountThresholds.ShardMinDocCount != nil { - opts["shard_min_doc_count"] = (*a.bucketCountThresholds).ShardMinDocCount - } - } - if a.filter != nil { - src, err := a.filter.Source() - if err != nil { - return nil, err - } - opts["background_filter"] = src - } - if a.significanceHeuristic != nil { - name := a.significanceHeuristic.Name() - src, err := a.significanceHeuristic.Source() - if err != nil { - return nil, err - } - opts[name] = src - } - // Include/Exclude - if ie := a.includeExclude; ie != nil { - // Include - if ie.Include != "" { - opts["include"] = ie.Include - } else if len(ie.IncludeValues) > 0 { - opts["include"] = ie.IncludeValues - } else if ie.NumPartitions > 0 { - inc := make(map[string]interface{}) - inc["partition"] = ie.Partition - inc["num_partitions"] = ie.NumPartitions - opts["include"] = inc - } - // Exclude - if ie.Exclude != "" { - opts["exclude"] = ie.Exclude - } else if len(ie.ExcludeValues) > 0 { - opts["exclude"] = ie.ExcludeValues - } - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_terms.go b/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_terms.go deleted file mode 100644 index 8f047be..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_bucket_terms.go +++ /dev/null @@ -1,422 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
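The free-text variant removed above follows the same shape and, per its own doc comment, is typically wrapped in the sampler shown earlier. A standalone sketch using the doc comment's `content` field:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	agg := elastic.NewSignificantTextAggregation().
		Field("content").
		Size(10).      // bucket-count threshold, emitted as "size"
		ShardSize(200) // candidates considered per shard

	src, err := agg.Source()
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(src)
	fmt.Println(string(out))
	// {"significant_text":{"field":"content","shard_size":200,"size":10}}
}
```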
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "fmt" - -// TermsAggregation is a multi-bucket value source based aggregation -// where buckets are dynamically built - one per unique value. -// -// See: http://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-bucket-terms-aggregation.html -type TermsAggregation struct { - field string - script *Script - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - - size *int - shardSize *int - requiredSize *int - minDocCount *int - shardMinDocCount *int - valueType string - includeExclude *TermsAggregationIncludeExclude - executionHint string - collectionMode string - showTermDocCountError *bool - order []TermsOrder -} - -func NewTermsAggregation() *TermsAggregation { - return &TermsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *TermsAggregation) Field(field string) *TermsAggregation { - a.field = field - return a -} - -func (a *TermsAggregation) Script(script *Script) *TermsAggregation { - a.script = script - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *TermsAggregation) Missing(missing interface{}) *TermsAggregation { - a.missing = missing - return a -} - -func (a *TermsAggregation) SubAggregation(name string, subAggregation Aggregation) *TermsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *TermsAggregation) Meta(metaData map[string]interface{}) *TermsAggregation { - a.meta = metaData - return a -} - -func (a *TermsAggregation) Size(size int) *TermsAggregation { - a.size = &size - return a -} - -func (a *TermsAggregation) RequiredSize(requiredSize int) *TermsAggregation { - a.requiredSize = &requiredSize - return a -} - -func (a *TermsAggregation) ShardSize(shardSize int) *TermsAggregation { - a.shardSize = &shardSize - return a -} - -func (a *TermsAggregation) MinDocCount(minDocCount int) *TermsAggregation { - a.minDocCount = &minDocCount - return a -} - -func (a *TermsAggregation) ShardMinDocCount(shardMinDocCount int) *TermsAggregation { - a.shardMinDocCount = &shardMinDocCount - return a -} - -func (a *TermsAggregation) Include(regexp string) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Include = regexp - return a -} - -func (a *TermsAggregation) IncludeValues(values ...interface{}) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.IncludeValues = append(a.includeExclude.IncludeValues, values...) - return a -} - -func (a *TermsAggregation) Exclude(regexp string) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Exclude = regexp - return a -} - -func (a *TermsAggregation) ExcludeValues(values ...interface{}) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.ExcludeValues = append(a.includeExclude.ExcludeValues, values...) 
- return a -} - -func (a *TermsAggregation) Partition(p int) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.Partition = p - return a -} - -func (a *TermsAggregation) NumPartitions(n int) *TermsAggregation { - if a.includeExclude == nil { - a.includeExclude = &TermsAggregationIncludeExclude{} - } - a.includeExclude.NumPartitions = n - return a -} - -func (a *TermsAggregation) IncludeExclude(includeExclude *TermsAggregationIncludeExclude) *TermsAggregation { - a.includeExclude = includeExclude - return a -} - -// ValueType can be string, long, or double. -func (a *TermsAggregation) ValueType(valueType string) *TermsAggregation { - a.valueType = valueType - return a -} - -func (a *TermsAggregation) Order(order string, asc bool) *TermsAggregation { - a.order = append(a.order, TermsOrder{Field: order, Ascending: asc}) - return a -} - -func (a *TermsAggregation) OrderByCount(asc bool) *TermsAggregation { - // "order" : { "_count" : "asc" } - a.order = append(a.order, TermsOrder{Field: "_count", Ascending: asc}) - return a -} - -func (a *TermsAggregation) OrderByCountAsc() *TermsAggregation { - return a.OrderByCount(true) -} - -func (a *TermsAggregation) OrderByCountDesc() *TermsAggregation { - return a.OrderByCount(false) -} - -// Deprecated: Use OrderByKey instead. -func (a *TermsAggregation) OrderByTerm(asc bool) *TermsAggregation { - // "order" : { "_term" : "asc" } - a.order = append(a.order, TermsOrder{Field: "_term", Ascending: asc}) - return a -} - -// Deprecated: Use OrderByKeyAsc instead. -func (a *TermsAggregation) OrderByTermAsc() *TermsAggregation { - return a.OrderByTerm(true) -} - -// Deprecated: Use OrderByKeyDesc instead. -func (a *TermsAggregation) OrderByTermDesc() *TermsAggregation { - return a.OrderByTerm(false) -} - -func (a *TermsAggregation) OrderByKey(asc bool) *TermsAggregation { - // "order" : { "_term" : "asc" } - a.order = append(a.order, TermsOrder{Field: "_key", Ascending: asc}) - return a -} - -func (a *TermsAggregation) OrderByKeyAsc() *TermsAggregation { - return a.OrderByKey(true) -} - -func (a *TermsAggregation) OrderByKeyDesc() *TermsAggregation { - return a.OrderByKey(false) -} - -// OrderByAggregation creates a bucket ordering strategy which sorts buckets -// based on a single-valued calc get. -func (a *TermsAggregation) OrderByAggregation(aggName string, asc bool) *TermsAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "avg_height" : "desc" } - // }, - // "aggs" : { - // "avg_height" : { "avg" : { "field" : "height" } } - // } - // } - // } - // } - a.order = append(a.order, TermsOrder{Field: aggName, Ascending: asc}) - return a -} - -// OrderByAggregationAndMetric creates a bucket ordering strategy which -// sorts buckets based on a multi-valued calc get. -func (a *TermsAggregation) OrderByAggregationAndMetric(aggName, metric string, asc bool) *TermsAggregation { - // { - // "aggs" : { - // "genders" : { - // "terms" : { - // "field" : "gender", - // "order" : { "height_stats.avg" : "desc" } - // }, - // "aggs" : { - // "height_stats" : { "stats" : { "field" : "height" } } - // } - // } - // } - // } - a.order = append(a.order, TermsOrder{Field: aggName + "." + metric, Ascending: asc}) - return a -} - -func (a *TermsAggregation) ExecutionHint(hint string) *TermsAggregation { - a.executionHint = hint - return a -} - -// Collection mode can be depth_first or breadth_first as of 1.4.0. 
-func (a *TermsAggregation) CollectionMode(collectionMode string) *TermsAggregation { - a.collectionMode = collectionMode - return a -} - -func (a *TermsAggregation) ShowTermDocCountError(showTermDocCountError bool) *TermsAggregation { - a.showTermDocCountError = &showTermDocCountError - return a -} - -func (a *TermsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "genders" : { - // "terms" : { "field" : "gender" } - // } - // } - // } - // This method returns only the { "terms" : { "field" : "gender" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["terms"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // TermsBuilder - if a.size != nil && *a.size >= 0 { - opts["size"] = *a.size - } - if a.shardSize != nil && *a.shardSize >= 0 { - opts["shard_size"] = *a.shardSize - } - if a.requiredSize != nil && *a.requiredSize >= 0 { - opts["required_size"] = *a.requiredSize - } - if a.minDocCount != nil && *a.minDocCount >= 0 { - opts["min_doc_count"] = *a.minDocCount - } - if a.shardMinDocCount != nil && *a.shardMinDocCount >= 0 { - opts["shard_min_doc_count"] = *a.shardMinDocCount - } - if a.showTermDocCountError != nil { - opts["show_term_doc_count_error"] = *a.showTermDocCountError - } - if a.collectionMode != "" { - opts["collect_mode"] = a.collectionMode - } - if a.valueType != "" { - opts["value_type"] = a.valueType - } - if len(a.order) > 0 { - var orderSlice []interface{} - for _, order := range a.order { - src, err := order.Source() - if err != nil { - return nil, err - } - orderSlice = append(orderSlice, src) - } - opts["order"] = orderSlice - } - - // Include/Exclude - if ie := a.includeExclude; ie != nil { - if err := ie.MergeInto(opts); err != nil { - return nil, err - } - } - - if a.executionHint != "" { - opts["execution_hint"] = a.executionHint - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// TermsAggregationIncludeExclude allows for include/exclude in a TermsAggregation. -type TermsAggregationIncludeExclude struct { - Include string - Exclude string - IncludeValues []interface{} - ExcludeValues []interface{} - Partition int - NumPartitions int -} - -// Source returns a JSON serializable struct. -func (ie *TermsAggregationIncludeExclude) Source() (interface{}, error) { - source := make(map[string]interface{}) - - // Include - if ie.Include != "" { - source["include"] = ie.Include - } else if len(ie.IncludeValues) > 0 { - source["include"] = ie.IncludeValues - } else if ie.NumPartitions > 0 { - inc := make(map[string]interface{}) - inc["partition"] = ie.Partition - inc["num_partitions"] = ie.NumPartitions - source["include"] = inc - } - - // Exclude - if ie.Exclude != "" { - source["exclude"] = ie.Exclude - } else if len(ie.ExcludeValues) > 0 { - source["exclude"] = ie.ExcludeValues - } - - return source, nil -} - -// MergeInto merges the values of the include/exclude options into source. 
-func (ie *TermsAggregationIncludeExclude) MergeInto(source map[string]interface{}) error { - values, err := ie.Source() - if err != nil { - return err - } - mv, ok := values.(map[string]interface{}) - if !ok { - return fmt.Errorf("IncludeExclude: expected a map[string]interface{}, got %T", values) - } - for k, v := range mv { - source[k] = v - } - return nil -} - -// TermsOrder specifies a single order field for a terms aggregation. -type TermsOrder struct { - Field string - Ascending bool -} - -// Source returns serializable JSON of the TermsOrder. -func (order *TermsOrder) Source() (interface{}, error) { - source := make(map[string]string) - if order.Ascending { - source[order.Field] = "asc" - } else { - source[order.Field] = "desc" - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_matrix_stats.go b/vendor/github.com/olivere/elastic/v7/search_aggs_matrix_stats.go deleted file mode 100644 index f0eb457..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_matrix_stats.go +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatrixStatsAggregation is a multi-value metrics aggregation -// that computes stats over numeric values extracted from the -// aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by a provided script. -// -// The stats that are returned consist of: count, mean, variance, skewness, kurtosis, covariance and correlation. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-matrix-stats-aggregation.html -// for details. -type MatrixStatsAggregation struct { - fields []string - missing interface{} - format string - valueType interface{} - mode string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -// NewMatrixStatsAggregation initializes a new MatrixStatsAggregation. -func NewMatrixStatsAggregation() *MatrixStatsAggregation { - return &MatrixStatsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MatrixStatsAggregation) Fields(fields ...string) *MatrixStatsAggregation { - a.fields = append(a.fields, fields...) - return a -} - -// Missing configures the value to use when documents miss a value. -func (a *MatrixStatsAggregation) Missing(missing interface{}) *MatrixStatsAggregation { - a.missing = missing - return a -} - -// Mode specifies how to operate. Valid values are: sum, avg, median, min, or max. -func (a *MatrixStatsAggregation) Mode(mode string) *MatrixStatsAggregation { - a.mode = mode - return a -} - -func (a *MatrixStatsAggregation) Format(format string) *MatrixStatsAggregation { - a.format = format - return a -} - -func (a *MatrixStatsAggregation) ValueType(valueType interface{}) *MatrixStatsAggregation { - a.valueType = valueType - return a -} - -func (a *MatrixStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *MatrixStatsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MatrixStatsAggregation) Meta(metaData map[string]interface{}) *MatrixStatsAggregation { - a.meta = metaData - return a -} - -// Source returns the JSON to serialize into the request, or an error.
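The TermsAggregation file removed above is representative of how all of these vendored builders are used: chain setters, then Source() yields the JSON fragment that is embedded in a search request. A minimal, self-contained sketch of that flow; the "gender" field matches the file's own examples, while the size and partition numbers are illustrative:

package main

import (
    "encoding/json"
    "fmt"

    elastic "github.com/olivere/elastic/v7"
)

// printAgg marshals an aggregation builder's Source() output, i.e. the
// exact JSON fragment that would be embedded in a search request body.
func printAgg(agg elastic.Aggregation) {
    src, err := agg.Source()
    if err != nil {
        panic(err)
    }
    b, _ := json.Marshal(src)
    fmt.Println(string(b))
}

func main() {
    // Top 10 terms by descending document count, consumed as
    // slice 0 of 20 partitions.
    agg := elastic.NewTermsAggregation().
        Field("gender").
        Size(10).
        OrderByCountDesc().
        Partition(0).
        NumPartitions(20)
    // Emits: {"terms":{"field":"gender","include":{"num_partitions":20,"partition":0},"order":[{"_count":"desc"}],"size":10}}
    printAgg(agg)
}

The sketches after the remaining deleted files below reuse this printAgg helper.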
-func (a *MatrixStatsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "matrixstats" : { - // "matrix_stats" : { - // "fields" : ["poverty", "income"], - // "missing": {"income": 50000}, - // "mode": "avg", - // ... - // } - // } - // } - // } - // This method returns only the { "matrix_stats" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["matrix_stats"] = opts - - // MatrixStatsAggregationBuilder - opts["fields"] = a.fields - if a.missing != nil { - opts["missing"] = a.missing - } - if a.format != "" { - opts["format"] = a.format - } - if a.valueType != nil { - opts["value_type"] = a.valueType - } - if a.mode != "" { - opts["mode"] = a.mode - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_avg.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_avg.go deleted file mode 100644 index 7f58caf..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_avg.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AvgAggregation is a single-value metrics aggregation that computes -// the average of numeric values that are extracted from the -// aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by -// a provided script. -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-avg-aggregation.html -type AvgAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewAvgAggregation() *AvgAggregation { - return &AvgAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *AvgAggregation) Field(field string) *AvgAggregation { - a.field = field - return a -} - -func (a *AvgAggregation) Script(script *Script) *AvgAggregation { - a.script = script - return a -} - -func (a *AvgAggregation) Format(format string) *AvgAggregation { - a.format = format - return a -} - -func (a *AvgAggregation) Missing(missing interface{}) *AvgAggregation { - a.missing = missing - return a -} - -func (a *AvgAggregation) SubAggregation(name string, subAggregation Aggregation) *AvgAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *AvgAggregation) Meta(metaData map[string]interface{}) *AvgAggregation { - a.meta = metaData - return a -} - -func (a *AvgAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "avg_grade" : { "avg" : { "field" : "grade" } } - // } - // } - // This method returns only the { "avg" : { "field" : "grade" } } part. 
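A sketch of the matrix_stats builder whose Source() body appears just above, reusing printAgg; the poverty/income fields and the missing value come from the example JSON in the file's own comments:

func exampleMatrixStats() {
    agg := elastic.NewMatrixStatsAggregation().
        Fields("poverty", "income").
        Missing(map[string]interface{}{"income": 50000}).
        Mode("avg")
    // Emits: {"matrix_stats":{"fields":["poverty","income"],"missing":{"income":50000},"mode":"avg"}}
    printAgg(agg)
}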
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["avg"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - if a.format != "" { - opts["format"] = a.format - } - - if a.missing != nil { - opts["missing"] = a.missing - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_cardinality.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_cardinality.go deleted file mode 100644 index f0599a6..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_cardinality.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CardinalityAggregation is a single-value metrics aggregation that -// calculates an approximate count of distinct values. -// Values can be extracted either from specific fields in the document -// or generated by a script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-cardinality-aggregation.html -type CardinalityAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - precisionThreshold *int64 - rehash *bool -} - -func NewCardinalityAggregation() *CardinalityAggregation { - return &CardinalityAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *CardinalityAggregation) Field(field string) *CardinalityAggregation { - a.field = field - return a -} - -func (a *CardinalityAggregation) Script(script *Script) *CardinalityAggregation { - a.script = script - return a -} - -func (a *CardinalityAggregation) Format(format string) *CardinalityAggregation { - a.format = format - return a -} - -func (a *CardinalityAggregation) Missing(missing interface{}) *CardinalityAggregation { - a.missing = missing - return a -} -func (a *CardinalityAggregation) SubAggregation(name string, subAggregation Aggregation) *CardinalityAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *CardinalityAggregation) Meta(metaData map[string]interface{}) *CardinalityAggregation { - a.meta = metaData - return a -} - -func (a *CardinalityAggregation) PrecisionThreshold(threshold int64) *CardinalityAggregation { - a.precisionThreshold = &threshold - return a -} - -func (a *CardinalityAggregation) Rehash(rehash bool) *CardinalityAggregation { - a.rehash = &rehash - return a -} - -func (a *CardinalityAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "author_count" : { - // "cardinality" : { "field" : "author" } - // } - // } - // } - // This method returns only the "cardinality" : { "field" : "author" } part. 
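The avg builder whose Source() body appears just above reduces to a single chained line; "grade" is the field from its example, and the missing value is illustrative:

func exampleAvg() {
    // Treat documents without a grade as 0 rather than skipping them.
    agg := elastic.NewAvgAggregation().Field("grade").Missing(0)
    // Emits: {"avg":{"field":"grade","missing":0}}
    printAgg(agg)
}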
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["cardinality"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.missing != nil { - opts["missing"] = a.missing - } - if a.format != "" { - opts["format"] = a.format - } - if a.precisionThreshold != nil { - opts["precision_threshold"] = *a.precisionThreshold - } - if a.rehash != nil { - opts["rehash"] = *a.rehash - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_extended_stats.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_extended_stats.go deleted file mode 100644 index 3008a8d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_extended_stats.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ExtendedStatsAggregation is a multi-value metrics aggregation that -// computes stats over numeric values extracted from the aggregated documents. -// These values can be extracted either from specific numeric fields -// in the documents, or be generated by a provided script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-extendedstats-aggregation.html -type ExtendedStatsAggregation struct { - field string - script *Script - format string - missing interface{} - sigma *float64 - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewExtendedStatsAggregation() *ExtendedStatsAggregation { - return &ExtendedStatsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ExtendedStatsAggregation) Field(field string) *ExtendedStatsAggregation { - a.field = field - return a -} - -func (a *ExtendedStatsAggregation) Script(script *Script) *ExtendedStatsAggregation { - a.script = script - return a -} - -func (a *ExtendedStatsAggregation) Format(format string) *ExtendedStatsAggregation { - a.format = format - return a -} - -func (a *ExtendedStatsAggregation) Missing(missing interface{}) *ExtendedStatsAggregation { - a.missing = missing - return a -} - -func (a *ExtendedStatsAggregation) Sigma(sigma float64) *ExtendedStatsAggregation { - a.sigma = &sigma - return a -} - -func (a *ExtendedStatsAggregation) SubAggregation(name string, subAggregation Aggregation) *ExtendedStatsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response.
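A sketch of the cardinality builder whose Source() body appears just above. precision_threshold trades memory for accuracy, and Elasticsearch caps the useful range at 40000; the "author" field follows the file's example:

func exampleCardinality() {
    agg := elastic.NewCardinalityAggregation().
        Field("author").
        PrecisionThreshold(40000)
    // Emits: {"cardinality":{"field":"author","precision_threshold":40000}}
    printAgg(agg)
}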
-func (a *ExtendedStatsAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsAggregation { - a.meta = metaData - return a -} - -func (a *ExtendedStatsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_stats" : { "extended_stats" : { "field" : "grade" } } - // } - // } - // This method returns only the { "extended_stats" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["extended_stats"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if v := a.missing; v != nil { - opts["missing"] = v - } - if v := a.sigma; v != nil { - opts["sigma"] = *v - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_bounds.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_bounds.go deleted file mode 100644 index 007e302..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_bounds.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoBoundsAggregation is a metric aggregation that computes the -// bounding box containing all geo_point values for a field. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geobounds-aggregation.html -type GeoBoundsAggregation struct { - field string - script *Script - wrapLongitude *bool - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGeoBoundsAggregation() *GeoBoundsAggregation { - return &GeoBoundsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *GeoBoundsAggregation) Field(field string) *GeoBoundsAggregation { - a.field = field - return a -} - -func (a *GeoBoundsAggregation) Script(script *Script) *GeoBoundsAggregation { - a.script = script - return a -} - -func (a *GeoBoundsAggregation) WrapLongitude(wrapLongitude bool) *GeoBoundsAggregation { - a.wrapLongitude = &wrapLongitude - return a -} - -func (a *GeoBoundsAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoBoundsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *GeoBoundsAggregation) Meta(metaData map[string]interface{}) *GeoBoundsAggregation { - a.meta = metaData - return a -} - -func (a *GeoBoundsAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : { "business_type" : "shop" } - // }, - // "aggs" : { - // "viewport" : { - // "geo_bounds" : { - // "field" : "location" - // "wrap_longitude" : "true" - // } - // } - // } - // } - // - // This method returns only the { "geo_bounds" : { ... } } part. 
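The extended_stats builder deleted above adds variance, standard deviation and deviation bounds on top of the plain stats output; Sigma controls how many standard deviations the bounds span. A sketch with the "grade" field from its example:

func exampleExtendedStats() {
    agg := elastic.NewExtendedStatsAggregation().Field("grade").Sigma(3)
    // Emits: {"extended_stats":{"field":"grade","sigma":3}}
    printAgg(agg)
}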
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geo_bounds"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.wrapLongitude != nil { - opts["wrap_longitude"] = *a.wrapLongitude - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_centroid.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_centroid.go deleted file mode 100644 index 55b3f48..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_geo_centroid.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoCentroidAggregation is a metric aggregation that computes the weighted centroid -// from all coordinate values for a Geo-point datatype field. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-geocentroid-aggregation.html -type GeoCentroidAggregation struct { - field string - script *Script - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewGeoCentroidAggregation() *GeoCentroidAggregation { - return &GeoCentroidAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *GeoCentroidAggregation) Field(field string) *GeoCentroidAggregation { - a.field = field - return a -} - -func (a *GeoCentroidAggregation) Script(script *Script) *GeoCentroidAggregation { - a.script = script - return a -} - -func (a *GeoCentroidAggregation) SubAggregation(name string, subAggregation Aggregation) *GeoCentroidAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *GeoCentroidAggregation) Meta(metaData map[string]interface{}) *GeoCentroidAggregation { - a.meta = metaData - return a -} - -func (a *GeoCentroidAggregation) Source() (interface{}, error) { - // Example: - // { - // "query" : { - // "match" : { "business_type" : "shop" } - // }, - // "aggs" : { - // "centroid" : { - // "geo_centroid" : { - // "field" : "location" - // } - // } - // } - // } - // - // This method returns only the { "geo_centroid" : { ... } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["geo_centroid"] = opts - - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_max.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_max.go deleted file mode 100644 index 9128a25..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_max.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MaxAggregation is a single-value metrics aggregation that keeps track and -// returns the maximum value among the numeric values extracted from -// the aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by -// a provided script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-max-aggregation.html -type MaxAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMaxAggregation() *MaxAggregation { - return &MaxAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MaxAggregation) Field(field string) *MaxAggregation { - a.field = field - return a -} - -func (a *MaxAggregation) Script(script *Script) *MaxAggregation { - a.script = script - return a -} - -func (a *MaxAggregation) Format(format string) *MaxAggregation { - a.format = format - return a -} - -func (a *MaxAggregation) Missing(missing interface{}) *MaxAggregation { - a.missing = missing - return a -} - -func (a *MaxAggregation) SubAggregation(name string, subAggregation Aggregation) *MaxAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MaxAggregation) Meta(metaData map[string]interface{}) *MaxAggregation { - a.meta = metaData - return a -} -func (a *MaxAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "max_price" : { "max" : { "field" : "price" } } - // } - // } - // This method returns only the { "max" : { "field" : "price" } } part. 
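The two geo metric builders deleted above compose identically; a combined sketch, with "location" taken from the files' own examples:

func exampleGeo() {
    viewport := elastic.NewGeoBoundsAggregation().
        Field("location").
        WrapLongitude(true) // allow the bounding box to cross the date line
    centroid := elastic.NewGeoCentroidAggregation().Field("location")
    printAgg(viewport) // {"geo_bounds":{"field":"location","wrap_longitude":true}}
    printAgg(centroid) // {"geo_centroid":{"field":"location"}}
}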
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["max"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_min.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_min.go deleted file mode 100644 index b6e40f9..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_min.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MinAggregation is a single-value metrics aggregation that keeps track and -// returns the minimum value among numeric values extracted from the -// aggregated documents. These values can be extracted either from -// specific numeric fields in the documents, or be generated by a -// provided script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-min-aggregation.html -type MinAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewMinAggregation() *MinAggregation { - return &MinAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *MinAggregation) Field(field string) *MinAggregation { - a.field = field - return a -} - -func (a *MinAggregation) Script(script *Script) *MinAggregation { - a.script = script - return a -} - -func (a *MinAggregation) Format(format string) *MinAggregation { - a.format = format - return a -} - -func (a *MinAggregation) Missing(missing interface{}) *MinAggregation { - a.missing = missing - return a -} - -func (a *MinAggregation) SubAggregation(name string, subAggregation Aggregation) *MinAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MinAggregation) Meta(metaData map[string]interface{}) *MinAggregation { - a.meta = metaData - return a -} - -func (a *MinAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "min_price" : { "min" : { "field" : "price" } } - // } - // } - // This method returns only the { "min" : { "field" : "price" } } part. 
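min and max, whose deletions straddle this point, are the simplest of the metric builders; a sketch using the "price" field from their examples (minAgg/maxAgg avoid shadowing Go's predeclared min/max):

func exampleMinMax() {
    minAgg := elastic.NewMinAggregation().Field("price")
    maxAgg := elastic.NewMaxAggregation().Field("price").Missing(0)
    printAgg(minAgg) // {"min":{"field":"price"}}
    printAgg(maxAgg) // {"max":{"field":"price","missing":0}}
}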
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["min"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentile_ranks.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentile_ranks.go deleted file mode 100644 index a62ff4c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentile_ranks.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PercentileRanksAggregation -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-rank-aggregation.html -type PercentileRanksAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - values []float64 - compression *float64 - estimator string -} - -func NewPercentileRanksAggregation() *PercentileRanksAggregation { - return &PercentileRanksAggregation{ - subAggregations: make(map[string]Aggregation), - values: make([]float64, 0), - } -} - -func (a *PercentileRanksAggregation) Field(field string) *PercentileRanksAggregation { - a.field = field - return a -} - -func (a *PercentileRanksAggregation) Script(script *Script) *PercentileRanksAggregation { - a.script = script - return a -} - -func (a *PercentileRanksAggregation) Format(format string) *PercentileRanksAggregation { - a.format = format - return a -} - -func (a *PercentileRanksAggregation) Missing(missing interface{}) *PercentileRanksAggregation { - a.missing = missing - return a -} - -func (a *PercentileRanksAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentileRanksAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *PercentileRanksAggregation) Meta(metaData map[string]interface{}) *PercentileRanksAggregation { - a.meta = metaData - return a -} - -func (a *PercentileRanksAggregation) Values(values ...float64) *PercentileRanksAggregation { - a.values = append(a.values, values...) 
- return a -} - -func (a *PercentileRanksAggregation) Compression(compression float64) *PercentileRanksAggregation { - a.compression = &compression - return a -} - -func (a *PercentileRanksAggregation) Estimator(estimator string) *PercentileRanksAggregation { - a.estimator = estimator - return a -} - -func (a *PercentileRanksAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "load_time_outlier" : { - // "percentile_ranks" : { - // "field" : "load_time" - // "values" : [15, 30] - // } - // } - // } - // } - // This method returns only the - // { "percentile_ranks" : { "field" : "load_time", "values" : [15, 30] } } - // part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["percentile_ranks"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - if len(a.values) > 0 { - opts["values"] = a.values - } - if a.compression != nil { - opts["compression"] = *a.compression - } - if a.estimator != "" { - opts["estimator"] = a.estimator - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentiles.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentiles.go deleted file mode 100644 index 1eb62de..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_percentiles.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PercentilesAggregation is a multi-value metrics aggregation -// that calculates one or more percentiles over numeric values -// extracted from the aggregated documents. These values can -// be extracted either from specific numeric fields in the documents, -// or be generated by a provided script. 
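percentile_ranks, deleted above, inverts percentiles: for each supplied value it reports the percentage of observed values at or below it. A sketch with the load_time field and values from the file's example:

func examplePercentileRanks() {
    agg := elastic.NewPercentileRanksAggregation().
        Field("load_time").
        Values(15, 30)
    // Emits: {"percentile_ranks":{"field":"load_time","values":[15,30]}}
    printAgg(agg)
}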
-// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-percentile-aggregation.html -type PercentilesAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} - percentiles []float64 - method string - compression *float64 - numberOfSignificantValueDigits *int - estimator string -} - -func NewPercentilesAggregation() *PercentilesAggregation { - return &PercentilesAggregation{ - subAggregations: make(map[string]Aggregation), - percentiles: make([]float64, 0), - method: "tdigest", - } -} - -func (a *PercentilesAggregation) Field(field string) *PercentilesAggregation { - a.field = field - return a -} - -func (a *PercentilesAggregation) Script(script *Script) *PercentilesAggregation { - a.script = script - return a -} - -func (a *PercentilesAggregation) Format(format string) *PercentilesAggregation { - a.format = format - return a -} - -func (a *PercentilesAggregation) Missing(missing interface{}) *PercentilesAggregation { - a.missing = missing - return a -} - -func (a *PercentilesAggregation) SubAggregation(name string, subAggregation Aggregation) *PercentilesAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *PercentilesAggregation) Meta(metaData map[string]interface{}) *PercentilesAggregation { - a.meta = metaData - return a -} - -func (a *PercentilesAggregation) Percentiles(percentiles ...float64) *PercentilesAggregation { - a.percentiles = append(a.percentiles, percentiles...) - return a -} - -// Method is the percentiles method, which can be "tdigest" (default) or "hdr". -func (a *PercentilesAggregation) Method(method string) *PercentilesAggregation { - a.method = method - return a -} - -func (a *PercentilesAggregation) Compression(compression float64) *PercentilesAggregation { - a.compression = &compression - return a -} - -func (a *PercentilesAggregation) NumberOfSignificantValueDigits(digits int) *PercentilesAggregation { - a.numberOfSignificantValueDigits = &digits - return a -} - -func (a *PercentilesAggregation) Estimator(estimator string) *PercentilesAggregation { - a.estimator = estimator - return a -} - -func (a *PercentilesAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "load_time_outlier" : { - // "percentiles" : { - // "field" : "load_time" - // } - // } - // } - // } - // This method returns only the - // { "percentiles" : { "field" : "load_time" } } - // part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["percentiles"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - if len(a.percentiles) > 0 { - opts["percents"] = a.percentiles - } - switch a.method { - case "tdigest": - if c := a.compression; c != nil { - opts[a.method] = map[string]interface{}{ - "compression": *c, - } - } - case "hdr": - if n := a.numberOfSignificantValueDigits; n != nil { - opts[a.method] = map[string]interface{}{ - "number_of_significant_value_digits": *n, - } - } - } - if a.estimator != "" { - opts["estimator"] = a.estimator - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_scripted_metric.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_scripted_metric.go deleted file mode 100644 index 0b26c9d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_scripted_metric.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. -package elastic - -// ScriptedMetricAggregation is a metric aggregation that executes using scripts to provide a metric output. - -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-scripted-metric-aggregation.html -type ScriptedMetricAggregation struct { - initScript *Script - mapScript *Script - combineScript *Script - reduceScript *Script - - params map[string]interface{} - meta map[string]interface{} -} - -func NewScriptedMetricAggregation() *ScriptedMetricAggregation { - a := &ScriptedMetricAggregation{} - return a -} - -func (a *ScriptedMetricAggregation) InitScript(script *Script) *ScriptedMetricAggregation { - a.initScript = script - return a -} - -func (a *ScriptedMetricAggregation) MapScript(script *Script) *ScriptedMetricAggregation { - a.mapScript = script - return a -} - -func (a *ScriptedMetricAggregation) CombineScript(script *Script) *ScriptedMetricAggregation { - a.combineScript = script - return a -} - -func (a *ScriptedMetricAggregation) ReduceScript(script *Script) *ScriptedMetricAggregation { - a.reduceScript = script - return a -} - -func (a *ScriptedMetricAggregation) Params(params map[string]interface{}) *ScriptedMetricAggregation { - a.params = params - return a -} - -// Meta sets the meta data to be included in the aggregation response.
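Note how the percentiles Source() method above only serializes Compression under the "tdigest" method and the significant-digits count under "hdr". A sketch of each variant, with illustrative parameter values:

func examplePercentiles() {
    td := elastic.NewPercentilesAggregation().
        Field("load_time").
        Percentiles(50, 95, 99).
        Method("tdigest").
        Compression(200)
    // Emits: {"percentiles":{"field":"load_time","percents":[50,95,99],"tdigest":{"compression":200}}}
    printAgg(td)

    hdr := elastic.NewPercentilesAggregation().
        Field("load_time").
        Method("hdr").
        NumberOfSignificantValueDigits(3)
    // Emits: {"percentiles":{"field":"load_time","hdr":{"number_of_significant_value_digits":3}}}
    printAgg(hdr)
}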
-func (a *ScriptedMetricAggregation) Meta(metaData map[string]interface{}) *ScriptedMetricAggregation { - a.meta = metaData - return a -} - -func (a *ScriptedMetricAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "magic_script" : { "scripted_metric" : { - // "init_script" : "state.transactions = []", - // "map_script" : "state.transactions.add(doc.type.value == 'sale' ? doc.amount.value : -1 * doc.amount.value)", - // "combine_script" : "double profit = 0; for (t in state.transactions) { profit += t } return profit", - // "reduce_script" : "double profit = 0; for (a in states) { profit += a } return profit" - // } } - // } - // } - // This method returns only the { "scripted_metric" : { ... } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["scripted_metric"] = opts - - if a.initScript != nil { - src, err := a.initScript.Source() - if err != nil { - return nil, err - } - opts["init_script"] = src - } - if a.mapScript != nil { - src, err := a.mapScript.Source() - if err != nil { - return nil, err - } - opts["map_script"] = src - } - if a.combineScript != nil { - src, err := a.combineScript.Source() - if err != nil { - return nil, err - } - opts["combine_script"] = src - } - if a.reduceScript != nil { - src, err := a.reduceScript.Source() - if err != nil { - return nil, err - } - opts["reduce_script"] = src - } - - if a.params != nil && len(a.params) > 0 { - opts["params"] = a.params - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_stats.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_stats.go deleted file mode 100644 index 6c00f68..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_stats.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// StatsAggregation is a multi-value metrics aggregation that computes stats -// over numeric values extracted from the aggregated documents. -// These values can be extracted either from specific numeric fields -// in the documents, or be generated by a provided script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-stats-aggregation.html -type StatsAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewStatsAggregation() *StatsAggregation { - return &StatsAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *StatsAggregation) Field(field string) *StatsAggregation { - a.field = field - return a -} - -func (a *StatsAggregation) Script(script *Script) *StatsAggregation { - a.script = script - return a -} - -func (a *StatsAggregation) Format(format string) *StatsAggregation { - a.format = format - return a -} - -func (a *StatsAggregation) Missing(missing interface{}) *StatsAggregation { - a.missing = missing - return a -} - -func (a *StatsAggregation) SubAggregation(name string, subAggregation Aggregation) *StatsAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
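A sketch of the scripted_metric builder whose Source() body appears just above, condensed from the profit example in its own comments; elastic.NewScript is the package's inline-script constructor:

func exampleScriptedMetric() {
    agg := elastic.NewScriptedMetricAggregation().
        InitScript(elastic.NewScript("state.transactions = []")).
        MapScript(elastic.NewScript("state.transactions.add(doc.amount.value)")).
        CombineScript(elastic.NewScript("double p = 0; for (t in state.transactions) { p += t } return p")).
        ReduceScript(elastic.NewScript("double p = 0; for (a in states) { p += a } return p"))
    // Emits the four scripts under init_script/map_script/combine_script/reduce_script.
    printAgg(agg)
}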
-func (a *StatsAggregation) Meta(metaData map[string]interface{}) *StatsAggregation { - a.meta = metaData - return a -} - -func (a *StatsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_stats" : { "stats" : { "field" : "grade" } } - // } - // } - // This method returns only the { "stats" : { "field" : "grade" } } part. - - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["stats"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_sum.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_sum.go deleted file mode 100644 index 6de8f1d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_sum.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SumAggregation is a single-value metrics aggregation that sums up -// numeric values that are extracted from the aggregated documents. -// These values can be extracted either from specific numeric fields -// in the documents, or be generated by a provided script. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-sum-aggregation.html -type SumAggregation struct { - field string - script *Script - format string - missing interface{} - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewSumAggregation() *SumAggregation { - return &SumAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *SumAggregation) Field(field string) *SumAggregation { - a.field = field - return a -} - -func (a *SumAggregation) Script(script *Script) *SumAggregation { - a.script = script - return a -} - -func (a *SumAggregation) Format(format string) *SumAggregation { - a.format = format - return a -} - -func (a *SumAggregation) Missing(missing interface{}) *SumAggregation { - a.missing = missing - return a -} - -func (a *SumAggregation) SubAggregation(name string, subAggregation Aggregation) *SumAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SumAggregation) Meta(metaData map[string]interface{}) *SumAggregation { - a.meta = metaData - return a -} - -func (a *SumAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "intraday_return" : { "sum" : { "field" : "change" } } - // } - // } - // This method returns only the { "sum" : { "field" : "change" } } part. 
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["sum"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - if a.missing != nil { - opts["missing"] = a.missing - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_top_hits.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_top_hits.go deleted file mode 100644 index 7321667..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_top_hits.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TopHitsAggregation keeps track of the most relevant document -// being aggregated. This aggregator is intended to be used as a -// sub aggregator, so that the top matching documents -// can be aggregated per bucket. -// -// It can effectively be used to group result sets by certain fields via -// a bucket aggregator. One or more bucket aggregators determine the -// properties by which a result set is sliced.
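The stats and sum builders removed above share the same minimal shape; field names follow their examples:

func exampleStatsAndSum() {
    printAgg(elastic.NewStatsAggregation().Field("grade")) // {"stats":{"field":"grade"}}
    printAgg(elastic.NewSumAggregation().Field("change"))  // {"sum":{"field":"change"}}
}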
-// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-top-hits-aggregation.html -type TopHitsAggregation struct { - searchSource *SearchSource -} - -func NewTopHitsAggregation() *TopHitsAggregation { - return &TopHitsAggregation{ - searchSource: NewSearchSource(), - } -} - -func (a *TopHitsAggregation) SearchSource(searchSource *SearchSource) *TopHitsAggregation { - a.searchSource = searchSource - if a.searchSource == nil { - a.searchSource = NewSearchSource() - } - return a -} - -func (a *TopHitsAggregation) From(from int) *TopHitsAggregation { - a.searchSource = a.searchSource.From(from) - return a -} - -func (a *TopHitsAggregation) Size(size int) *TopHitsAggregation { - a.searchSource = a.searchSource.Size(size) - return a -} - -func (a *TopHitsAggregation) TrackScores(trackScores bool) *TopHitsAggregation { - a.searchSource = a.searchSource.TrackScores(trackScores) - return a -} - -func (a *TopHitsAggregation) Explain(explain bool) *TopHitsAggregation { - a.searchSource = a.searchSource.Explain(explain) - return a -} - -func (a *TopHitsAggregation) Version(version bool) *TopHitsAggregation { - a.searchSource = a.searchSource.Version(version) - return a -} - -func (a *TopHitsAggregation) NoStoredFields() *TopHitsAggregation { - a.searchSource = a.searchSource.NoStoredFields() - return a -} - -func (a *TopHitsAggregation) FetchSource(fetchSource bool) *TopHitsAggregation { - a.searchSource = a.searchSource.FetchSource(fetchSource) - return a -} - -func (a *TopHitsAggregation) FetchSourceContext(fetchSourceContext *FetchSourceContext) *TopHitsAggregation { - a.searchSource = a.searchSource.FetchSourceContext(fetchSourceContext) - return a -} - -func (a *TopHitsAggregation) DocvalueFields(docvalueFields ...string) *TopHitsAggregation { - a.searchSource = a.searchSource.DocvalueFields(docvalueFields...) - return a -} - -func (a *TopHitsAggregation) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *TopHitsAggregation { - a.searchSource = a.searchSource.DocvalueFieldsWithFormat(docvalueFields...) - return a -} - -func (a *TopHitsAggregation) DocvalueField(docvalueField string) *TopHitsAggregation { - a.searchSource = a.searchSource.DocvalueField(docvalueField) - return a -} - -func (a *TopHitsAggregation) DocvalueFieldWithFormat(docvalueField DocvalueField) *TopHitsAggregation { - a.searchSource = a.searchSource.DocvalueFieldWithFormat(docvalueField) - return a -} - -func (a *TopHitsAggregation) ScriptFields(scriptFields ...*ScriptField) *TopHitsAggregation { - a.searchSource = a.searchSource.ScriptFields(scriptFields...) - return a -} - -func (a *TopHitsAggregation) ScriptField(scriptField *ScriptField) *TopHitsAggregation { - a.searchSource = a.searchSource.ScriptField(scriptField) - return a -} - -func (a *TopHitsAggregation) Sort(field string, ascending bool) *TopHitsAggregation { - a.searchSource = a.searchSource.Sort(field, ascending) - return a -} - -func (a *TopHitsAggregation) SortWithInfo(info SortInfo) *TopHitsAggregation { - a.searchSource = a.searchSource.SortWithInfo(info) - return a -} - -func (a *TopHitsAggregation) SortBy(sorter ...Sorter) *TopHitsAggregation { - a.searchSource = a.searchSource.SortBy(sorter...) 
- return a -} - -func (a *TopHitsAggregation) Highlight(highlight *Highlight) *TopHitsAggregation { - a.searchSource = a.searchSource.Highlight(highlight) - return a -} - -func (a *TopHitsAggregation) Highlighter() *Highlight { - return a.searchSource.Highlighter() -} - -func (a *TopHitsAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs": { - // "top_tag_hits": { - // "top_hits": { - // "sort": [ - // { - // "last_activity_date": { - // "order": "desc" - // } - // } - // ], - // "_source": { - // "include": [ - // "title" - // ] - // }, - // "size" : 1 - // } - // } - // } - // } - // This method returns only the { "top_hits" : { ... } } part. - - source := make(map[string]interface{}) - src, err := a.searchSource.Source() - if err != nil { - return nil, err - } - source["top_hits"] = src - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_value_count.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_value_count.go deleted file mode 100644 index b948dcc..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_value_count.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ValueCountAggregation is a single-value metrics aggregation that counts -// the number of values that are extracted from the aggregated documents. -// These values can be extracted either from specific fields in the documents, -// or be generated by a provided script. Typically, this aggregator will be -// used in conjunction with other single-value aggregations. -// For example, when computing the avg one might be interested in the -// number of values the average is computed over. -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-valuecount-aggregation.html -type ValueCountAggregation struct { - field string - script *Script - format string - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewValueCountAggregation() *ValueCountAggregation { - return &ValueCountAggregation{ - subAggregations: make(map[string]Aggregation), - } -} - -func (a *ValueCountAggregation) Field(field string) *ValueCountAggregation { - a.field = field - return a -} - -func (a *ValueCountAggregation) Script(script *Script) *ValueCountAggregation { - a.script = script - return a -} - -func (a *ValueCountAggregation) Format(format string) *ValueCountAggregation { - a.format = format - return a -} - -func (a *ValueCountAggregation) SubAggregation(name string, subAggregation Aggregation) *ValueCountAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *ValueCountAggregation) Meta(metaData map[string]interface{}) *ValueCountAggregation { - a.meta = metaData - return a -} - -func (a *ValueCountAggregation) Source() (interface{}, error) { - // Example: - // { - // "aggs" : { - // "grades_count" : { "value_count" : { "field" : "grade" } } - // } - // } - // This method returns only the { "value_count" : { "field" : "grade" } } part. 
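top_hits, deleted above, is meant to live under a bucket aggregation, which is what its Source() example shows. A sketch pairing it with a terms bucket; the "top_tag_hits" name and "last_activity_date" sort come from that example, while the "tags" field is illustrative:

func exampleTopHits() {
    top := elastic.NewTopHitsAggregation().
        Sort("last_activity_date", false). // newest first
        Size(1)
    byTag := elastic.NewTermsAggregation().
        Field("tags").
        SubAggregation("top_tag_hits", top)
    // Emits a terms fragment with the top_hits nested under "aggregations".
    printAgg(byTag)
}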
- - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["value_count"] = opts - - // ValuesSourceAggregationBuilder - if a.field != "" { - opts["field"] = a.field - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - opts["script"] = src - } - if a.format != "" { - opts["format"] = a.format - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_weighted_avg.go b/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_weighted_avg.go deleted file mode 100644 index cd7ad7e..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_metrics_weighted_avg.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// WeightedAvgAggregation is a single-value metrics aggregation that -// computes the weighted average of numeric values that are extracted -// from the aggregated documents. These values are extracted -// from specific numeric fields in the documents. -// -// See: https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-metrics-weight-avg-aggregation.html -type WeightedAvgAggregation struct { - fields map[string]*MultiValuesSourceFieldConfig - valueType string - format string - value *MultiValuesSourceFieldConfig - weight *MultiValuesSourceFieldConfig - subAggregations map[string]Aggregation - meta map[string]interface{} -} - -func NewWeightedAvgAggregation() *WeightedAvgAggregation { - return &WeightedAvgAggregation{ - fields: make(map[string]*MultiValuesSourceFieldConfig), - subAggregations: make(map[string]Aggregation), - } -} - -func (a *WeightedAvgAggregation) Field(field string, config *MultiValuesSourceFieldConfig) *WeightedAvgAggregation { - a.fields[field] = config - return a -} - -func (a *WeightedAvgAggregation) ValueType(valueType string) *WeightedAvgAggregation { - a.valueType = valueType - return a -} - -func (a *WeightedAvgAggregation) Format(format string) *WeightedAvgAggregation { - a.format = format - return a -} - -func (a *WeightedAvgAggregation) Value(value *MultiValuesSourceFieldConfig) *WeightedAvgAggregation { - a.value = value - return a -} - -func (a *WeightedAvgAggregation) Weight(weight *MultiValuesSourceFieldConfig) *WeightedAvgAggregation { - a.weight = weight - return a -} - -func (a *WeightedAvgAggregation) SubAggregation(name string, subAggregation Aggregation) *WeightedAvgAggregation { - a.subAggregations[name] = subAggregation - return a -} - -// Meta sets the meta data to be included in the aggregation response.
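value_count, deleted above, is typically paired with another single-value metric, as its comment notes, e.g. to know how many values an average was computed over:

func exampleValueCount() {
    printAgg(elastic.NewValueCountAggregation().Field("grade")) // {"value_count":{"field":"grade"}}
    printAgg(elastic.NewAvgAggregation().Field("grade"))        // {"avg":{"field":"grade"}}
}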
-func (a *WeightedAvgAggregation) Meta(metaData map[string]interface{}) *WeightedAvgAggregation { - a.meta = metaData - return a -} - -func (a *WeightedAvgAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - opts := make(map[string]interface{}) - source["weighted_avg"] = opts - - if len(a.fields) > 0 { - f := make(map[string]interface{}) - for name, config := range a.fields { - cfg, err := config.Source() - if err != nil { - return nil, err - } - f[name] = cfg - } - opts["fields"] = f - } - - if v := a.format; v != "" { - opts["format"] = v - } - - if v := a.valueType; v != "" { - opts["value_type"] = v - } - - if v := a.value; v != nil { - cfg, err := v.Source() - if err != nil { - return nil, err - } - opts["value"] = cfg - } - - if v := a.weight; v != nil { - cfg, err := v.Source() - if err != nil { - return nil, err - } - opts["weight"] = cfg - } - - // AggregationBuilder (SubAggregations) - if len(a.subAggregations) > 0 { - aggsMap := make(map[string]interface{}) - source["aggregations"] = aggsMap - for name, aggregate := range a.subAggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// MultiValuesSourceFieldConfig represents a field configuration -// used e.g. in WeightedAvgAggregation. -type MultiValuesSourceFieldConfig struct { - FieldName string - Missing interface{} - Script *Script - TimeZone string -} - -func (f *MultiValuesSourceFieldConfig) Source() (interface{}, error) { - source := make(map[string]interface{}) - if v := f.Missing; v != nil { - source["missing"] = v - } - if v := f.Script; v != nil { - src, err := v.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - if v := f.FieldName; v != "" { - source["field"] = v - } - if v := f.TimeZone; v != "" { - source["time_zone"] = v - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_avg_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_avg_bucket.go deleted file mode 100644 index 5c130d6..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_avg_bucket.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// AvgBucketAggregation is a sibling pipeline aggregation which calculates -// the (mean) average value of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-avg-bucket-aggregation.html -type AvgBucketAggregation struct { - format string - gapPolicy string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewAvgBucketAggregation creates and initializes a new AvgBucketAggregation. -func NewAvgBucketAggregation() *AvgBucketAggregation { - return &AvgBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *AvgBucketAggregation) Format(format string) *AvgBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. 
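The weighted_avg builder deleted above takes its value and weight sources as MultiValuesSourceFieldConfig values, serialized by the Source() method shown here; a sketch with illustrative grade/weight fields:

func exampleWeightedAvg() {
    agg := elastic.NewWeightedAvgAggregation().
        Value(&elastic.MultiValuesSourceFieldConfig{FieldName: "grade", Missing: 2}).
        Weight(&elastic.MultiValuesSourceFieldConfig{FieldName: "weight"})
    // Emits: {"weighted_avg":{"value":{"field":"grade","missing":2},"weight":{"field":"weight"}}}
    printAgg(agg)
}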
-// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *AvgBucketAggregation) GapPolicy(gapPolicy string) *AvgBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *AvgBucketAggregation) GapInsertZeros() *AvgBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *AvgBucketAggregation) GapSkip() *AvgBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *AvgBucketAggregation) Meta(metaData map[string]interface{}) *AvgBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *AvgBucketAggregation) BucketsPath(bucketsPaths ...string) *AvgBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *AvgBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["avg_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_script.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_script.go deleted file mode 100644 index bc5229b..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_script.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketScriptAggregation is a parent pipeline aggregation which executes -// a script which can perform per bucket computations on specified metrics -// in the parent multi-bucket aggregation. The specified metric must be -// numeric and the script must return a numeric value. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-script-aggregation.html -type BucketScriptAggregation struct { - format string - gapPolicy string - script *Script - - meta map[string]interface{} - bucketsPathsMap map[string]string -} - -// NewBucketScriptAggregation creates and initializes a new BucketScriptAggregation. -func NewBucketScriptAggregation() *BucketScriptAggregation { - return &BucketScriptAggregation{ - bucketsPathsMap: make(map[string]string), - } -} - -// Format to use on the output of this aggregation. -func (a *BucketScriptAggregation) Format(format string) *BucketScriptAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *BucketScriptAggregation) GapPolicy(gapPolicy string) *BucketScriptAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. 
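The avg_bucket builder removed here is a sibling pipeline aggregation: it is pointed at a metric inside a multi-bucket sibling via BucketsPath. Note the switch in its Source(): a single path is emitted as a bare string, several as an array. A sketch with an illustrative sales_per_month>sales path:

// Mean of the "sales" metric across the buckets of a sibling
// "sales_per_month" histogram; skip gaps rather than zero-filling them.
agg := elastic.NewAvgBucketAggregation().
	BucketsPath("sales_per_month>sales").
	GapSkip()
// Source() → {"avg_bucket":{"buckets_path":"sales_per_month>sales","gap_policy":"skip"}}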
-func (a *BucketScriptAggregation) GapInsertZeros() *BucketScriptAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *BucketScriptAggregation) GapSkip() *BucketScriptAggregation { - a.gapPolicy = "skip" - return a -} - -// Script is the script to run. -func (a *BucketScriptAggregation) Script(script *Script) *BucketScriptAggregation { - a.script = script - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *BucketScriptAggregation) Meta(metaData map[string]interface{}) *BucketScriptAggregation { - a.meta = metaData - return a -} - -// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. -func (a *BucketScriptAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketScriptAggregation { - a.bucketsPathsMap = bucketsPathsMap - return a -} - -// AddBucketsPath adds a bucket path to use for this pipeline aggregator. -func (a *BucketScriptAggregation) AddBucketsPath(name, path string) *BucketScriptAggregation { - if a.bucketsPathsMap == nil { - a.bucketsPathsMap = make(map[string]string) - } - a.bucketsPathsMap[name] = path - return a -} - -// Source returns the a JSON-serializable interface. -func (a *BucketScriptAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["bucket_script"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - } - - // Add buckets paths - if len(a.bucketsPathsMap) > 0 { - params["buckets_path"] = a.bucketsPathsMap - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_selector.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_selector.go deleted file mode 100644 index 1051f1a..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_selector.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketSelectorAggregation is a parent pipeline aggregation which -// determines whether the current bucket will be retained in the parent -// multi-bucket aggregation. The specific metric must be numeric and -// the script must return a boolean value. If the script language is -// expression then a numeric return value is permitted. In this case 0.0 -// will be evaluated as false and all other values will evaluate to true. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-selector-aggregation.html -type BucketSelectorAggregation struct { - format string - gapPolicy string - script *Script - - meta map[string]interface{} - bucketsPathsMap map[string]string -} - -// NewBucketSelectorAggregation creates and initializes a new BucketSelectorAggregation. -func NewBucketSelectorAggregation() *BucketSelectorAggregation { - return &BucketSelectorAggregation{ - bucketsPathsMap: make(map[string]string), - } -} - -// Format to use on the output of this aggregation. 
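The bucket_script builder pairs named bucket paths with a script whose variables surface under params. A sketch, with illustrative paths and script; NewScript is the package's script constructor and is not part of this hunk:

agg := elastic.NewBucketScriptAggregation().
	AddBucketsPath("tShirtSales", "t-shirts>sales").
	AddBucketsPath("totalSales", "total_sales").
	Script(elastic.NewScript("params.tShirtSales / params.totalSales * 100"))
// Source() → {"bucket_script":{"buckets_path":{"tShirtSales":"t-shirts>sales",
// "totalSales":"total_sales"},"script":...}}, with the script body encoded
// by Script.Source().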
-func (a *BucketSelectorAggregation) Format(format string) *BucketSelectorAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *BucketSelectorAggregation) GapPolicy(gapPolicy string) *BucketSelectorAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *BucketSelectorAggregation) GapInsertZeros() *BucketSelectorAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *BucketSelectorAggregation) GapSkip() *BucketSelectorAggregation { - a.gapPolicy = "skip" - return a -} - -// Script is the script to run. -func (a *BucketSelectorAggregation) Script(script *Script) *BucketSelectorAggregation { - a.script = script - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *BucketSelectorAggregation) Meta(metaData map[string]interface{}) *BucketSelectorAggregation { - a.meta = metaData - return a -} - -// BucketsPathsMap sets the paths to the buckets to use for this pipeline aggregator. -func (a *BucketSelectorAggregation) BucketsPathsMap(bucketsPathsMap map[string]string) *BucketSelectorAggregation { - a.bucketsPathsMap = bucketsPathsMap - return a -} - -// AddBucketsPath adds a bucket path to use for this pipeline aggregator. -func (a *BucketSelectorAggregation) AddBucketsPath(name, path string) *BucketSelectorAggregation { - if a.bucketsPathsMap == nil { - a.bucketsPathsMap = make(map[string]string) - } - a.bucketsPathsMap[name] = path - return a -} - -// Source returns the a JSON-serializable interface. -func (a *BucketSelectorAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["bucket_selector"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - } - - // Add buckets paths - if len(a.bucketsPathsMap) > 0 { - params["buckets_path"] = a.bucketsPathsMap - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_sort.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_sort.go deleted file mode 100644 index 3d060df..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_bucket_sort.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// BucketSortAggregation parent pipeline aggregation which sorts the buckets -// of its parent multi-bucket aggregation. Zero or more sort fields may be -// specified together with the corresponding sort order. Each bucket may be -// sorted based on its _key, _count or its sub-aggregations. In addition, -// parameters from and size may be set in order to truncate the result buckets. 
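bucket_selector is the filtering counterpart of bucket_script: the script must yield a boolean (or, for expression scripts, a number where 0.0 reads as false), and parent buckets failing it are dropped. A sketch under the same assumptions as above:

// Keep only parent buckets whose total_sales metric exceeds 200.
agg := elastic.NewBucketSelectorAggregation().
	AddBucketsPath("totalSales", "total_sales").
	Script(elastic.NewScript("params.totalSales > 200"))
// Source() → {"bucket_selector":{"buckets_path":{"totalSales":"total_sales"},"script":...}}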
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-bucket-sort-aggregation.html -type BucketSortAggregation struct { - sorters []Sorter - from int - size int - gapPolicy string - - meta map[string]interface{} -} - -// NewBucketSortAggregation creates and initializes a new BucketSortAggregation. -func NewBucketSortAggregation() *BucketSortAggregation { - return &BucketSortAggregation{ - size: -1, - } -} - -// Sort adds a sort order to the list of sorters. -func (a *BucketSortAggregation) Sort(field string, ascending bool) *BucketSortAggregation { - a.sorters = append(a.sorters, SortInfo{Field: field, Ascending: ascending}) - return a -} - -// SortWithInfo adds a SortInfo to the list of sorters. -func (a *BucketSortAggregation) SortWithInfo(info SortInfo) *BucketSortAggregation { - a.sorters = append(a.sorters, info) - return a -} - -// From adds the "from" parameter to the aggregation. -func (a *BucketSortAggregation) From(from int) *BucketSortAggregation { - a.from = from - return a -} - -// Size adds the "size" parameter to the aggregation. -func (a *BucketSortAggregation) Size(size int) *BucketSortAggregation { - a.size = size - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "skip". -func (a *BucketSortAggregation) GapPolicy(gapPolicy string) *BucketSortAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *BucketSortAggregation) GapInsertZeros() *BucketSortAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *BucketSortAggregation) GapSkip() *BucketSortAggregation { - a.gapPolicy = "skip" - return a -} - -// Meta sets the meta data in the aggregation. -// Although metadata is supported for this aggregation by Elasticsearch, it's important to -// note that there's no use to it because this aggregation does not include new data in the -// response. It merely reorders parent buckets. -func (a *BucketSortAggregation) Meta(meta map[string]interface{}) *BucketSortAggregation { - a.meta = meta - return a -} - -// Source returns the a JSON-serializable interface. -func (a *BucketSortAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["bucket_sort"] = params - - if a.from != 0 { - params["from"] = a.from - } - if a.size != -1 { - params["size"] = a.size - } - - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Parses sorters to JSON-serializable interface. - if len(a.sorters) > 0 { - sorters := make([]interface{}, len(a.sorters)) - params["sort"] = sorters - for idx, sorter := range a.sorters { - src, err := sorter.Source() - if err != nil { - return nil, err - } - sorters[idx] = src - } - } - - // Add metadata if available. - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_cumulative_sum.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_cumulative_sum.go deleted file mode 100644 index 1074d86..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_cumulative_sum.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
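The bucket_sort builder just deleted reorders and truncates its parent's buckets. Its Source() uses sentinels: from is omitted while zero, and size while -1 (the constructor default), so only explicitly set values are serialized. A sketch:

// Keep only the top three parent buckets by descending total_sales.
agg := elastic.NewBucketSortAggregation().
	Sort("total_sales", false). // false → descending
	Size(3)
// Source() → {"bucket_sort":{"size":3,"sort":[...]}}, with each sort entry
// rendered by SortInfo.Source().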
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CumulativeSumAggregation is a parent pipeline aggregation which calculates -// the cumulative sum of a specified metric in a parent histogram (or date_histogram) -// aggregation. The specified metric must be numeric and the enclosing -// histogram must have min_doc_count set to 0 (default for histogram aggregations). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-cumulative-sum-aggregation.html -type CumulativeSumAggregation struct { - format string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewCumulativeSumAggregation creates and initializes a new CumulativeSumAggregation. -func NewCumulativeSumAggregation() *CumulativeSumAggregation { - return &CumulativeSumAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *CumulativeSumAggregation) Format(format string) *CumulativeSumAggregation { - a.format = format - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *CumulativeSumAggregation) Meta(metaData map[string]interface{}) *CumulativeSumAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *CumulativeSumAggregation) BucketsPath(bucketsPaths ...string) *CumulativeSumAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *CumulativeSumAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["cumulative_sum"] = params - - if a.format != "" { - params["format"] = a.format - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_derivative.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_derivative.go deleted file mode 100644 index a22219c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_derivative.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DerivativeAggregation is a parent pipeline aggregation which calculates -// the derivative of a specified metric in a parent histogram (or date_histogram) -// aggregation. The specified metric must be numeric and the enclosing -// histogram must have min_doc_count set to 0 (default for histogram aggregations). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-derivative-aggregation.html -type DerivativeAggregation struct { - format string - gapPolicy string - unit string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewDerivativeAggregation creates and initializes a new DerivativeAggregation. -func NewDerivativeAggregation() *DerivativeAggregation { - return &DerivativeAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. 
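cumulative_sum is a parent pipeline aggregation that lives inside a histogram, so its builder takes only a bucket path and an optional format. A sketch:

agg := elastic.NewCumulativeSumAggregation().BucketsPath("sales")
// Source() → {"cumulative_sum":{"buckets_path":"sales"}}; in practice this is
// attached inside a date_histogram via the enclosing aggregation's
// SubAggregation method.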
-func (a *DerivativeAggregation) Format(format string) *DerivativeAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *DerivativeAggregation) GapPolicy(gapPolicy string) *DerivativeAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *DerivativeAggregation) GapInsertZeros() *DerivativeAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *DerivativeAggregation) GapSkip() *DerivativeAggregation { - a.gapPolicy = "skip" - return a -} - -// Unit sets the unit provided, e.g. "1d" or "1y". -// It is only useful when calculating the derivative using a date_histogram. -func (a *DerivativeAggregation) Unit(unit string) *DerivativeAggregation { - a.unit = unit - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *DerivativeAggregation) Meta(metaData map[string]interface{}) *DerivativeAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *DerivativeAggregation) BucketsPath(bucketsPaths ...string) *DerivativeAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *DerivativeAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["derivative"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.unit != "" { - params["unit"] = a.unit - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_extended_stats_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_extended_stats_bucket.go deleted file mode 100644 index 4e816d4..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_extended_stats_bucket.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ExtendedStatsBucketAggregation is a sibling pipeline aggregation which calculates -// a variety of stats across all bucket of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// This aggregation provides a few more statistics (sum of squares, standard deviation, etc) -// compared to the stats_bucket aggregation. -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-extended-stats-bucket-aggregation.html -type ExtendedStatsBucketAggregation struct { - format string - gapPolicy string - sigma *float32 - meta map[string]interface{} - bucketsPaths []string -} - -// NewExtendedStatsBucketAggregation creates and initializes a new ExtendedStatsBucketAggregation. 
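The derivative builder adds a unit option on top of the usual path, format, and gap-policy trio; the unit is only meaningful under a date_histogram. A sketch:

// First derivative of the "sales" metric, normalized per day.
agg := elastic.NewDerivativeAggregation().
	BucketsPath("sales").
	Unit("1d")
// Source() → {"derivative":{"buckets_path":"sales","unit":"1d"}}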
-func NewExtendedStatsBucketAggregation() *ExtendedStatsBucketAggregation { - return &ExtendedStatsBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (s *ExtendedStatsBucketAggregation) Format(format string) *ExtendedStatsBucketAggregation { - s.format = format - return s -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (s *ExtendedStatsBucketAggregation) GapPolicy(gapPolicy string) *ExtendedStatsBucketAggregation { - s.gapPolicy = gapPolicy - return s -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (s *ExtendedStatsBucketAggregation) GapInsertZeros() *ExtendedStatsBucketAggregation { - s.gapPolicy = "insert_zeros" - return s -} - -// GapSkip skips gaps in the series. -func (s *ExtendedStatsBucketAggregation) GapSkip() *ExtendedStatsBucketAggregation { - s.gapPolicy = "skip" - return s -} - -// Meta sets the meta data to be included in the aggregation response. -func (s *ExtendedStatsBucketAggregation) Meta(metaData map[string]interface{}) *ExtendedStatsBucketAggregation { - s.meta = metaData - return s -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (s *ExtendedStatsBucketAggregation) BucketsPath(bucketsPaths ...string) *ExtendedStatsBucketAggregation { - s.bucketsPaths = append(s.bucketsPaths, bucketsPaths...) - return s -} - -// Sigma sets number of standard deviations above/below the mean to display -func (s *ExtendedStatsBucketAggregation) Sigma(sigma float32) *ExtendedStatsBucketAggregation { - s.sigma = &sigma - return s -} - -// Source returns the a JSON-serializable interface. -func (s *ExtendedStatsBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["extended_stats_bucket"] = params - - if s.format != "" { - params["format"] = s.format - } - if s.gapPolicy != "" { - params["gap_policy"] = s.gapPolicy - } - - // Add buckets paths - switch len(s.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = s.bucketsPaths[0] - default: - params["buckets_path"] = s.bucketsPaths - } - - // Add sigma is not zero or less - if s.sigma != nil && *s.sigma >= 0 { - params["sigma"] = *s.sigma - } - - // Add Meta data if available - if len(s.meta) > 0 { - source["meta"] = s.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_max_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_max_bucket.go deleted file mode 100644 index 6cd087e..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_max_bucket.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MaxBucketAggregation is a sibling pipeline aggregation which identifies -// the bucket(s) with the maximum value of a specified metric in a sibling -// aggregation and outputs both the value and the key(s) of the bucket(s). -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. 
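extended_stats_bucket extends stats_bucket with sum of squares, variance, and standard-deviation bounds; Sigma (a float32 stored by pointer, emitted only when set and non-negative) widens or narrows those bounds. A sketch:

agg := elastic.NewExtendedStatsBucketAggregation().
	BucketsPath("sales_per_month>sales").
	Sigma(3)
// Source() → {"extended_stats_bucket":{"buckets_path":"sales_per_month>sales","sigma":3}}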
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-max-bucket-aggregation.html -type MaxBucketAggregation struct { - format string - gapPolicy string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewMaxBucketAggregation creates and initializes a new MaxBucketAggregation. -func NewMaxBucketAggregation() *MaxBucketAggregation { - return &MaxBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *MaxBucketAggregation) Format(format string) *MaxBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MaxBucketAggregation) GapPolicy(gapPolicy string) *MaxBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MaxBucketAggregation) GapInsertZeros() *MaxBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MaxBucketAggregation) GapSkip() *MaxBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MaxBucketAggregation) Meta(metaData map[string]interface{}) *MaxBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MaxBucketAggregation) BucketsPath(bucketsPaths ...string) *MaxBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *MaxBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["max_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_min_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_min_bucket.go deleted file mode 100644 index c87f0d9..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_min_bucket.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MinBucketAggregation is a sibling pipeline aggregation which identifies -// the bucket(s) with the maximum value of a specified metric in a sibling -// aggregation and outputs both the value and the key(s) of the bucket(s). -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. 
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-min-bucket-aggregation.html -type MinBucketAggregation struct { - format string - gapPolicy string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewMinBucketAggregation creates and initializes a new MinBucketAggregation. -func NewMinBucketAggregation() *MinBucketAggregation { - return &MinBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *MinBucketAggregation) Format(format string) *MinBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MinBucketAggregation) GapPolicy(gapPolicy string) *MinBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MinBucketAggregation) GapInsertZeros() *MinBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MinBucketAggregation) GapSkip() *MinBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MinBucketAggregation) Meta(metaData map[string]interface{}) *MinBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MinBucketAggregation) BucketsPath(bucketsPaths ...string) *MinBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *MinBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["min_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_avg.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_avg.go deleted file mode 100644 index 035dc0e..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_avg.go +++ /dev/null @@ -1,378 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MovAvgAggregation operates on a series of data. It will slide a window -// across the data and emit the average value of that window. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html -// -// Deprecated: The MovAvgAggregation has been deprecated in 6.4.0. Use the more generate MovFnAggregation instead. 
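max_bucket and min_bucket share an identical builder shape; they differ only in the key their Source() methods write. A sketch of both, with an illustrative path:

high := elastic.NewMaxBucketAggregation().BucketsPath("sales_per_month>sales")
low := elastic.NewMinBucketAggregation().BucketsPath("sales_per_month>sales")
// → {"max_bucket":{"buckets_path":"sales_per_month>sales"}} and the
// corresponding {"min_bucket":{...}} fragment.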
-type MovAvgAggregation struct { - format string - gapPolicy string - model MovAvgModel - window *int - predict *int - minimize *bool - - meta map[string]interface{} - bucketsPaths []string -} - -// NewMovAvgAggregation creates and initializes a new MovAvgAggregation. -// -// Deprecated: The MovAvgAggregation has been deprecated in 6.4.0. Use the more generate MovFnAggregation instead. -func NewMovAvgAggregation() *MovAvgAggregation { - return &MovAvgAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *MovAvgAggregation) Format(format string) *MovAvgAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MovAvgAggregation) GapPolicy(gapPolicy string) *MovAvgAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MovAvgAggregation) GapInsertZeros() *MovAvgAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MovAvgAggregation) GapSkip() *MovAvgAggregation { - a.gapPolicy = "skip" - return a -} - -// Model is used to define what type of moving average you want to use -// in the series. -func (a *MovAvgAggregation) Model(model MovAvgModel) *MovAvgAggregation { - a.model = model - return a -} - -// Window sets the window size for the moving average. This window will -// "slide" across the series, and the values inside that window will -// be used to calculate the moving avg value. -func (a *MovAvgAggregation) Window(window int) *MovAvgAggregation { - a.window = &window - return a -} - -// Predict sets the number of predictions that should be returned. -// Each prediction will be spaced at the intervals in the histogram. -// E.g. a predict of 2 will return two new buckets at the end of the -// histogram with the predicted values. -func (a *MovAvgAggregation) Predict(numPredictions int) *MovAvgAggregation { - a.predict = &numPredictions - return a -} - -// Minimize determines if the model should be fit to the data using a -// cost minimizing algorithm. -func (a *MovAvgAggregation) Minimize(minimize bool) *MovAvgAggregation { - a.minimize = &minimize - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *MovAvgAggregation) Meta(metaData map[string]interface{}) *MovAvgAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MovAvgAggregation) BucketsPath(bucketsPaths ...string) *MovAvgAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. 
-func (a *MovAvgAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["moving_avg"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.model != nil { - params["model"] = a.model.Name() - settings := a.model.Settings() - if len(settings) > 0 { - params["settings"] = settings - } - } - if a.window != nil { - params["window"] = *a.window - } - if a.predict != nil { - params["predict"] = *a.predict - } - if a.minimize != nil { - params["minimize"] = *a.minimize - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} - -// -- Models for moving averages -- -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_models - -// MovAvgModel specifies the model to use with the MovAvgAggregation. -type MovAvgModel interface { - Name() string - Settings() map[string]interface{} -} - -// -- EWMA -- - -// EWMAMovAvgModel calculates an exponentially weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_ewma_exponentially_weighted -type EWMAMovAvgModel struct { - alpha *float64 -} - -// NewEWMAMovAvgModel creates and initializes a new EWMAMovAvgModel. -func NewEWMAMovAvgModel() *EWMAMovAvgModel { - return &EWMAMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *EWMAMovAvgModel) Alpha(alpha float64) *EWMAMovAvgModel { - m.alpha = &alpha - return m -} - -// Name of the model. -func (m *EWMAMovAvgModel) Name() string { - return "ewma" -} - -// Settings of the model. -func (m *EWMAMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - return settings -} - -// -- Holt linear -- - -// HoltLinearMovAvgModel calculates a doubly exponential weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_linear -type HoltLinearMovAvgModel struct { - alpha *float64 - beta *float64 -} - -// NewHoltLinearMovAvgModel creates and initializes a new HoltLinearMovAvgModel. -func NewHoltLinearMovAvgModel() *HoltLinearMovAvgModel { - return &HoltLinearMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *HoltLinearMovAvgModel) Alpha(alpha float64) *HoltLinearMovAvgModel { - m.alpha = &alpha - return m -} - -// Beta is equivalent to Alpha but controls the smoothing of the trend -// instead of the data. -func (m *HoltLinearMovAvgModel) Beta(beta float64) *HoltLinearMovAvgModel { - m.beta = &beta - return m -} - -// Name of the model. 
-func (m *HoltLinearMovAvgModel) Name() string { - return "holt" -} - -// Settings of the model. -func (m *HoltLinearMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - if m.beta != nil { - settings["beta"] = *m.beta - } - return settings -} - -// -- Holt Winters -- - -// HoltWintersMovAvgModel calculates a triple exponential weighted moving average. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_holt_winters -type HoltWintersMovAvgModel struct { - alpha *float64 - beta *float64 - gamma *float64 - period *int - seasonalityType string - pad *bool -} - -// NewHoltWintersMovAvgModel creates and initializes a new HoltWintersMovAvgModel. -func NewHoltWintersMovAvgModel() *HoltWintersMovAvgModel { - return &HoltWintersMovAvgModel{} -} - -// Alpha controls the smoothing of the data. Alpha = 1 retains no memory -// of past values (e.g. a random walk), while alpha = 0 retains infinite -// memory of past values (e.g. the series mean). Useful values are somewhere -// in between. Defaults to 0.5. -func (m *HoltWintersMovAvgModel) Alpha(alpha float64) *HoltWintersMovAvgModel { - m.alpha = &alpha - return m -} - -// Beta is equivalent to Alpha but controls the smoothing of the trend -// instead of the data. -func (m *HoltWintersMovAvgModel) Beta(beta float64) *HoltWintersMovAvgModel { - m.beta = &beta - return m -} - -func (m *HoltWintersMovAvgModel) Gamma(gamma float64) *HoltWintersMovAvgModel { - m.gamma = &gamma - return m -} - -func (m *HoltWintersMovAvgModel) Period(period int) *HoltWintersMovAvgModel { - m.period = &period - return m -} - -func (m *HoltWintersMovAvgModel) SeasonalityType(typ string) *HoltWintersMovAvgModel { - m.seasonalityType = typ - return m -} - -func (m *HoltWintersMovAvgModel) Pad(pad bool) *HoltWintersMovAvgModel { - m.pad = &pad - return m -} - -// Name of the model. -func (m *HoltWintersMovAvgModel) Name() string { - return "holt_winters" -} - -// Settings of the model. -func (m *HoltWintersMovAvgModel) Settings() map[string]interface{} { - settings := make(map[string]interface{}) - if m.alpha != nil { - settings["alpha"] = *m.alpha - } - if m.beta != nil { - settings["beta"] = *m.beta - } - if m.gamma != nil { - settings["gamma"] = *m.gamma - } - if m.period != nil { - settings["period"] = *m.period - } - if m.pad != nil { - settings["pad"] = *m.pad - } - if m.seasonalityType != "" { - settings["type"] = m.seasonalityType - } - return settings -} - -// -- Linear -- - -// LinearMovAvgModel calculates a linearly weighted moving average, such -// that older values are linearly less important. "Time" is determined -// by position in collection. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_linear -type LinearMovAvgModel struct { -} - -// NewLinearMovAvgModel creates and initializes a new LinearMovAvgModel. -func NewLinearMovAvgModel() *LinearMovAvgModel { - return &LinearMovAvgModel{} -} - -// Name of the model. -func (m *LinearMovAvgModel) Name() string { - return "linear" -} - -// Settings of the model. -func (m *LinearMovAvgModel) Settings() map[string]interface{} { - return nil -} - -// -- Simple -- - -// SimpleMovAvgModel calculates a simple unweighted (arithmetic) moving average. 
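The deprecated moving_avg builder composes with a MovAvgModel: the model's Name() becomes "model" and a non-empty Settings() map becomes "settings" in the emitted body. A sketch with the Holt-Winters model; all tuning values here are illustrative:

agg := elastic.NewMovAvgAggregation(). // deprecated upstream since 6.4.0
	BucketsPath("the_sum").
	Window(30).
	Model(elastic.NewHoltWintersMovAvgModel().
		Alpha(0.5).Beta(0.5).Gamma(0.3).Period(7))
// Source() → {"moving_avg":{"buckets_path":"the_sum","window":30,
// "model":"holt_winters","settings":{...}}}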
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movavg-aggregation.html#_simple -type SimpleMovAvgModel struct { -} - -// NewSimpleMovAvgModel creates and initializes a new SimpleMovAvgModel. -func NewSimpleMovAvgModel() *SimpleMovAvgModel { - return &SimpleMovAvgModel{} -} - -// Name of the model. -func (m *SimpleMovAvgModel) Name() string { - return "simple" -} - -// Settings of the model. -func (m *SimpleMovAvgModel) Settings() map[string]interface{} { - return nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_fn.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_fn.go deleted file mode 100644 index ea8cbdc..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_mov_fn.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MovFnAggregation, given an ordered series of data, will slice a window across -// the data and allow the user to specify a custom script that is executed for -// each window of data. -// -// You must pass a script to process the values. There are a number of predefined -// script functions you can use as described here: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movfn-aggregation.html#_pre_built_functions. -// -// Example: -// agg := elastic.NewMovFnAggregation( -// "the_sum", // bucket path -// elastic.NewScript("MovingFunctions.stdDev(values, MovingFunctions.unweightedAvg(values))"), -// 10, // window size -// ) -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-movfn-aggregation.html. -type MovFnAggregation struct { - script *Script - format string - gapPolicy string - window int - - meta map[string]interface{} - bucketsPaths []string -} - -// NewMovFnAggregation creates and initializes a new MovFnAggregation. -// -// Deprecated: The MovFnAggregation has been deprecated in 6.4.0. Use the more generate MovFnAggregation instead. -func NewMovFnAggregation(bucketsPath string, script *Script, window int) *MovFnAggregation { - return &MovFnAggregation{ - bucketsPaths: []string{bucketsPath}, - script: script, - window: window, - } -} - -// Script is the script to run. -func (a *MovFnAggregation) Script(script *Script) *MovFnAggregation { - a.script = script - return a -} - -// Format to use on the output of this aggregation. -func (a *MovFnAggregation) Format(format string) *MovFnAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *MovFnAggregation) GapPolicy(gapPolicy string) *MovFnAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *MovFnAggregation) GapInsertZeros() *MovFnAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *MovFnAggregation) GapSkip() *MovFnAggregation { - a.gapPolicy = "skip" - return a -} - -// Window sets the window size for this aggregation. -func (a *MovFnAggregation) Window(window int) *MovFnAggregation { - a.window = window - return a -} - -// Meta sets the meta data to be included in the aggregation response. 
-func (a *MovFnAggregation) Meta(metaData map[string]interface{}) *MovFnAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *MovFnAggregation) BucketsPath(bucketsPaths ...string) *MovFnAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *MovFnAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["moving_fn"] = params - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Script - if a.script != nil { - src, err := a.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - } - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - params["window"] = a.window - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_percentiles_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_percentiles_bucket.go deleted file mode 100644 index a5a3fdf..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_percentiles_bucket.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2012-2015 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PercentilesBucketAggregation is a sibling pipeline aggregation which calculates -// percentiles across all bucket of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-percentiles-bucket-aggregation.html -type PercentilesBucketAggregation struct { - format string - gapPolicy string - percents []float64 - bucketsPaths []string - - meta map[string]interface{} -} - -// NewPercentilesBucketAggregation creates and initializes a new PercentilesBucketAggregation. -func NewPercentilesBucketAggregation() *PercentilesBucketAggregation { - return &PercentilesBucketAggregation{} -} - -// Format to apply the output value of this aggregation. -func (p *PercentilesBucketAggregation) Format(format string) *PercentilesBucketAggregation { - p.format = format - return p -} - -// Percents to calculate percentiles for in this aggregation. -func (p *PercentilesBucketAggregation) Percents(percents ...float64) *PercentilesBucketAggregation { - p.percents = percents - return p -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (p *PercentilesBucketAggregation) GapPolicy(gapPolicy string) *PercentilesBucketAggregation { - p.gapPolicy = gapPolicy - return p -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (p *PercentilesBucketAggregation) GapInsertZeros() *PercentilesBucketAggregation { - p.gapPolicy = "insert_zeros" - return p -} - -// GapSkip skips gaps in the series. 
-func (p *PercentilesBucketAggregation) GapSkip() *PercentilesBucketAggregation { - p.gapPolicy = "skip" - return p -} - -// Meta sets the meta data to be included in the aggregation response. -func (p *PercentilesBucketAggregation) Meta(metaData map[string]interface{}) *PercentilesBucketAggregation { - p.meta = metaData - return p -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (p *PercentilesBucketAggregation) BucketsPath(bucketsPaths ...string) *PercentilesBucketAggregation { - p.bucketsPaths = append(p.bucketsPaths, bucketsPaths...) - return p -} - -// Source returns the a JSON-serializable interface. -func (p *PercentilesBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["percentiles_bucket"] = params - - if p.format != "" { - params["format"] = p.format - } - if p.gapPolicy != "" { - params["gap_policy"] = p.gapPolicy - } - - // Add buckets paths - switch len(p.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = p.bucketsPaths[0] - default: - params["buckets_path"] = p.bucketsPaths - } - - // Add percents - if len(p.percents) > 0 { - params["percents"] = p.percents - } - - // Add Meta data if available - if len(p.meta) > 0 { - source["meta"] = p.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_serial_diff.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_serial_diff.go deleted file mode 100644 index d2a9b4c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_serial_diff.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SerialDiffAggregation implements serial differencing. -// Serial differencing is a technique where values in a time series are -// subtracted from itself at different time lags or periods. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-serialdiff-aggregation.html -type SerialDiffAggregation struct { - format string - gapPolicy string - lag *int - - meta map[string]interface{} - bucketsPaths []string -} - -// NewSerialDiffAggregation creates and initializes a new SerialDiffAggregation. -func NewSerialDiffAggregation() *SerialDiffAggregation { - return &SerialDiffAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *SerialDiffAggregation) Format(format string) *SerialDiffAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *SerialDiffAggregation) GapPolicy(gapPolicy string) *SerialDiffAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *SerialDiffAggregation) GapInsertZeros() *SerialDiffAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *SerialDiffAggregation) GapSkip() *SerialDiffAggregation { - a.gapPolicy = "skip" - return a -} - -// Lag specifies the historical bucket to subtract from the current value. -// E.g. a lag of 7 will subtract the current value from the value 7 buckets -// ago. 
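percentiles_bucket follows the same sibling pattern, adding a percents list that is serialized only when non-empty. A sketch:

agg := elastic.NewPercentilesBucketAggregation().
	BucketsPath("sales_per_month>sales").
	Percents(25, 50, 75)
// Source() → {"percentiles_bucket":{"buckets_path":"sales_per_month>sales",
// "percents":[25,50,75]}}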
Lag must be a positive, non-zero integer. -func (a *SerialDiffAggregation) Lag(lag int) *SerialDiffAggregation { - a.lag = &lag - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SerialDiffAggregation) Meta(metaData map[string]interface{}) *SerialDiffAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *SerialDiffAggregation) BucketsPath(bucketsPaths ...string) *SerialDiffAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) - return a -} - -// Source returns the a JSON-serializable interface. -func (a *SerialDiffAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["serial_diff"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - if a.lag != nil { - params["lag"] = *a.lag - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_stats_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_stats_bucket.go deleted file mode 100644 index 2eb61eb..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_stats_bucket.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// StatsBucketAggregation is a sibling pipeline aggregation which calculates -// a variety of stats across all bucket of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-stats-bucket-aggregation.html -type StatsBucketAggregation struct { - format string - gapPolicy string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewStatsBucketAggregation creates and initializes a new StatsBucketAggregation. -func NewStatsBucketAggregation() *StatsBucketAggregation { - return &StatsBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (s *StatsBucketAggregation) Format(format string) *StatsBucketAggregation { - s.format = format - return s -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (s *StatsBucketAggregation) GapPolicy(gapPolicy string) *StatsBucketAggregation { - s.gapPolicy = gapPolicy - return s -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (s *StatsBucketAggregation) GapInsertZeros() *StatsBucketAggregation { - s.gapPolicy = "insert_zeros" - return s -} - -// GapSkip skips gaps in the series. -func (s *StatsBucketAggregation) GapSkip() *StatsBucketAggregation { - s.gapPolicy = "skip" - return s -} - -// Meta sets the meta data to be included in the aggregation response. 
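For serial_diff, Lag is stored by pointer and omitted from the body when unset. A day-over-week differencing sketch for a daily histogram:

agg := elastic.NewSerialDiffAggregation().
	BucketsPath("the_sum").
	Lag(7) // subtract the value from seven buckets earlier
// Source() → {"serial_diff":{"buckets_path":"the_sum","lag":7}}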
-func (s *StatsBucketAggregation) Meta(metaData map[string]interface{}) *StatsBucketAggregation { - s.meta = metaData - return s -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (s *StatsBucketAggregation) BucketsPath(bucketsPaths ...string) *StatsBucketAggregation { - s.bucketsPaths = append(s.bucketsPaths, bucketsPaths...) - return s -} - -// Source returns the a JSON-serializable interface. -func (s *StatsBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["stats_bucket"] = params - - if s.format != "" { - params["format"] = s.format - } - if s.gapPolicy != "" { - params["gap_policy"] = s.gapPolicy - } - - // Add buckets paths - switch len(s.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = s.bucketsPaths[0] - default: - params["buckets_path"] = s.bucketsPaths - } - - // Add Meta data if available - if len(s.meta) > 0 { - source["meta"] = s.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_sum_bucket.go b/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_sum_bucket.go deleted file mode 100644 index cfdafce..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_aggs_pipeline_sum_bucket.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SumBucketAggregation is a sibling pipeline aggregation which calculates -// the sum across all buckets of a specified metric in a sibling aggregation. -// The specified metric must be numeric and the sibling aggregation must -// be a multi-bucket aggregation. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-aggregations-pipeline-sum-bucket-aggregation.html -type SumBucketAggregation struct { - format string - gapPolicy string - - meta map[string]interface{} - bucketsPaths []string -} - -// NewSumBucketAggregation creates and initializes a new SumBucketAggregation. -func NewSumBucketAggregation() *SumBucketAggregation { - return &SumBucketAggregation{ - bucketsPaths: make([]string, 0), - } -} - -// Format to use on the output of this aggregation. -func (a *SumBucketAggregation) Format(format string) *SumBucketAggregation { - a.format = format - return a -} - -// GapPolicy defines what should be done when a gap in the series is discovered. -// Valid values include "insert_zeros" or "skip". Default is "insert_zeros". -func (a *SumBucketAggregation) GapPolicy(gapPolicy string) *SumBucketAggregation { - a.gapPolicy = gapPolicy - return a -} - -// GapInsertZeros inserts zeros for gaps in the series. -func (a *SumBucketAggregation) GapInsertZeros() *SumBucketAggregation { - a.gapPolicy = "insert_zeros" - return a -} - -// GapSkip skips gaps in the series. -func (a *SumBucketAggregation) GapSkip() *SumBucketAggregation { - a.gapPolicy = "skip" - return a -} - -// Meta sets the meta data to be included in the aggregation response. -func (a *SumBucketAggregation) Meta(metaData map[string]interface{}) *SumBucketAggregation { - a.meta = metaData - return a -} - -// BucketsPath sets the paths to the buckets to use for this pipeline aggregator. -func (a *SumBucketAggregation) BucketsPath(bucketsPaths ...string) *SumBucketAggregation { - a.bucketsPaths = append(a.bucketsPaths, bucketsPaths...) 
- return a -} - -// Source returns the a JSON-serializable interface. -func (a *SumBucketAggregation) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["sum_bucket"] = params - - if a.format != "" { - params["format"] = a.format - } - if a.gapPolicy != "" { - params["gap_policy"] = a.gapPolicy - } - - // Add buckets paths - switch len(a.bucketsPaths) { - case 0: - case 1: - params["buckets_path"] = a.bucketsPaths[0] - default: - params["buckets_path"] = a.bucketsPaths - } - - // Add Meta data if available - if len(a.meta) > 0 { - source["meta"] = a.meta - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_collapse_builder.go b/vendor/github.com/olivere/elastic/v7/search_collapse_builder.go deleted file mode 100644 index 0de4eed..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_collapse_builder.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CollapseBuilder enables field collapsing on a search request. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-collapse.html -// for details. -type CollapseBuilder struct { - field string - innerHit *InnerHit - maxConcurrentGroupRequests *int -} - -// NewCollapseBuilder creates a new CollapseBuilder. -func NewCollapseBuilder(field string) *CollapseBuilder { - return &CollapseBuilder{field: field} -} - -// Field to collapse. -func (b *CollapseBuilder) Field(field string) *CollapseBuilder { - b.field = field - return b -} - -// InnerHit option to expand the collapsed results. -func (b *CollapseBuilder) InnerHit(innerHit *InnerHit) *CollapseBuilder { - b.innerHit = innerHit - return b -} - -// MaxConcurrentGroupRequests is the maximum number of group requests that are -// allowed to be ran concurrently in the inner_hits phase. -func (b *CollapseBuilder) MaxConcurrentGroupRequests(max int) *CollapseBuilder { - b.maxConcurrentGroupRequests = &max - return b -} - -// Source generates the JSON serializable fragment for the CollapseBuilder. -func (b *CollapseBuilder) Source() (interface{}, error) { - // { - // "field": "user", - // "inner_hits": { - // "name": "last_tweets", - // "size": 5, - // "sort": [{ "date": "asc" }] - // }, - // "max_concurrent_group_searches": 4 - // } - src := map[string]interface{}{ - "field": b.field, - } - - if b.innerHit != nil { - hits, err := b.innerHit.Source() - if err != nil { - return nil, err - } - src["inner_hits"] = hits - } - - if b.maxConcurrentGroupRequests != nil { - src["max_concurrent_group_searches"] = *b.maxConcurrentGroupRequests - } - - return src, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_bool.go b/vendor/github.com/olivere/elastic/v7/search_queries_bool.go deleted file mode 100644 index d192d79..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_bool.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "fmt" - -// A bool query matches documents matching boolean -// combinations of other queries. 
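SumBucketAggregation, deleted above, mirrors the stats_bucket surface (the same gap-policy and buckets-path builders). For the CollapseBuilder also removed above, a minimal sketch follows; the collapse field, inner-hit name, and sizes are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func collapseSketch() *elastic.CollapseBuilder {
	// Collapse hits on "user_id" and expand the top 5 docs per group.
	return elastic.NewCollapseBuilder("user_id").
		InnerHit(elastic.NewInnerHit().Name("top_docs").Size(5)).
		MaxConcurrentGroupRequests(4)
}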
-// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-bool-query.html -type BoolQuery struct { - Query - mustClauses []Query - mustNotClauses []Query - filterClauses []Query - shouldClauses []Query - boost *float64 - minimumShouldMatch string - adjustPureNegative *bool - queryName string -} - -// Creates a new bool query. -func NewBoolQuery() *BoolQuery { - return &BoolQuery{ - mustClauses: make([]Query, 0), - mustNotClauses: make([]Query, 0), - filterClauses: make([]Query, 0), - shouldClauses: make([]Query, 0), - } -} - -func (q *BoolQuery) Must(queries ...Query) *BoolQuery { - q.mustClauses = append(q.mustClauses, queries...) - return q -} - -func (q *BoolQuery) MustNot(queries ...Query) *BoolQuery { - q.mustNotClauses = append(q.mustNotClauses, queries...) - return q -} - -func (q *BoolQuery) Filter(filters ...Query) *BoolQuery { - q.filterClauses = append(q.filterClauses, filters...) - return q -} - -func (q *BoolQuery) Should(queries ...Query) *BoolQuery { - q.shouldClauses = append(q.shouldClauses, queries...) - return q -} - -func (q *BoolQuery) Boost(boost float64) *BoolQuery { - q.boost = &boost - return q -} - -func (q *BoolQuery) MinimumShouldMatch(minimumShouldMatch string) *BoolQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -func (q *BoolQuery) MinimumNumberShouldMatch(minimumNumberShouldMatch int) *BoolQuery { - q.minimumShouldMatch = fmt.Sprintf("%d", minimumNumberShouldMatch) - return q -} - -func (q *BoolQuery) AdjustPureNegative(adjustPureNegative bool) *BoolQuery { - q.adjustPureNegative = &adjustPureNegative - return q -} - -func (q *BoolQuery) QueryName(queryName string) *BoolQuery { - q.queryName = queryName - return q -} - -// Creates the query source for the bool query. -func (q *BoolQuery) Source() (interface{}, error) { - // { - // "bool" : { - // "must" : { - // "term" : { "user" : "kimchy" } - // }, - // "must_not" : { - // "range" : { - // "age" : { "from" : 10, "to" : 20 } - // } - // }, - // "filter" : [ - // ... 
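A minimal sketch of composing the BoolQuery defined above; field names and values are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func boolSketch() *elastic.BoolQuery {
	return elastic.NewBoolQuery().
		Must(elastic.NewTermQuery("user", "kimchy")).
		MustNot(elastic.NewRangeQuery("age").Gte(10).Lte(20)).
		Filter(elastic.NewTermQuery("account", "active")).
		Should(
			elastic.NewTermQuery("tag", "wow"),
			elastic.NewTermQuery("tag", "elasticsearch"),
		).
		MinimumShouldMatch("1")
}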
- // ] - // "should" : [ - // { - // "term" : { "tag" : "wow" } - // }, - // { - // "term" : { "tag" : "elasticsearch" } - // } - // ], - // "minimum_should_match" : 1, - // "boost" : 1.0 - // } - // } - - query := make(map[string]interface{}) - - boolClause := make(map[string]interface{}) - query["bool"] = boolClause - - // must - if len(q.mustClauses) == 1 { - src, err := q.mustClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["must"] = src - } else if len(q.mustClauses) > 1 { - var clauses []interface{} - for _, subQuery := range q.mustClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["must"] = clauses - } - - // must_not - if len(q.mustNotClauses) == 1 { - src, err := q.mustNotClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["must_not"] = src - } else if len(q.mustNotClauses) > 1 { - var clauses []interface{} - for _, subQuery := range q.mustNotClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["must_not"] = clauses - } - - // filter - if len(q.filterClauses) == 1 { - src, err := q.filterClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["filter"] = src - } else if len(q.filterClauses) > 1 { - var clauses []interface{} - for _, subQuery := range q.filterClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["filter"] = clauses - } - - // should - if len(q.shouldClauses) == 1 { - src, err := q.shouldClauses[0].Source() - if err != nil { - return nil, err - } - boolClause["should"] = src - } else if len(q.shouldClauses) > 1 { - var clauses []interface{} - for _, subQuery := range q.shouldClauses { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - boolClause["should"] = clauses - } - - if q.boost != nil { - boolClause["boost"] = *q.boost - } - if q.minimumShouldMatch != "" { - boolClause["minimum_should_match"] = q.minimumShouldMatch - } - if q.adjustPureNegative != nil { - boolClause["adjust_pure_negative"] = *q.adjustPureNegative - } - if q.queryName != "" { - boolClause["_name"] = q.queryName - } - - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_boosting.go b/vendor/github.com/olivere/elastic/v7/search_queries_boosting.go deleted file mode 100644 index 7a76efb..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_boosting.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// A boosting query can be used to effectively -// demote results that match a given query. -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-boosting-query.html -type BoostingQuery struct { - Query - positiveClause Query - negativeClause Query - negativeBoost *float64 - boost *float64 -} - -// Creates a new boosting query. 
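One detail of the Source implementation above worth keeping in mind: a clause list with exactly one entry serializes as a JSON object, while two or more serialize as an array. A small sketch, with the expected shapes in comments (assuming default TermQuery serialization):

// Assumes: import "encoding/json" and elastic "github.com/olivere/elastic/v7"
func boolShapes() (string, string) {
	one, _ := elastic.NewBoolQuery().
		Must(elastic.NewTermQuery("a", 1)).Source()
	two, _ := elastic.NewBoolQuery().
		Must(elastic.NewTermQuery("a", 1), elastic.NewTermQuery("b", 2)).Source()
	b1, _ := json.Marshal(one) // {"bool":{"must":{"term":{"a":1}}}}
	b2, _ := json.Marshal(two) // {"bool":{"must":[{"term":{"a":1}},{"term":{"b":2}}]}}
	return string(b1), string(b2)
}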
-func NewBoostingQuery() *BoostingQuery { - return &BoostingQuery{} -} - -func (q *BoostingQuery) Positive(positive Query) *BoostingQuery { - q.positiveClause = positive - return q -} - -func (q *BoostingQuery) Negative(negative Query) *BoostingQuery { - q.negativeClause = negative - return q -} - -func (q *BoostingQuery) NegativeBoost(negativeBoost float64) *BoostingQuery { - q.negativeBoost = &negativeBoost - return q -} - -func (q *BoostingQuery) Boost(boost float64) *BoostingQuery { - q.boost = &boost - return q -} - -// Creates the query source for the boosting query. -func (q *BoostingQuery) Source() (interface{}, error) { - // { - // "boosting" : { - // "positive" : { - // "term" : { - // "field1" : "value1" - // } - // }, - // "negative" : { - // "term" : { - // "field2" : "value2" - // } - // }, - // "negative_boost" : 0.2 - // } - // } - - query := make(map[string]interface{}) - - boostingClause := make(map[string]interface{}) - query["boosting"] = boostingClause - - // Negative and positive clause as well as negative boost - // are mandatory in the Java client. - - // positive - if q.positiveClause != nil { - src, err := q.positiveClause.Source() - if err != nil { - return nil, err - } - boostingClause["positive"] = src - } - - // negative - if q.negativeClause != nil { - src, err := q.negativeClause.Source() - if err != nil { - return nil, err - } - boostingClause["negative"] = src - } - - if q.negativeBoost != nil { - boostingClause["negative_boost"] = *q.negativeBoost - } - - if q.boost != nil { - boostingClause["boost"] = *q.boost - } - - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_common_terms.go b/vendor/github.com/olivere/elastic/v7/search_queries_common_terms.go deleted file mode 100644 index a10a213..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_common_terms.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// CommonTermsQuery is a modern alternative to stopwords -// which improves the precision and recall of search results -// (by taking stopwords into account), without sacrificing performance. -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.4/query-dsl-common-terms-query.html -// -// Deprecated: Use Match query instead (7.3.0+), which skips blocks of -// documents efficiently, without any configuration, provided that the -// total number of hits is not tracked. -type CommonTermsQuery struct { - Query - name string - text interface{} - cutoffFreq *float64 - highFreq *float64 - highFreqOp string - highFreqMinimumShouldMatch string - lowFreq *float64 - lowFreqOp string - lowFreqMinimumShouldMatch string - analyzer string - boost *float64 - queryName string -} - -// NewCommonTermsQuery creates and initializes a new common terms query. 
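A sketch of the BoostingQuery above, which demotes rather than excludes documents matching the negative clause; the terms are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func boostingSketch() *elastic.BoostingQuery {
	// Docs matching "pie" still match, but their score is multiplied by 0.5.
	return elastic.NewBoostingQuery().
		Positive(elastic.NewTermQuery("text", "apple")).
		Negative(elastic.NewTermQuery("text", "pie")).
		NegativeBoost(0.5)
}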
-func NewCommonTermsQuery(name string, text interface{}) *CommonTermsQuery { - return &CommonTermsQuery{name: name, text: text} -} - -func (q *CommonTermsQuery) CutoffFrequency(f float64) *CommonTermsQuery { - q.cutoffFreq = &f - return q -} - -func (q *CommonTermsQuery) HighFreq(f float64) *CommonTermsQuery { - q.highFreq = &f - return q -} - -func (q *CommonTermsQuery) HighFreqOperator(op string) *CommonTermsQuery { - q.highFreqOp = op - return q -} - -func (q *CommonTermsQuery) HighFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { - q.highFreqMinimumShouldMatch = minShouldMatch - return q -} - -func (q *CommonTermsQuery) LowFreq(f float64) *CommonTermsQuery { - q.lowFreq = &f - return q -} - -func (q *CommonTermsQuery) LowFreqOperator(op string) *CommonTermsQuery { - q.lowFreqOp = op - return q -} - -func (q *CommonTermsQuery) LowFreqMinimumShouldMatch(minShouldMatch string) *CommonTermsQuery { - q.lowFreqMinimumShouldMatch = minShouldMatch - return q -} - -func (q *CommonTermsQuery) Analyzer(analyzer string) *CommonTermsQuery { - q.analyzer = analyzer - return q -} - -func (q *CommonTermsQuery) Boost(boost float64) *CommonTermsQuery { - q.boost = &boost - return q -} - -func (q *CommonTermsQuery) QueryName(queryName string) *CommonTermsQuery { - q.queryName = queryName - return q -} - -// Creates the query source for the common query. -func (q *CommonTermsQuery) Source() (interface{}, error) { - // { - // "common": { - // "body": { - // "query": "this is bonsai cool", - // "cutoff_frequency": 0.001 - // } - // } - // } - source := make(map[string]interface{}) - body := make(map[string]interface{}) - query := make(map[string]interface{}) - - source["common"] = body - body[q.name] = query - query["query"] = q.text - - if q.cutoffFreq != nil { - query["cutoff_frequency"] = *q.cutoffFreq - } - if q.highFreq != nil { - query["high_freq"] = *q.highFreq - } - if q.highFreqOp != "" { - query["high_freq_operator"] = q.highFreqOp - } - if q.lowFreq != nil { - query["low_freq"] = *q.lowFreq - } - if q.lowFreqOp != "" { - query["low_freq_operator"] = q.lowFreqOp - } - if q.lowFreqMinimumShouldMatch != "" || q.highFreqMinimumShouldMatch != "" { - mm := make(map[string]interface{}) - if q.lowFreqMinimumShouldMatch != "" { - mm["low_freq"] = q.lowFreqMinimumShouldMatch - } - if q.highFreqMinimumShouldMatch != "" { - mm["high_freq"] = q.highFreqMinimumShouldMatch - } - query["minimum_should_match"] = mm - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_constant_score.go b/vendor/github.com/olivere/elastic/v7/search_queries_constant_score.go deleted file mode 100644 index cfa8dff..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_constant_score.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ConstantScoreQuery is a query that wraps a filter and simply returns -// a constant score equal to the query boost for every document in the filter. 
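As the doc comment above notes, CommonTermsQuery is deprecated. A sketch of the old builder alongside the suggested match replacement, with illustrative field names:

// Assumes: import elastic "github.com/olivere/elastic/v7"
func commonTermsSketch() elastic.Query {
	return elastic.NewCommonTermsQuery("body", "this is bonsai cool").
		CutoffFrequency(0.001)
}

func matchReplacement() elastic.Query {
	// Preferred on 7.3+ clusters per the deprecation note above.
	return elastic.NewMatchQuery("body", "this is bonsai cool")
}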
-// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-constant-score-query.html -type ConstantScoreQuery struct { - filter Query - boost *float64 -} - -// ConstantScoreQuery creates and initializes a new constant score query. -func NewConstantScoreQuery(filter Query) *ConstantScoreQuery { - return &ConstantScoreQuery{ - filter: filter, - } -} - -// Boost sets the boost for this query. Documents matching this query -// will (in addition to the normal weightings) have their score multiplied -// by the boost provided. -func (q *ConstantScoreQuery) Boost(boost float64) *ConstantScoreQuery { - q.boost = &boost - return q -} - -// Source returns the query source. -func (q *ConstantScoreQuery) Source() (interface{}, error) { - // "constant_score" : { - // "filter" : { - // .... - // }, - // "boost" : 1.5 - // } - - query := make(map[string]interface{}) - - params := make(map[string]interface{}) - query["constant_score"] = params - - // filter - src, err := q.filter.Source() - if err != nil { - return nil, err - } - params["filter"] = src - - // boost - if q.boost != nil { - params["boost"] = *q.boost - } - - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_dis_max.go b/vendor/github.com/olivere/elastic/v7/search_queries_dis_max.go deleted file mode 100644 index 48376d6..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_dis_max.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// DisMaxQuery is a query that generates the union of documents produced by -// its subqueries, and that scores each document with the maximum score -// for that document as produced by any subquery, plus a tie breaking -// increment for any additional matching subqueries. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-dis-max-query.html -type DisMaxQuery struct { - queries []Query - boost *float64 - tieBreaker *float64 - queryName string -} - -// NewDisMaxQuery creates and initializes a new dis max query. -func NewDisMaxQuery() *DisMaxQuery { - return &DisMaxQuery{ - queries: make([]Query, 0), - } -} - -// Query adds one or more queries to the dis max query. -func (q *DisMaxQuery) Query(queries ...Query) *DisMaxQuery { - q.queries = append(q.queries, queries...) - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by -// the boost provided. -func (q *DisMaxQuery) Boost(boost float64) *DisMaxQuery { - q.boost = &boost - return q -} - -// TieBreaker is the factor by which the score of each non-maximum disjunct -// for a document is multiplied with and added into the final score. -// -// If non-zero, the value should be small, on the order of 0.1, which says -// that 10 occurrences of word in a lower-scored field that is also in a -// higher scored field is just as good as a unique word in the lower scored -// field (i.e., one that is not in any higher scored field). -func (q *DisMaxQuery) TieBreaker(tieBreaker float64) *DisMaxQuery { - q.tieBreaker = &tieBreaker - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched filters per hit. 
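A sketch of the ConstantScoreQuery above; every matching document receives the same score (the boost), and the wrapped filter is illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func constantScoreSketch() *elastic.ConstantScoreQuery {
	return elastic.NewConstantScoreQuery(
		elastic.NewTermQuery("user", "kimchy"),
	).Boost(1.2)
}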
-func (q *DisMaxQuery) QueryName(queryName string) *DisMaxQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON serializable content for this query. -func (q *DisMaxQuery) Source() (interface{}, error) { - // { - // "dis_max" : { - // "tie_breaker" : 0.7, - // "boost" : 1.2, - // "queries" : { - // { - // "term" : { "age" : 34 } - // }, - // { - // "term" : { "age" : 35 } - // } - // ] - // } - // } - - query := make(map[string]interface{}) - params := make(map[string]interface{}) - query["dis_max"] = params - - if q.tieBreaker != nil { - params["tie_breaker"] = *q.tieBreaker - } - if q.boost != nil { - params["boost"] = *q.boost - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - // queries - var clauses []interface{} - for _, subQuery := range q.queries { - src, err := subQuery.Source() - if err != nil { - return nil, err - } - clauses = append(clauses, src) - } - params["queries"] = clauses - - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_distance_feature_query.go b/vendor/github.com/olivere/elastic/v7/search_queries_distance_feature_query.go deleted file mode 100644 index 1a8a061..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_distance_feature_query.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// DistanceFeatureQuery uses a script to provide a custom score for returned documents. -// -// A DistanceFeatureQuery query is useful if, for example, a scoring function is -// expensive and you only need to calculate the score of a filtered set of documents. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.4/query-dsl-distance-feature-query.html -type DistanceFeatureQuery struct { - field string - pivot string - origin interface{} - boost *float64 - queryName string -} - -// NewDistanceFeatureQuery creates and initializes a new script_score query. -func NewDistanceFeatureQuery(field string, origin interface{}, pivot string) *DistanceFeatureQuery { - return &DistanceFeatureQuery{ - field: field, - origin: origin, - pivot: pivot, - } -} - -// Field to be used in the DistanceFeatureQuery. -func (q *DistanceFeatureQuery) Field(name string) *DistanceFeatureQuery { - q.field = name - return q -} - -// Origin is the date or point of origin used to calculate distances. -// -// If the field is a date or date_nanos field, the origin value must be a -// date. Date math such as "now-1h" is supported. -// -// If the field is a geo_point field, the origin must be a GeoPoint. -func (q *DistanceFeatureQuery) Origin(origin interface{}) *DistanceFeatureQuery { - q.origin = origin - return q -} - -// Pivot is distance from the origin at which relevance scores -// receive half of the boost value. -// -// If field is a date or date_nanos field, the pivot value must be a time -// unit, such as "1h" or "10d". -// -// If field is a geo_point field, the pivot value must be a distance unit, -// such as "1km" or "12m". You can pass a string, or a GeoPoint. -func (q *DistanceFeatureQuery) Pivot(pivot string) *DistanceFeatureQuery { - q.pivot = pivot - return q -} - -// Boost sets the boost for this query. -func (q *DistanceFeatureQuery) Boost(boost float64) *DistanceFeatureQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter. 
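A sketch of the DisMaxQuery above, mirroring the JSON example in its Source comment; subqueries and values are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func disMaxSketch() *elastic.DisMaxQuery {
	// Score is the max of the subquery scores, plus 0.7x each other match.
	return elastic.NewDisMaxQuery().
		Query(
			elastic.NewTermQuery("age", 34),
			elastic.NewTermQuery("age", 35),
		).
		TieBreaker(0.7).
		Boost(1.2)
}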
-func (q *DistanceFeatureQuery) QueryName(queryName string) *DistanceFeatureQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *DistanceFeatureQuery) Source() (interface{}, error) { - // { - // "distance_feature" : { - // "field" : "production_date", - // "pivot" : "7d", - // "origin" : "now" - // } - // } - // { - // "distance_feature" : { - // "field" : "location", - // "pivot" : "1000m", - // "origin" : [-71.3, 41.15] - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["distance_feature"] = query - - query["field"] = q.field - query["pivot"] = q.pivot - switch v := q.origin.(type) { - default: - return nil, fmt.Errorf("DistanceFeatureQuery: unable to serialize Origin from type %T", v) - case string: - query["origin"] = v - case *GeoPoint: - query["origin"] = v.Source() - case GeoPoint: - query["origin"] = v.Source() - } - - if v := q.boost; v != nil { - query["boost"] = *v - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_exists.go b/vendor/github.com/olivere/elastic/v7/search_queries_exists.go deleted file mode 100644 index 33f1589..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_exists.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ExistsQuery is a query that only matches on documents that the field -// has a value in them. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-exists-query.html -type ExistsQuery struct { - name string - queryName string -} - -// NewExistsQuery creates and initializes a new exists query. -func NewExistsQuery(name string) *ExistsQuery { - return &ExistsQuery{ - name: name, - } -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched queries per hit. -func (q *ExistsQuery) QueryName(queryName string) *ExistsQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON serializable content for this query. -func (q *ExistsQuery) Source() (interface{}, error) { - // { - // "exists" : { - // "field" : "user" - // } - // } - - query := make(map[string]interface{}) - params := make(map[string]interface{}) - query["exists"] = params - - params["field"] = q.name - if q.queryName != "" { - params["_name"] = q.queryName - } - - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_fsq.go b/vendor/github.com/olivere/elastic/v7/search_queries_fsq.go deleted file mode 100644 index ce00122..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_fsq.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// FunctionScoreQuery allows you to modify the score of documents that -// are retrieved by a query. This can be useful if, for example, -// a score function is computationally expensive and it is sufficient -// to compute the score on a filtered set of documents. 
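Sketches of the DistanceFeatureQuery and ExistsQuery removed above; the field names, origin, and pivot are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func distanceFeatureSketch() elastic.Query {
	// Boosts docs whose production_date is closer to "now";
	// the boost halves at the 7d pivot.
	return elastic.NewDistanceFeatureQuery("production_date", "now", "7d")
}

func existsSketch() elastic.Query {
	// Matches only docs that have a value for "user".
	return elastic.NewExistsQuery("user")
}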
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html -type FunctionScoreQuery struct { - query Query - filter Query - boost *float64 - maxBoost *float64 - scoreMode string - boostMode string - filters []Query - scoreFuncs []ScoreFunction - minScore *float64 -} - -// NewFunctionScoreQuery creates and initializes a new function score query. -func NewFunctionScoreQuery() *FunctionScoreQuery { - return &FunctionScoreQuery{ - filters: make([]Query, 0), - scoreFuncs: make([]ScoreFunction, 0), - } -} - -// Query sets the query for the function score query. -func (q *FunctionScoreQuery) Query(query Query) *FunctionScoreQuery { - q.query = query - return q -} - -// Filter sets the filter for the function score query. -func (q *FunctionScoreQuery) Filter(filter Query) *FunctionScoreQuery { - q.filter = filter - return q -} - -// Add adds a score function that will execute on all the documents -// matching the filter. -func (q *FunctionScoreQuery) Add(filter Query, scoreFunc ScoreFunction) *FunctionScoreQuery { - q.filters = append(q.filters, filter) - q.scoreFuncs = append(q.scoreFuncs, scoreFunc) - return q -} - -// AddScoreFunc adds a score function that will execute the function on all documents. -func (q *FunctionScoreQuery) AddScoreFunc(scoreFunc ScoreFunction) *FunctionScoreQuery { - q.filters = append(q.filters, nil) - q.scoreFuncs = append(q.scoreFuncs, scoreFunc) - return q -} - -// ScoreMode defines how results of individual score functions will be aggregated. -// Can be first, avg, max, sum, min, or multiply. -func (q *FunctionScoreQuery) ScoreMode(scoreMode string) *FunctionScoreQuery { - q.scoreMode = scoreMode - return q -} - -// BoostMode defines how the combined result of score functions will -// influence the final score together with the sub query score. -func (q *FunctionScoreQuery) BoostMode(boostMode string) *FunctionScoreQuery { - q.boostMode = boostMode - return q -} - -// MaxBoost is the maximum boost that will be applied by function score. -func (q *FunctionScoreQuery) MaxBoost(maxBoost float64) *FunctionScoreQuery { - q.maxBoost = &maxBoost - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by the -// boost provided. -func (q *FunctionScoreQuery) Boost(boost float64) *FunctionScoreQuery { - q.boost = &boost - return q -} - -// MinScore sets the minimum score. -func (q *FunctionScoreQuery) MinScore(minScore float64) *FunctionScoreQuery { - q.minScore = &minScore - return q -} - -// Source returns JSON for the function score query. -func (q *FunctionScoreQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["function_score"] = query - - if q.query != nil { - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - } - if q.filter != nil { - src, err := q.filter.Source() - if err != nil { - return nil, err - } - query["filter"] = src - } - - if len(q.filters) > 0 { - funcs := make([]interface{}, len(q.filters)) - for i, filter := range q.filters { - hsh := make(map[string]interface{}) - if filter != nil { - src, err := filter.Source() - if err != nil { - return nil, err - } - hsh["filter"] = src - } - // Weight needs to be serialized on this level. 
- if weight := q.scoreFuncs[i].GetWeight(); weight != nil { - hsh["weight"] = weight - } - // Serialize the score function - src, err := q.scoreFuncs[i].Source() - if err != nil { - return nil, err - } - hsh[q.scoreFuncs[i].Name()] = src - funcs[i] = hsh - } - query["functions"] = funcs - } - - if q.scoreMode != "" { - query["score_mode"] = q.scoreMode - } - if q.boostMode != "" { - query["boost_mode"] = q.boostMode - } - if q.maxBoost != nil { - query["max_boost"] = *q.maxBoost - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.minScore != nil { - query["min_score"] = *q.minScore - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_fsq_score_funcs.go b/vendor/github.com/olivere/elastic/v7/search_queries_fsq_score_funcs.go deleted file mode 100644 index b14f3b7..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_fsq_score_funcs.go +++ /dev/null @@ -1,582 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "strings" -) - -// ScoreFunction is used in combination with the Function Score Query. -type ScoreFunction interface { - Name() string - GetWeight() *float64 // returns the weight which must be serialized at the level of FunctionScoreQuery - Source() (interface{}, error) -} - -// -- Exponential Decay -- - -// ExponentialDecayFunction builds an exponential decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html -// for details. -type ExponentialDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewExponentialDecayFunction creates a new ExponentialDecayFunction. -func NewExponentialDecayFunction() *ExponentialDecayFunction { - return &ExponentialDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *ExponentialDecayFunction) Name() string { - return "exp" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *ExponentialDecayFunction) FieldName(fieldName string) *ExponentialDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *ExponentialDecayFunction) Origin(origin interface{}) *ExponentialDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *ExponentialDecayFunction) Scale(scale interface{}) *ExponentialDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *ExponentialDecayFunction) Decay(decay float64) *ExponentialDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *ExponentialDecayFunction) Offset(offset interface{}) *ExponentialDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. 
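A sketch of the FunctionScoreQuery above, combining a decay function with a filtered weight factor (the exponential, gauss, and linear decay builders below share the same surface). Field names, scales, and weights are illustrative; note from the Source implementation above that each function's weight is serialized at the function level.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func functionScoreSketch() *elastic.FunctionScoreQuery {
	return elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		// Decay by recency.
		AddScoreFunc(elastic.NewGaussDecayFunction().
			FieldName("timestamp").
			Origin("now").
			Scale("10d").
			Decay(0.5)).
		// Double the score of featured docs only.
		Add(elastic.NewTermQuery("tag", "featured"), elastic.NewWeightFactorFunction(2.0)).
		ScoreMode("sum").
		BoostMode("multiply").
		MaxBoost(10)
}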
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *ExponentialDecayFunction) Weight(weight float64) *ExponentialDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *ExponentialDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *ExponentialDecayFunction) MultiValueMode(mode string) *ExponentialDecayFunction { - fn.multiValueMode = mode - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *ExponentialDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - return source, nil -} - -// -- Gauss Decay -- - -// GaussDecayFunction builds a gauss decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html -// for details. -type GaussDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewGaussDecayFunction returns a new GaussDecayFunction. -func NewGaussDecayFunction() *GaussDecayFunction { - return &GaussDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *GaussDecayFunction) Name() string { - return "gauss" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *GaussDecayFunction) FieldName(fieldName string) *GaussDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *GaussDecayFunction) Origin(origin interface{}) *GaussDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *GaussDecayFunction) Scale(scale interface{}) *GaussDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *GaussDecayFunction) Decay(decay float64) *GaussDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *GaussDecayFunction) Offset(offset interface{}) *GaussDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *GaussDecayFunction) Weight(weight float64) *GaussDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. 
It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *GaussDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *GaussDecayFunction) MultiValueMode(mode string) *GaussDecayFunction { - fn.multiValueMode = mode - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *GaussDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Linear Decay -- - -// LinearDecayFunction builds a linear decay score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html -// for details. -type LinearDecayFunction struct { - fieldName string - origin interface{} - scale interface{} - decay *float64 - offset interface{} - multiValueMode string - weight *float64 -} - -// NewLinearDecayFunction initializes and returns a new LinearDecayFunction. -func NewLinearDecayFunction() *LinearDecayFunction { - return &LinearDecayFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *LinearDecayFunction) Name() string { - return "linear" -} - -// FieldName specifies the name of the field to which this decay function is applied to. -func (fn *LinearDecayFunction) FieldName(fieldName string) *LinearDecayFunction { - fn.fieldName = fieldName - return fn -} - -// Origin defines the "central point" by which the decay function calculates -// "distance". -func (fn *LinearDecayFunction) Origin(origin interface{}) *LinearDecayFunction { - fn.origin = origin - return fn -} - -// Scale defines the scale to be used with Decay. -func (fn *LinearDecayFunction) Scale(scale interface{}) *LinearDecayFunction { - fn.scale = scale - return fn -} - -// Decay defines how documents are scored at the distance given a Scale. -// If no decay is defined, documents at the distance Scale will be scored 0.5. -func (fn *LinearDecayFunction) Decay(decay float64) *LinearDecayFunction { - fn.decay = &decay - return fn -} - -// Offset, if defined, computes the decay function only for a distance -// greater than the defined offset. -func (fn *LinearDecayFunction) Offset(offset interface{}) *LinearDecayFunction { - fn.offset = offset - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *LinearDecayFunction) Weight(weight float64) *LinearDecayFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. 
-func (fn *LinearDecayFunction) GetWeight() *float64 { - return fn.weight -} - -// MultiValueMode specifies how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *LinearDecayFunction) MultiValueMode(mode string) *LinearDecayFunction { - fn.multiValueMode = mode - return fn -} - -// GetMultiValueMode returns how the decay function should be calculated -// on a field that has multiple values. -// Valid modes are: min, max, avg, and sum. -func (fn *LinearDecayFunction) GetMultiValueMode() string { - return fn.multiValueMode -} - -// Source returns the serializable JSON data of this score function. -func (fn *LinearDecayFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source[fn.fieldName] = params - if fn.origin != nil { - params["origin"] = fn.origin - } - params["scale"] = fn.scale - if fn.decay != nil && *fn.decay > 0 { - params["decay"] = *fn.decay - } - if fn.offset != nil { - params["offset"] = fn.offset - } - if fn.multiValueMode != "" { - source["multi_value_mode"] = fn.multiValueMode - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Script -- - -// ScriptFunction builds a script score function. It uses a script to -// compute or influence the score of documents that match with the inner -// query or filter. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_script_score -// for details. -type ScriptFunction struct { - script *Script - weight *float64 -} - -// NewScriptFunction initializes and returns a new ScriptFunction. -func NewScriptFunction(script *Script) *ScriptFunction { - return &ScriptFunction{ - script: script, - } -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *ScriptFunction) Name() string { - return "script_score" -} - -// Script specifies the script to be executed. -func (fn *ScriptFunction) Script(script *Script) *ScriptFunction { - fn.script = script - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *ScriptFunction) Weight(weight float64) *ScriptFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *ScriptFunction) GetWeight() *float64 { - return fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *ScriptFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.script != nil { - src, err := fn.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Field value factor -- - -// FieldValueFactorFunction is a function score function that allows you -// to use a field from a document to influence the score. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_field_value_factor. 
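A sketch of wiring the ScriptFunction above into a function_score query; NewScript is assumed from the same package, and the Painless expression is illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func scriptScoreSketch() *elastic.FunctionScoreQuery {
	script := elastic.NewScript("Math.log(2 + doc['likes'].value)")
	return elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		AddScoreFunc(elastic.NewScriptFunction(script))
}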
-type FieldValueFactorFunction struct { - field string - factor *float64 - missing *float64 - weight *float64 - modifier string -} - -// NewFieldValueFactorFunction initializes and returns a new FieldValueFactorFunction. -func NewFieldValueFactorFunction() *FieldValueFactorFunction { - return &FieldValueFactorFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *FieldValueFactorFunction) Name() string { - return "field_value_factor" -} - -// Field is the field to be extracted from the document. -func (fn *FieldValueFactorFunction) Field(field string) *FieldValueFactorFunction { - fn.field = field - return fn -} - -// Factor is the (optional) factor to multiply the field with. If you do not -// specify a factor, the default is 1. -func (fn *FieldValueFactorFunction) Factor(factor float64) *FieldValueFactorFunction { - fn.factor = &factor - return fn -} - -// Modifier to apply to the field value. It can be one of: none, log, log1p, -// log2p, ln, ln1p, ln2p, square, sqrt, or reciprocal. Defaults to: none. -func (fn *FieldValueFactorFunction) Modifier(modifier string) *FieldValueFactorFunction { - fn.modifier = modifier - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *FieldValueFactorFunction) Weight(weight float64) *FieldValueFactorFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *FieldValueFactorFunction) GetWeight() *float64 { - return fn.weight -} - -// Missing is used if a document does not have that field. -func (fn *FieldValueFactorFunction) Missing(missing float64) *FieldValueFactorFunction { - fn.missing = &missing - return fn -} - -// Source returns the serializable JSON data of this score function. -func (fn *FieldValueFactorFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.field != "" { - source["field"] = fn.field - } - if fn.factor != nil { - source["factor"] = *fn.factor - } - if fn.missing != nil { - source["missing"] = *fn.missing - } - if fn.modifier != "" { - source["modifier"] = strings.ToLower(fn.modifier) - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} - -// -- Weight Factor -- - -// WeightFactorFunction builds a weight factor function that multiplies -// the weight to the score. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_weight -// for details. -type WeightFactorFunction struct { - weight float64 -} - -// NewWeightFactorFunction initializes and returns a new WeightFactorFunction. -func NewWeightFactorFunction(weight float64) *WeightFactorFunction { - return &WeightFactorFunction{weight: weight} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *WeightFactorFunction) Name() string { - return "weight" -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. 
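A sketch of the FieldValueFactorFunction above; the field, factor, and modifier are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func fieldValueFactorSketch() *elastic.FunctionScoreQuery {
	return elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		AddScoreFunc(elastic.NewFieldValueFactorFunction().
			Field("likes").
			Factor(1.2).
			Modifier("sqrt").
			Missing(1)) // used when a doc has no "likes" value
}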
-func (fn *WeightFactorFunction) Weight(weight float64) *WeightFactorFunction { - fn.weight = weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *WeightFactorFunction) GetWeight() *float64 { - return &fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *WeightFactorFunction) Source() (interface{}, error) { - // Notice that the weight has to be serialized in FunctionScoreQuery. - return fn.weight, nil -} - -// -- Random -- - -// RandomFunction builds a random score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_random -// for details. -type RandomFunction struct { - field string - seed interface{} - weight *float64 -} - -// NewRandomFunction initializes and returns a new RandomFunction. -func NewRandomFunction() *RandomFunction { - return &RandomFunction{} -} - -// Name represents the JSON field name under which the output of Source -// needs to be serialized by FunctionScoreQuery (see FunctionScoreQuery.Source). -func (fn *RandomFunction) Name() string { - return "random_score" -} - -// Field is the field to be used for random number generation. -// This parameter is compulsory when a Seed is set and ignored -// otherwise. Note that documents that have the same value for a -// field will get the same score. -func (fn *RandomFunction) Field(field string) *RandomFunction { - fn.field = field - return fn -} - -// Seed sets the seed based on which the random number will be generated. -// Using the same seed is guaranteed to generate the same random number for a specific doc. -// Seed must be an integer, e.g. int or int64. It is specified as an interface{} -// here for compatibility with older versions (which also accepted strings). -func (fn *RandomFunction) Seed(seed interface{}) *RandomFunction { - fn.seed = seed - return fn -} - -// Weight adjusts the score of the score function. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-function-score-query.html#_using_function_score -// for details. -func (fn *RandomFunction) Weight(weight float64) *RandomFunction { - fn.weight = &weight - return fn -} - -// GetWeight returns the adjusted score. It is part of the ScoreFunction interface. -// Returns nil if weight is not specified. -func (fn *RandomFunction) GetWeight() *float64 { - return fn.weight -} - -// Source returns the serializable JSON data of this score function. -func (fn *RandomFunction) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fn.field != "" { - source["field"] = fn.field - } - if fn.seed != nil { - source["seed"] = fn.seed - } - // Notice that the weight has to be serialized in FunctionScoreQuery. - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_fuzzy.go b/vendor/github.com/olivere/elastic/v7/search_queries_fuzzy.go deleted file mode 100644 index a81acfc..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_fuzzy.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// FuzzyQuery uses similarity based on Levenshtein edit distance for -// string fields, and a +/- margin on numeric and date fields. 
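A sketch of the RandomFunction above; as its Field doc comment notes, a field is compulsory when a seed is set ("_seq_no" is a commonly used assumption here).

// Assumes: import elastic "github.com/olivere/elastic/v7"
func randomScoreSketch() *elastic.FunctionScoreQuery {
	return elastic.NewFunctionScoreQuery().
		Query(elastic.NewMatchAllQuery()).
		AddScoreFunc(elastic.NewRandomFunction().
			Seed(42).
			Field("_seq_no")) // same seed + field => reproducible per-doc scores
}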
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-fuzzy-query.html -type FuzzyQuery struct { - name string - value interface{} - boost *float64 - fuzziness interface{} - prefixLength *int - maxExpansions *int - transpositions *bool - rewrite string - queryName string -} - -// NewFuzzyQuery creates a new fuzzy query. -func NewFuzzyQuery(name string, value interface{}) *FuzzyQuery { - q := &FuzzyQuery{ - name: name, - value: value, - } - return q -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by -// the boost provided. -func (q *FuzzyQuery) Boost(boost float64) *FuzzyQuery { - q.boost = &boost - return q -} - -// Fuzziness can be an integer/long like 0, 1 or 2 as well as strings -// like "auto", "0..1", "1..4" or "0.0..1.0". -func (q *FuzzyQuery) Fuzziness(fuzziness interface{}) *FuzzyQuery { - q.fuzziness = fuzziness - return q -} - -func (q *FuzzyQuery) PrefixLength(prefixLength int) *FuzzyQuery { - q.prefixLength = &prefixLength - return q -} - -func (q *FuzzyQuery) MaxExpansions(maxExpansions int) *FuzzyQuery { - q.maxExpansions = &maxExpansions - return q -} - -func (q *FuzzyQuery) Transpositions(transpositions bool) *FuzzyQuery { - q.transpositions = &transpositions - return q -} - -func (q *FuzzyQuery) Rewrite(rewrite string) *FuzzyQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *FuzzyQuery) QueryName(queryName string) *FuzzyQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *FuzzyQuery) Source() (interface{}, error) { - // { - // "fuzzy" : { - // "user" : { - // "value" : "ki", - // "boost" : 1.0, - // "fuzziness" : 2, - // "prefix_length" : 0, - // "max_expansions" : 100 - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["fuzzy"] = query - - fq := make(map[string]interface{}) - query[q.name] = fq - - fq["value"] = q.value - - if q.boost != nil { - fq["boost"] = *q.boost - } - if q.transpositions != nil { - fq["transpositions"] = *q.transpositions - } - if q.fuzziness != nil { - fq["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - fq["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - fq["max_expansions"] = *q.maxExpansions - } - if q.rewrite != "" { - fq["rewrite"] = q.rewrite - } - if q.queryName != "" { - fq["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_geo_bounding_box.go b/vendor/github.com/olivere/elastic/v7/search_queries_geo_bounding_box.go deleted file mode 100644 index 6345867..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_geo_bounding_box.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// GeoBoundingBoxQuery allows to filter hits based on a point location using -// a bounding box. 
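A sketch of the FuzzyQuery above, mirroring the JSON example in its Source comment; the field and value are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func fuzzySketch() *elastic.FuzzyQuery {
	return elastic.NewFuzzyQuery("user", "ki").
		Fuzziness(2).
		PrefixLength(0).
		MaxExpansions(100)
}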
-// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-geo-bounding-box-query.html -type GeoBoundingBoxQuery struct { - name string - top *float64 - left *float64 - bottom *float64 - right *float64 - typ string - queryName string -} - -// NewGeoBoundingBoxQuery creates and initializes a new GeoBoundingBoxQuery. -func NewGeoBoundingBoxQuery(name string) *GeoBoundingBoxQuery { - return &GeoBoundingBoxQuery{ - name: name, - } -} - -func (q *GeoBoundingBoxQuery) TopLeft(top, left float64) *GeoBoundingBoxQuery { - q.top = &top - q.left = &left - return q -} - -func (q *GeoBoundingBoxQuery) TopLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.TopLeft(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) BottomRight(bottom, right float64) *GeoBoundingBoxQuery { - q.bottom = &bottom - q.right = &right - return q -} - -func (q *GeoBoundingBoxQuery) BottomRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.BottomRight(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) BottomLeft(bottom, left float64) *GeoBoundingBoxQuery { - q.bottom = &bottom - q.left = &left - return q -} - -func (q *GeoBoundingBoxQuery) BottomLeftFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.BottomLeft(point.Lat, point.Lon) -} - -func (q *GeoBoundingBoxQuery) TopRight(top, right float64) *GeoBoundingBoxQuery { - q.top = &top - q.right = &right - return q -} - -func (q *GeoBoundingBoxQuery) TopRightFromGeoPoint(point *GeoPoint) *GeoBoundingBoxQuery { - return q.TopRight(point.Lat, point.Lon) -} - -// Type sets the type of executing the geo bounding box. It can be either -// memory or indexed. It defaults to memory. -func (q *GeoBoundingBoxQuery) Type(typ string) *GeoBoundingBoxQuery { - q.typ = typ - return q -} - -func (q *GeoBoundingBoxQuery) QueryName(queryName string) *GeoBoundingBoxQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *GeoBoundingBoxQuery) Source() (interface{}, error) { - // { - // "geo_bounding_box" : { - // ... - // } - // } - - if q.top == nil { - return nil, errors.New("geo_bounding_box requires top latitude to be set") - } - if q.bottom == nil { - return nil, errors.New("geo_bounding_box requires bottom latitude to be set") - } - if q.right == nil { - return nil, errors.New("geo_bounding_box requires right longitude to be set") - } - if q.left == nil { - return nil, errors.New("geo_bounding_box requires left longitude to be set") - } - - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["geo_bounding_box"] = params - - box := make(map[string]interface{}) - box["top_left"] = []float64{*q.left, *q.top} - box["bottom_right"] = []float64{*q.right, *q.bottom} - params[q.name] = box - - if q.typ != "" { - params["type"] = q.typ - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_geo_distance.go b/vendor/github.com/olivere/elastic/v7/search_queries_geo_distance.go deleted file mode 100644 index 4e6878a..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_geo_distance.go +++ /dev/null @@ -1,107 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
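A sketch of the GeoBoundingBoxQuery above; coordinates are passed as lat/lon pairs, and, as the Source implementation shows, the corners are emitted as [lon, lat] arrays. The field and coordinates are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func geoBoundingBoxSketch() elastic.Query {
	return elastic.NewGeoBoundingBoxQuery("pin.location").
		TopLeft(40.73, -74.1).
		BottomRight(40.01, -71.12)
}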
- -package elastic - -// GeoDistanceQuery filters documents that include only hits that exists -// within a specific distance from a geo point. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-geo-distance-query.html -type GeoDistanceQuery struct { - name string - distance string - lat float64 - lon float64 - geohash string - distanceType string - queryName string -} - -// NewGeoDistanceQuery creates and initializes a new GeoDistanceQuery. -func NewGeoDistanceQuery(name string) *GeoDistanceQuery { - return &GeoDistanceQuery{name: name} -} - -func (q *GeoDistanceQuery) GeoPoint(point *GeoPoint) *GeoDistanceQuery { - q.lat = point.Lat - q.lon = point.Lon - return q -} - -func (q *GeoDistanceQuery) Point(lat, lon float64) *GeoDistanceQuery { - q.lat = lat - q.lon = lon - return q -} - -func (q *GeoDistanceQuery) Lat(lat float64) *GeoDistanceQuery { - q.lat = lat - return q -} - -func (q *GeoDistanceQuery) Lon(lon float64) *GeoDistanceQuery { - q.lon = lon - return q -} - -func (q *GeoDistanceQuery) GeoHash(geohash string) *GeoDistanceQuery { - q.geohash = geohash - return q -} - -func (q *GeoDistanceQuery) Distance(distance string) *GeoDistanceQuery { - q.distance = distance - return q -} - -func (q *GeoDistanceQuery) DistanceType(distanceType string) *GeoDistanceQuery { - q.distanceType = distanceType - return q -} - -func (q *GeoDistanceQuery) QueryName(queryName string) *GeoDistanceQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *GeoDistanceQuery) Source() (interface{}, error) { - // { - // "geo_distance" : { - // "distance" : "200km", - // "pin.location" : { - // "lat" : 40, - // "lon" : -70 - // } - // } - // } - - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - - if q.geohash != "" { - params[q.name] = q.geohash - } else { - location := make(map[string]interface{}) - location["lat"] = q.lat - location["lon"] = q.lon - params[q.name] = location - } - - if q.distance != "" { - params["distance"] = q.distance - } - if q.distanceType != "" { - params["distance_type"] = q.distanceType - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - source["geo_distance"] = params - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_geo_polygon.go b/vendor/github.com/olivere/elastic/v7/search_queries_geo_polygon.go deleted file mode 100644 index 98cd5fe..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_geo_polygon.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// GeoPolygonQuery allows to include hits that only fall within a polygon of points. -// -// For more details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-geo-polygon-query.html -type GeoPolygonQuery struct { - name string - points []*GeoPoint - queryName string -} - -// NewGeoPolygonQuery creates and initializes a new GeoPolygonQuery. -func NewGeoPolygonQuery(name string) *GeoPolygonQuery { - return &GeoPolygonQuery{ - name: name, - points: make([]*GeoPoint, 0), - } -} - -// AddPoint adds a point from latitude and longitude. 
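A sketch of the GeoDistanceQuery above, matching the JSON example in its Source comment; the field, point, and distance are illustrative.

// Assumes: import elastic "github.com/olivere/elastic/v7"
func geoDistanceSketch() elastic.Query {
	return elastic.NewGeoDistanceQuery("pin.location").
		Point(40, -70).
		Distance("200km").
		DistanceType("arc")
}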
-func (q *GeoPolygonQuery) AddPoint(lat, lon float64) *GeoPolygonQuery { - q.points = append(q.points, GeoPointFromLatLon(lat, lon)) - return q -} - -// AddGeoPoint adds a GeoPoint. -func (q *GeoPolygonQuery) AddGeoPoint(point *GeoPoint) *GeoPolygonQuery { - q.points = append(q.points, point) - return q -} - -func (q *GeoPolygonQuery) QueryName(queryName string) *GeoPolygonQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *GeoPolygonQuery) Source() (interface{}, error) { - // "geo_polygon" : { - // "person.location" : { - // "points" : [ - // {"lat" : 40, "lon" : -70}, - // {"lat" : 30, "lon" : -80}, - // {"lat" : 20, "lon" : -90} - // ] - // } - // } - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - source["geo_polygon"] = params - - polygon := make(map[string]interface{}) - params[q.name] = polygon - - var points []interface{} - for _, point := range q.points { - points = append(points, point.Source()) - } - polygon["points"] = points - - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_has_child.go b/vendor/github.com/olivere/elastic/v7/search_queries_has_child.go deleted file mode 100644 index d2ce2f9..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_has_child.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HasChildQuery accepts a query and the child type to run against, and results -// in parent documents that have child docs matching the query. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-has-child-query.html -type HasChildQuery struct { - query Query - childType string - boost *float64 - scoreMode string - minChildren *int - maxChildren *int - shortCircuitCutoff *int - queryName string - innerHit *InnerHit -} - -// NewHasChildQuery creates and initializes a new has_child query. -func NewHasChildQuery(childType string, query Query) *HasChildQuery { - return &HasChildQuery{ - query: query, - childType: childType, - } -} - -// Boost sets the boost for this query. -func (q *HasChildQuery) Boost(boost float64) *HasChildQuery { - q.boost = &boost - return q -} - -// ScoreMode defines how the scores from the matching child documents -// are mapped into the parent document. Allowed values are: min, max, -// avg, or none. -func (q *HasChildQuery) ScoreMode(scoreMode string) *HasChildQuery { - q.scoreMode = scoreMode - return q -} - -// MinChildren defines the minimum number of children that are required -// to match for the parent to be considered a match. -func (q *HasChildQuery) MinChildren(minChildren int) *HasChildQuery { - q.minChildren = &minChildren - return q -} - -// MaxChildren defines the maximum number of children that are required -// to match for the parent to be considered a match. -func (q *HasChildQuery) MaxChildren(maxChildren int) *HasChildQuery { - q.maxChildren = &maxChildren - return q -} - -// ShortCircuitCutoff configures what cut off point only to evaluate -// parent documents that contain the matching parent id terms instead -// of evaluating all parent docs. 
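// Editor's sketch (hypothetical field and points, not part of this diff):
// building the triangle from the GeoPolygonQuery doc comment above.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewGeoPolygonQuery("person.location").
		AddPoint(40, -70).
		AddPoint(30, -80).
		AddPoint(20, -90) // lat/lon pairs, converted internally via GeoPointFromLatLon
	src, err := q.Source()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%v\n", src) // map form of {"geo_polygon":{"person.location":{"points":[...]}}}
}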
-func (q *HasChildQuery) ShortCircuitCutoff(shortCircuitCutoff int) *HasChildQuery { - q.shortCircuitCutoff = &shortCircuitCutoff - return q -} - -// QueryName specifies the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *HasChildQuery) QueryName(queryName string) *HasChildQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this query and -// reusing the defined type and query. -func (q *HasChildQuery) InnerHit(innerHit *InnerHit) *HasChildQuery { - q.innerHit = innerHit - return q -} - -// Source returns JSON for the function score query. -func (q *HasChildQuery) Source() (interface{}, error) { - // { - // "has_child" : { - // "type" : "blog_tag", - // "score_mode" : "min", - // "query" : { - // "term" : { - // "tag" : "something" - // } - // } - // } - // } - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["has_child"] = query - - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - query["type"] = q.childType - if q.boost != nil { - query["boost"] = *q.boost - } - if q.scoreMode != "" { - query["score_mode"] = q.scoreMode - } - if q.minChildren != nil { - query["min_children"] = *q.minChildren - } - if q.maxChildren != nil { - query["max_children"] = *q.maxChildren - } - if q.shortCircuitCutoff != nil { - query["short_circuit_cutoff"] = *q.shortCircuitCutoff - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - query["inner_hits"] = src - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_has_parent.go b/vendor/github.com/olivere/elastic/v7/search_queries_has_parent.go deleted file mode 100644 index 1723f00..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_has_parent.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// HasParentQuery accepts a query and a parent type. The query is executed -// in the parent document space which is specified by the parent type. -// This query returns child documents which associated parents have matched. -// For the rest has_parent query has the same options and works in the -// same manner as has_child query. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-has-parent-query.html -type HasParentQuery struct { - query Query - parentType string - boost *float64 - score *bool - queryName string - innerHit *InnerHit - ignoreUnmapped *bool -} - -// NewHasParentQuery creates and initializes a new has_parent query. -func NewHasParentQuery(parentType string, query Query) *HasParentQuery { - return &HasParentQuery{ - query: query, - parentType: parentType, - } -} - -// Boost sets the boost for this query. -func (q *HasParentQuery) Boost(boost float64) *HasParentQuery { - q.boost = &boost - return q -} - -// Score defines if the parent score is mapped into the child documents. -func (q *HasParentQuery) Score(score bool) *HasParentQuery { - q.score = &score - return q -} - -// QueryName specifies the query name for the filter that can be used when -// searching for matched filters per hit. 
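// Editor's sketch (not part of this diff): a has_child query equivalent to the
// JSON in the Source() doc comment above. NewTermQuery comes from elsewhere in
// this package; the type and field names are illustrative only.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewHasChildQuery("blog_tag", elastic.NewTermQuery("tag", "something")).
		ScoreMode("min"). // min, max, avg, or none
		MinChildren(2)    // parent needs at least two matching children
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}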
-func (q *HasParentQuery) QueryName(queryName string) *HasParentQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this query and -// reuses the defined type and query. -func (q *HasParentQuery) InnerHit(innerHit *InnerHit) *HasParentQuery { - q.innerHit = innerHit - return q -} - -// IgnoreUnmapped specifies whether unmapped types should be ignored. -// If set to false, the query fails when an unmapped type is found. -func (q *HasParentQuery) IgnoreUnmapped(ignore bool) *HasParentQuery { - q.ignoreUnmapped = &ignore - return q -} - -// Source returns JSON for the has_parent query. -func (q *HasParentQuery) Source() (interface{}, error) { - // { - // "has_parent" : { - // "parent_type" : "blog", - // "query" : { - // "term" : { - // "tag" : "something" - // } - // } - // } - // } - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["has_parent"] = query - - src, err := q.query.Source() - if err != nil { - return nil, err - } - query["query"] = src - query["parent_type"] = q.parentType - if q.boost != nil { - query["boost"] = *q.boost - } - if q.score != nil { - query["score"] = *q.score - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - query["inner_hits"] = src - } - if q.ignoreUnmapped != nil { - query["ignore_unmapped"] = *q.ignoreUnmapped - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_ids.go b/vendor/github.com/olivere/elastic/v7/search_queries_ids.go deleted file mode 100644 index 8873fc2..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_ids.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// IdsQuery filters documents that only have the provided ids. -// Note, this query uses the _uid field. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-ids-query.html -type IdsQuery struct { - types []string - values []string - boost *float64 - queryName string -} - -// NewIdsQuery creates and initializes a new ids query. -// -// Deprecated: Types are in the process of being removed, prefer to filter on a field instead. -func NewIdsQuery(types ...string) *IdsQuery { - return &IdsQuery{ - types: types, - values: make([]string, 0), - } -} - -// Ids adds ids to the filter. -func (q *IdsQuery) Ids(ids ...string) *IdsQuery { - q.values = append(q.values, ids...) - return q -} - -// Boost sets the boost for this query. -func (q *IdsQuery) Boost(boost float64) *IdsQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter. -func (q *IdsQuery) QueryName(queryName string) *IdsQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the ids query.
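// Editor's sketch (illustrative names, not part of this diff): the has_parent
// counterpart, returning child docs whose parent matches the term query.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewHasParentQuery("blog", elastic.NewTermQuery("tag", "something")).
		Score(true).         // propagate the parent's score to the children
		IgnoreUnmapped(true) // don't fail if the parent type is unmapped
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}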
-func (q *IdsQuery) Source() (interface{}, error) { - // { - // "ids" : { - // "type" : "my_type", - // "values" : ["1", "4", "100"] - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["ids"] = query - - // type(s) - if len(q.types) == 1 { - query["type"] = q.types[0] - } else if len(q.types) > 1 { - query["types"] = q.types - } - - // values - query["values"] = q.values - - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval.go deleted file mode 100644 index d2a9e24..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// IntervalQueryRule represents the generic matching interval rule interface. -// Interval Rule is actually just a Query, but may be used only inside -// IntervalQuery. An extra method is added just to shield its -// implementations (*Rule objects) from other query objects. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html -// for details. -type IntervalQueryRule interface { - Query - - // isIntervalQueryRule is never actually called, and is used just for Rule to - // differ from standard Query. - isIntervalQueryRule() bool -} - -// IntervalQuery returns documents based on the order and proximity of matching terms. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html -type IntervalQuery struct { - field string - rule IntervalQueryRule -} - -// NewIntervalQuery creates and initializes a new IntervalQuery. -func NewIntervalQuery(field string, rule IntervalQueryRule) *IntervalQuery { - return &IntervalQuery{field: field, rule: rule} -} - -// Source returns JSON for the function score query. -func (q *IntervalQuery) Source() (interface{}, error) { - // { - // "intervals" : { ... } - // } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["intervals"] = params - - src, err := q.rule.Source() - if err != nil { - return nil, err - } - params[q.field] = src - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_filter.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_filter.go deleted file mode 100644 index d7f0dca..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_filter.go +++ /dev/null @@ -1,175 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryFilter)(nil) -) - -// IntervalQueryFilter specifies filters used in some -// IntervalQueryRule implementations, e.g. IntervalQueryRuleAllOf. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#interval_filter -// for details. 
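// Editor's sketch (made-up ids, not part of this diff): ids queries are
// usually built without type arguments now that types are deprecated.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewIdsQuery().Ids("1", "4", "100") // {"ids":{"values":["1","4","100"]}}
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}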
-type IntervalQueryFilter struct { - after IntervalQueryRule - before IntervalQueryRule - containedBy IntervalQueryRule - containing IntervalQueryRule - overlapping IntervalQueryRule - notContainedBy IntervalQueryRule - notContaining IntervalQueryRule - notOverlapping IntervalQueryRule - script *Script -} - -// NewIntervalQueryFilter initializes and creates a new -// IntervalQueryFilter. -func NewIntervalQueryFilter() *IntervalQueryFilter { - return &IntervalQueryFilter{} -} - -// After specifies the query to be used to return intervals that follow -// an interval from the filter rule. -func (r *IntervalQueryFilter) After(after IntervalQueryRule) *IntervalQueryFilter { - r.after = after - return r -} - -// Before specifies the query to be used to return intervals that occur -// before an interval from the filter rule. -func (r *IntervalQueryFilter) Before(before IntervalQueryRule) *IntervalQueryFilter { - r.before = before - return r -} - -// ContainedBy specifies the query to be used to return intervals contained -// by an interval from the filter rule. -func (r *IntervalQueryFilter) ContainedBy(containedBy IntervalQueryRule) *IntervalQueryFilter { - r.containedBy = containedBy - return r -} - -// Containing specifies the query to be used to return intervals that contain an -// interval from the filter rule. -func (r *IntervalQueryFilter) Containing(containing IntervalQueryRule) *IntervalQueryFilter { - r.containing = containing - return r -} - -// Overlapping specifies the query to be used to return intervals that overlap -// with an interval from the filter rule. -func (r *IntervalQueryFilter) Overlapping(overlapping IntervalQueryRule) *IntervalQueryFilter { - r.overlapping = overlapping - return r -} - -// NotContainedBy specifies the query to be used to return intervals that are NOT -// contained by an interval from the filter rule. -func (r *IntervalQueryFilter) NotContainedBy(notContainedBy IntervalQueryRule) *IntervalQueryFilter { - r.notContainedBy = notContainedBy - return r -} - -// NotContaining specifies the query to be used to return intervals that do NOT -// contain an interval from the filter rule. -func (r *IntervalQueryFilter) NotContaining(notContaining IntervalQueryRule) *IntervalQueryFilter { - r.notContaining = notContaining - return r -} - -// NotOverlapping specifies the query to be used to return intervals that do NOT -// overlap with an interval from the filter rule. -func (r *IntervalQueryFilter) NotOverlapping(notOverlapping IntervalQueryRule) *IntervalQueryFilter { - r.notOverlapping = notOverlapping - return r -} - -// Script allows a script to be used to return matching documents. The script -// must return a boolean value, true or false. -func (r *IntervalQueryFilter) Script(script *Script) *IntervalQueryFilter { - r.script = script - return r -} - -// Source returns JSON for the function score query. 
-func (r *IntervalQueryFilter) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if r.before != nil { - src, err := r.before.Source() - if err != nil { - return nil, err - } - source["before"] = src - } - - if r.after != nil { - src, err := r.after.Source() - if err != nil { - return nil, err - } - source["after"] = src - } - - if r.containedBy != nil { - src, err := r.containedBy.Source() - if err != nil { - return nil, err - } - source["contained_by"] = src - } - - if r.containing != nil { - src, err := r.containing.Source() - if err != nil { - return nil, err - } - source["containing"] = src - } - - if r.overlapping != nil { - src, err := r.overlapping.Source() - if err != nil { - return nil, err - } - source["overlapping"] = src - } - - if r.notContainedBy != nil { - src, err := r.notContainedBy.Source() - if err != nil { - return nil, err - } - source["not_contained_by"] = src - } - - if r.notContaining != nil { - src, err := r.notContaining.Source() - if err != nil { - return nil, err - } - source["not_containing"] = src - } - - if r.notOverlapping != nil { - src, err := r.notOverlapping.Source() - if err != nil { - return nil, err - } - source["not_overlapping"] = src - } - - if r.script != nil { - src, err := r.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - - return source, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryFilter) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_all_of.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_all_of.go deleted file mode 100644 index 99314bb..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_all_of.go +++ /dev/null @@ -1,82 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryRuleAllOf)(nil) -) - -// IntervalQueryRuleAllOf is an implementation of IntervalQueryRule. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-all_of -// for details. -type IntervalQueryRuleAllOf struct { - intervals []IntervalQueryRule - maxGaps *int - ordered *bool - filter *IntervalQueryFilter -} - -// NewIntervalQueryRuleAllOf initializes and returns a new instance -// of IntervalQueryRuleAllOf. -func NewIntervalQueryRuleAllOf(intervals ...IntervalQueryRule) *IntervalQueryRuleAllOf { - return &IntervalQueryRuleAllOf{intervals: intervals} -} - -// MaxGaps specifies the maximum number of positions between the matching -// terms. Terms further apart than this are considered matches. Defaults to -1. -func (r *IntervalQueryRuleAllOf) MaxGaps(maxGaps int) *IntervalQueryRuleAllOf { - r.maxGaps = &maxGaps - return r -} - -// Ordered, if true, indicates that matching terms must appear in their specified -// order. Defaults to false. -func (r *IntervalQueryRuleAllOf) Ordered(ordered bool) *IntervalQueryRuleAllOf { - r.ordered = &ordered - return r -} - -// Filter adds an additional interval filter. -func (r *IntervalQueryRuleAllOf) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleAllOf { - r.filter = filter - return r -} - -// Source returns JSON for the function score query. 
-func (r *IntervalQueryRuleAllOf) Source() (interface{}, error) { - source := make(map[string]interface{}) - - intervalSources := make([]interface{}, 0) - for _, interval := range r.intervals { - src, err := interval.Source() - if err != nil { - return nil, err - } - - intervalSources = append(intervalSources, src) - } - source["intervals"] = intervalSources - - if r.ordered != nil { - source["ordered"] = *r.ordered - } - if r.maxGaps != nil { - source["max_gaps"] = *r.maxGaps - } - if r.filter != nil { - src, err := r.filter.Source() - if err != nil { - return nil, err - } - - source["filter"] = src - } - - return map[string]interface{}{ - "all_of": source, - }, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryRuleAllOf) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_any_of.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_any_of.go deleted file mode 100644 index 1009247..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_any_of.go +++ /dev/null @@ -1,60 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryRuleAnyOf)(nil) -) - -// IntervalQueryRuleAnyOf is an implementation of IntervalQueryRule. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-any_of -// for details. -type IntervalQueryRuleAnyOf struct { - intervals []IntervalQueryRule - filter *IntervalQueryFilter -} - -// NewIntervalQueryRuleAnyOf initializes and returns a new instance -// of IntervalQueryRuleAnyOf. -func NewIntervalQueryRuleAnyOf(intervals ...IntervalQueryRule) *IntervalQueryRuleAnyOf { - return &IntervalQueryRuleAnyOf{intervals: intervals} -} - -// Filter adds an additional interval filter. -func (r *IntervalQueryRuleAnyOf) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleAnyOf { - r.filter = filter - return r -} - -// Source returns JSON for the function score query. -func (r *IntervalQueryRuleAnyOf) Source() (interface{}, error) { - source := make(map[string]interface{}) - - var intervalSources []interface{} - for _, interval := range r.intervals { - src, err := interval.Source() - if err != nil { - return nil, err - } - - intervalSources = append(intervalSources, src) - } - source["intervals"] = intervalSources - - if r.filter != nil { - src, err := r.filter.Source() - if err != nil { - return nil, err - } - - source["filter"] = src - } - - return map[string]interface{}{ - "any_of": source, - }, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryRuleAnyOf) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_match.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_match.go deleted file mode 100644 index be07ca2..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_match.go +++ /dev/null @@ -1,94 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryRuleMatch)(nil) -) - -// IntervalQueryRuleMatch is an implementation of IntervalQueryRule. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-match -// for details. 
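// Editor's sketch (made-up field and terms, not part of this diff): composing
// the interval rules above, mirroring the example from the Elasticsearch
// intervals query documentation.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewIntervalQuery("my_text",
		elastic.NewIntervalQueryRuleAllOf(
			elastic.NewIntervalQueryRuleMatch("my favorite food").MaxGaps(0).Ordered(true),
			elastic.NewIntervalQueryRuleAnyOf(
				elastic.NewIntervalQueryRuleMatch("hot water"),
				elastic.NewIntervalQueryRuleMatch("cold porridge"),
			),
		).Ordered(true),
	)
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}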
-type IntervalQueryRuleMatch struct { - query string - maxGaps *int - ordered *bool - analyzer string - useField string - filter *IntervalQueryFilter -} - -// NewIntervalQueryRuleMatch initializes and returns a new instance -// of IntervalQueryRuleMatch. -func NewIntervalQueryRuleMatch(query string) *IntervalQueryRuleMatch { - return &IntervalQueryRuleMatch{query: query} -} - -// MaxGaps specifies the maximum number of positions between the matching -// terms. Terms further apart than this are considered matches. Defaults to -1. -func (r *IntervalQueryRuleMatch) MaxGaps(maxGaps int) *IntervalQueryRuleMatch { - r.maxGaps = &maxGaps - return r -} - -// Ordered, if true, indicates that matching terms must appear in their specified -// order. Defaults to false. -func (r *IntervalQueryRuleMatch) Ordered(ordered bool) *IntervalQueryRuleMatch { - r.ordered = &ordered - return r -} - -// Analyzer specifies the analyzer used to analyze terms in the query. -func (r *IntervalQueryRuleMatch) Analyzer(analyzer string) *IntervalQueryRuleMatch { - r.analyzer = analyzer - return r -} - -// UseField, if specified, matches the intervals from this field rather than -// the top-level field. -func (r *IntervalQueryRuleMatch) UseField(useField string) *IntervalQueryRuleMatch { - r.useField = useField - return r -} - -// Filter adds an additional interval filter. -func (r *IntervalQueryRuleMatch) Filter(filter *IntervalQueryFilter) *IntervalQueryRuleMatch { - r.filter = filter - return r -} - -// Source returns JSON for the function score query. -func (r *IntervalQueryRuleMatch) Source() (interface{}, error) { - source := make(map[string]interface{}) - - source["query"] = r.query - - if r.ordered != nil { - source["ordered"] = *r.ordered - } - if r.maxGaps != nil { - source["max_gaps"] = *r.maxGaps - } - if r.analyzer != "" { - source["analyzer"] = r.analyzer - } - if r.useField != "" { - source["use_field"] = r.useField - } - if r.filter != nil { - filterRuleSource, err := r.filter.Source() - if err != nil { - return nil, err - } - - source["filter"] = filterRuleSource - } - - return map[string]interface{}{ - "match": source, - }, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryRuleMatch) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_prefix.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_prefix.go deleted file mode 100644 index 7eea791..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_prefix.go +++ /dev/null @@ -1,57 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryRulePrefix)(nil) -) - -// IntervalQueryRulePrefix is an implementation of IntervalQueryRule. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-prefix -// for details. -type IntervalQueryRulePrefix struct { - prefix string - analyzer string - useField string -} - -// NewIntervalQueryRulePrefix initializes and returns a new instance -// of IntervalQueryRulePrefix. -func NewIntervalQueryRulePrefix(prefix string) *IntervalQueryRulePrefix { - return &IntervalQueryRulePrefix{prefix: prefix} -} - -// Analyzer specifies the analyzer used to analyze terms in the query. -func (r *IntervalQueryRulePrefix) Analyzer(analyzer string) *IntervalQueryRulePrefix { - r.analyzer = analyzer - return r -} - -// UseField, if specified, matches the intervals from this field rather than -// the top-level field. 
-func (r *IntervalQueryRulePrefix) UseField(useField string) *IntervalQueryRulePrefix { - r.useField = useField - return r -} - -// Source returns JSON for the prefix rule. -func (r *IntervalQueryRulePrefix) Source() (interface{}, error) { - source := make(map[string]interface{}) - - source["query"] = r.prefix - - if r.analyzer != "" { - source["analyzer"] = r.analyzer - } - if r.useField != "" { - source["use_field"] = r.useField - } - - return map[string]interface{}{ - "prefix": source, - }, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryRulePrefix) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_wildcard.go b/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_wildcard.go deleted file mode 100644 index 8978852..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_interval_rules_wildcard.go +++ /dev/null @@ -1,57 +0,0 @@ -package elastic - -var ( - _ IntervalQueryRule = (*IntervalQueryRuleWildcard)(nil) -) - -// IntervalQueryRuleWildcard is an implementation of IntervalQueryRule. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/query-dsl-intervals-query.html#intervals-wildcard -// for details. -type IntervalQueryRuleWildcard struct { - pattern string - analyzer string - useField string -} - -// NewIntervalQueryRuleWildcard initializes and returns a new instance -// of IntervalQueryRuleWildcard. -func NewIntervalQueryRuleWildcard(pattern string) *IntervalQueryRuleWildcard { - return &IntervalQueryRuleWildcard{pattern: pattern} -} - -// Analyzer specifies the analyzer used to analyze terms in the query. -func (r *IntervalQueryRuleWildcard) Analyzer(analyzer string) *IntervalQueryRuleWildcard { - r.analyzer = analyzer - return r -} - -// UseField, if specified, matches the intervals from this field rather than -// the top-level field. -func (r *IntervalQueryRuleWildcard) UseField(useField string) *IntervalQueryRuleWildcard { - r.useField = useField - return r -} - -// Source returns JSON for the wildcard rule. -func (r *IntervalQueryRuleWildcard) Source() (interface{}, error) { - source := make(map[string]interface{}) - - source["pattern"] = r.pattern - - if r.analyzer != "" { - source["analyzer"] = r.analyzer - } - if r.useField != "" { - source["use_field"] = r.useField - } - - return map[string]interface{}{ - "wildcard": source, - }, nil -} - -// isIntervalQueryRule implements the marker interface. -func (r *IntervalQueryRuleWildcard) isIntervalQueryRule() bool { - return true -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_match.go b/vendor/github.com/olivere/elastic/v7/search_queries_match.go deleted file mode 100644 index fce1078..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_match.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchQuery is a family of queries that accepts text/numerics/dates, -// analyzes them, and constructs a query. -// -// To create a new MatchQuery, use NewMatchQuery. To create specific types -// of queries, e.g. a match_phrase query, use NewMatchQuery(...).Type("phrase"), -// or use one of the shortcuts e.g. NewMatchPhraseQuery(...).
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-match-query.html -type MatchQuery struct { - name string - text interface{} - operator string // or / and - analyzer string - boost *float64 - fuzziness string - prefixLength *int - maxExpansions *int - minimumShouldMatch string - fuzzyRewrite string - lenient *bool - fuzzyTranspositions *bool - zeroTermsQuery string - cutoffFrequency *float64 - queryName string -} - -// NewMatchQuery creates and initializes a new MatchQuery. -func NewMatchQuery(name string, text interface{}) *MatchQuery { - return &MatchQuery{name: name, text: text} -} - -// Operator sets the operator to use when using a boolean query. -// Can be "AND" or "OR" (default). -func (q *MatchQuery) Operator(operator string) *MatchQuery { - q.operator = operator - return q -} - -// Analyzer explicitly sets the analyzer to use. It defaults to use explicit -// mapping config for the field, or, if not set, the default search analyzer. -func (q *MatchQuery) Analyzer(analyzer string) *MatchQuery { - q.analyzer = analyzer - return q -} - -// Fuzziness sets the fuzziness when evaluated to a fuzzy query type. -// Defaults to "AUTO". -func (q *MatchQuery) Fuzziness(fuzziness string) *MatchQuery { - q.fuzziness = fuzziness - return q -} - -// PrefixLength sets the length of the common (non-fuzzy) -// prefix for fuzzy match queries. It must be non-negative. -func (q *MatchQuery) PrefixLength(prefixLength int) *MatchQuery { - q.prefixLength = &prefixLength - return q -} - -// MaxExpansions is used with fuzzy or prefix type queries. It specifies -// the number of term expansions to use. It defaults to unbounded, so -// it's recommended to set it to a reasonable value for faster execution. -func (q *MatchQuery) MaxExpansions(maxExpansions int) *MatchQuery { - q.maxExpansions = &maxExpansions - return q -} - -// CutoffFrequency can be a value in [0..1] (or an absolute number >=1). -// It represents the maximum threshold of a term's document frequency to be -// considered a low frequency term. -func (q *MatchQuery) CutoffFrequency(cutoff float64) *MatchQuery { - q.cutoffFrequency = &cutoff - return q -} - -// MinimumShouldMatch sets the optional minimumShouldMatch value to -// apply to the query. -func (q *MatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MatchQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -// FuzzyRewrite sets the fuzzy_rewrite parameter controlling how the -// fuzzy query will get rewritten. -func (q *MatchQuery) FuzzyRewrite(fuzzyRewrite string) *MatchQuery { - q.fuzzyRewrite = fuzzyRewrite - return q -} - -// FuzzyTranspositions sets whether transpositions are supported in -// fuzzy queries. -// -// The default metric used by fuzzy queries to determine a match is -// the Damerau-Levenshtein distance formula, which supports transpositions. -// Setting transpositions to false switches to the classic Levenshtein -// distance; if not set, the Damerau-Levenshtein distance metric is used. -func (q *MatchQuery) FuzzyTranspositions(fuzzyTranspositions bool) *MatchQuery { - q.fuzzyTranspositions = &fuzzyTranspositions - return q -} - -// Lenient specifies whether format based failures will be ignored. -func (q *MatchQuery) Lenient(lenient bool) *MatchQuery { - q.lenient = &lenient - return q -} - -// ZeroTermsQuery can be "all" or "none".
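// Editor's sketch (illustrative field and text, not part of this diff): a
// typical fuzzy boolean match using the setters above.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewMatchQuery("message", "this is a test").
		Operator("and").      // require all terms instead of the default "or"
		Fuzziness("AUTO").    // edit-distance matching scaled by term length
		ZeroTermsQuery("all") // match everything if the analyzer strips all terms
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}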
-func (q *MatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MatchQuery { - q.zeroTermsQuery = zeroTermsQuery - return q -} - -// Boost sets the boost to apply to this query. -func (q *MatchQuery) Boost(boost float64) *MatchQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *MatchQuery) QueryName(queryName string) *MatchQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *MatchQuery) Source() (interface{}, error) { - // {"match":{"name":{"query":"value","type":"boolean/phrase"}}} - source := make(map[string]interface{}) - - match := make(map[string]interface{}) - source["match"] = match - - query := make(map[string]interface{}) - match[q.name] = query - - query["query"] = q.text - - if q.operator != "" { - query["operator"] = q.operator - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.fuzziness != "" { - query["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - query["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - query["max_expansions"] = *q.maxExpansions - } - if q.minimumShouldMatch != "" { - query["minimum_should_match"] = q.minimumShouldMatch - } - if q.fuzzyRewrite != "" { - query["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.fuzzyTranspositions != nil { - query["fuzzy_transpositions"] = *q.fuzzyTranspositions - } - if q.zeroTermsQuery != "" { - query["zero_terms_query"] = q.zeroTermsQuery - } - if q.cutoffFrequency != nil { - query["cutoff_frequency"] = *q.cutoffFrequency - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_match_all.go b/vendor/github.com/olivere/elastic/v7/search_queries_match_all.go deleted file mode 100644 index 921ead5..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_match_all.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchAllQuery is the most simple query, which matches all documents, -// giving them all a _score of 1.0. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-match-all-query.html -type MatchAllQuery struct { - boost *float64 - queryName string -} - -// NewMatchAllQuery creates and initializes a new match all query. -func NewMatchAllQuery() *MatchAllQuery { - return &MatchAllQuery{} -} - -// Boost sets the boost for this query. Documents matching this query will -// (in addition to the normal weightings) have their score multiplied by the -// boost provided. -func (q *MatchAllQuery) Boost(boost float64) *MatchAllQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name. -func (q *MatchAllQuery) QueryName(name string) *MatchAllQuery { - q.queryName = name - return q -} - -// Source returns JSON for the match all query. -func (q MatchAllQuery) Source() (interface{}, error) { - // { - // "match_all" : { ... 
} - // } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["match_all"] = params - if q.boost != nil { - params["boost"] = *q.boost - } - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_match_none.go b/vendor/github.com/olivere/elastic/v7/search_queries_match_none.go deleted file mode 100644 index a1d1f87..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_match_none.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchNoneQuery returns no documents. It is the inverse of -// MatchAllQuery. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-match-all-query.html -type MatchNoneQuery struct { - queryName string -} - -// NewMatchNoneQuery creates and initializes a new match none query. -func NewMatchNoneQuery() *MatchNoneQuery { - return &MatchNoneQuery{} -} - -// QueryName sets the query name. -func (q *MatchNoneQuery) QueryName(name string) *MatchNoneQuery { - q.queryName = name - return q -} - -// Source returns JSON for the match none query. -func (q MatchNoneQuery) Source() (interface{}, error) { - // { - // "match_none" : { ... } - // } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["match_none"] = params - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase.go b/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase.go deleted file mode 100644 index 974a49a..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchPhraseQuery analyzes the text and creates a phrase query out of -// the analyzed text. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-match-query-phrase.html -type MatchPhraseQuery struct { - name string - value interface{} - analyzer string - slop *int - boost *float64 - queryName string -} - -// NewMatchPhraseQuery creates and initializes a new MatchPhraseQuery. -func NewMatchPhraseQuery(name string, value interface{}) *MatchPhraseQuery { - return &MatchPhraseQuery{name: name, value: value} -} - -// Analyzer explicitly sets the analyzer to use. It defaults to use explicit -// mapping config for the field, or, if not set, the default search analyzer. -func (q *MatchPhraseQuery) Analyzer(analyzer string) *MatchPhraseQuery { - q.analyzer = analyzer - return q -} - -// Slop sets the phrase slop if evaluated to a phrase query type. -func (q *MatchPhraseQuery) Slop(slop int) *MatchPhraseQuery { - q.slop = &slop - return q -} - -// Boost sets the boost to apply to this query. -func (q *MatchPhraseQuery) Boost(boost float64) *MatchPhraseQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. 
-func (q *MatchPhraseQuery) QueryName(queryName string) *MatchPhraseQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *MatchPhraseQuery) Source() (interface{}, error) { - // {"match_phrase":{"name":{"query":"value","analyzer":"my_analyzer"}}} - source := make(map[string]interface{}) - - match := make(map[string]interface{}) - source["match_phrase"] = match - - query := make(map[string]interface{}) - match[q.name] = query - - query["query"] = q.value - - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.slop != nil { - query["slop"] = *q.slop - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase_prefix.go b/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase_prefix.go deleted file mode 100644 index fb366d3..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_match_phrase_prefix.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// MatchPhrasePrefixQuery is the same as match_phrase, except that it allows for -// prefix matches on the last term in the text. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-match-query-phrase-prefix.html -type MatchPhrasePrefixQuery struct { - name string - value interface{} - analyzer string - slop *int - maxExpansions *int - boost *float64 - queryName string -} - -// NewMatchPhrasePrefixQuery creates and initializes a new MatchPhrasePrefixQuery. -func NewMatchPhrasePrefixQuery(name string, value interface{}) *MatchPhrasePrefixQuery { - return &MatchPhrasePrefixQuery{name: name, value: value} -} - -// Analyzer explicitly sets the analyzer to use. It defaults to use explicit -// mapping config for the field, or, if not set, the default search analyzer. -func (q *MatchPhrasePrefixQuery) Analyzer(analyzer string) *MatchPhrasePrefixQuery { - q.analyzer = analyzer - return q -} - -// Slop sets the phrase slop if evaluated to a phrase query type. -func (q *MatchPhrasePrefixQuery) Slop(slop int) *MatchPhrasePrefixQuery { - q.slop = &slop - return q -} - -// MaxExpansions sets the number of term expansions to use. -func (q *MatchPhrasePrefixQuery) MaxExpansions(n int) *MatchPhrasePrefixQuery { - q.maxExpansions = &n - return q -} - -// Boost sets the boost to apply to this query. -func (q *MatchPhrasePrefixQuery) Boost(boost float64) *MatchPhrasePrefixQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *MatchPhrasePrefixQuery) QueryName(queryName string) *MatchPhrasePrefixQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. 
-func (q *MatchPhrasePrefixQuery) Source() (interface{}, error) { - // {"match_phrase_prefix":{"name":{"query":"value","max_expansions":10}}} - source := make(map[string]interface{}) - - match := make(map[string]interface{}) - source["match_phrase_prefix"] = match - - query := make(map[string]interface{}) - match[q.name] = query - - query["query"] = q.value - - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.slop != nil { - query["slop"] = *q.slop - } - if q.maxExpansions != nil { - query["max_expansions"] = *q.maxExpansions - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_more_like_this.go b/vendor/github.com/olivere/elastic/v7/search_queries_more_like_this.go deleted file mode 100644 index 0ee389b..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_more_like_this.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// MoreLikeThis query (MLT Query) finds documents that are "like" a given -// set of documents. In order to do so, MLT selects a set of representative -// terms of these input documents, forms a query using these terms, executes -// the query and returns the results. The user controls the input documents, -// how the terms should be selected and how the query is formed. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-mlt-query.html -type MoreLikeThisQuery struct { - fields []string - docs []*MoreLikeThisQueryItem - unlikeDocs []*MoreLikeThisQueryItem - include *bool - minimumShouldMatch string - minTermFreq *int - maxQueryTerms *int - stopWords []string - minDocFreq *int - maxDocFreq *int - minWordLength *int - maxWordLength *int - boostTerms *float64 - boost *float64 - analyzer string - failOnUnsupportedField *bool - queryName string -} - -// NewMoreLikeThisQuery creates and initializes a new MoreLikeThisQuery. -func NewMoreLikeThisQuery() *MoreLikeThisQuery { - return &MoreLikeThisQuery{ - fields: make([]string, 0), - stopWords: make([]string, 0), - docs: make([]*MoreLikeThisQueryItem, 0), - unlikeDocs: make([]*MoreLikeThisQueryItem, 0), - } -} - -// Field adds one or more field names to the query. -func (q *MoreLikeThisQuery) Field(fields ...string) *MoreLikeThisQuery { - q.fields = append(q.fields, fields...) - return q -} - -// StopWord sets the stopwords. Any word in this set is considered -// "uninteresting" and ignored. Even if your Analyzer allows stopwords, -// you might want to tell the MoreLikeThis code to ignore them, as for -// the purposes of document similarity it seems reasonable to assume that -// "a stop word is never interesting". -func (q *MoreLikeThisQuery) StopWord(stopWords ...string) *MoreLikeThisQuery { - q.stopWords = append(q.stopWords, stopWords...) - return q -} - -// LikeText sets the text to use in order to find documents that are "like" this. -func (q *MoreLikeThisQuery) LikeText(likeTexts ...string) *MoreLikeThisQuery { - for _, s := range likeTexts { - item := NewMoreLikeThisQueryItem().LikeText(s) - q.docs = append(q.docs, item) - } - return q -} - -// LikeItems sets the documents to use in order to find documents that are "like" this. 
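// Editor's sketch (invented field and text, not part of this diff): the two
// phrase variants removed above, side by side.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	exact := elastic.NewMatchPhraseQuery("message", "this is a test").Slop(2)
	typeahead := elastic.NewMatchPhrasePrefixQuery("message", "quick brown f").MaxExpansions(10)
	for _, q := range []elastic.Query{exact, typeahead} {
		if src, err := q.Source(); err == nil {
			fmt.Println(src)
		}
	}
}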
-func (q *MoreLikeThisQuery) LikeItems(docs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { - q.docs = append(q.docs, docs...) - return q -} - -// IgnoreLikeText sets the text from which the terms should not be selected. -func (q *MoreLikeThisQuery) IgnoreLikeText(ignoreLikeText ...string) *MoreLikeThisQuery { - for _, s := range ignoreLikeText { - item := NewMoreLikeThisQueryItem().LikeText(s) - q.unlikeDocs = append(q.unlikeDocs, item) - } - return q -} - -// IgnoreLikeItems sets the documents from which the terms should not be selected. -func (q *MoreLikeThisQuery) IgnoreLikeItems(ignoreDocs ...*MoreLikeThisQueryItem) *MoreLikeThisQuery { - q.unlikeDocs = append(q.unlikeDocs, ignoreDocs...) - return q -} - -// Ids sets the document ids to use in order to find documents that are "like" this. -func (q *MoreLikeThisQuery) Ids(ids ...string) *MoreLikeThisQuery { - for _, id := range ids { - item := NewMoreLikeThisQueryItem().Id(id) - q.docs = append(q.docs, item) - } - return q -} - -// Include specifies whether the input documents should also be included -// in the results returned. Defaults to false. -func (q *MoreLikeThisQuery) Include(include bool) *MoreLikeThisQuery { - q.include = &include - return q -} - -// MinimumShouldMatch sets the number of terms that must match the generated -// query expressed in the common syntax for minimum should match. -// The default value is "30%". -// -// This used to be "PercentTermsToMatch" in Elasticsearch versions before 2.0. -func (q *MoreLikeThisQuery) MinimumShouldMatch(minimumShouldMatch string) *MoreLikeThisQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -// MinTermFreq is the frequency below which terms will be ignored in the -// source doc. The default frequency is 2. -func (q *MoreLikeThisQuery) MinTermFreq(minTermFreq int) *MoreLikeThisQuery { - q.minTermFreq = &minTermFreq - return q -} - -// MaxQueryTerms sets the maximum number of query terms that will be included -// in any generated query. It defaults to 25. -func (q *MoreLikeThisQuery) MaxQueryTerms(maxQueryTerms int) *MoreLikeThisQuery { - q.maxQueryTerms = &maxQueryTerms - return q -} - -// MinDocFreq sets the frequency at which words will be ignored which do -// not occur in at least this many docs. The default is 5. -func (q *MoreLikeThisQuery) MinDocFreq(minDocFreq int) *MoreLikeThisQuery { - q.minDocFreq = &minDocFreq - return q -} - -// MaxDocFreq sets the maximum frequency for which words may still appear. -// Words that appear in more than this many docs will be ignored. -// It defaults to unbounded. -func (q *MoreLikeThisQuery) MaxDocFreq(maxDocFreq int) *MoreLikeThisQuery { - q.maxDocFreq = &maxDocFreq - return q -} - -// MinWordLength sets the minimum word length below which words will be -// ignored. It defaults to 0. -func (q *MoreLikeThisQuery) MinWordLength(minWordLength int) *MoreLikeThisQuery { - q.minWordLength = &minWordLength - return q -} - -// MaxWordLength sets the maximum word length above which words will be ignored. -// Defaults to unbounded (0). -func (q *MoreLikeThisQuery) MaxWordLength(maxWordLength int) *MoreLikeThisQuery { - q.maxWordLength = &maxWordLength - return q -} - -// BoostTerms sets the boost factor to use when boosting terms. -// It defaults to 1. -func (q *MoreLikeThisQuery) BoostTerms(boostTerms float64) *MoreLikeThisQuery { - q.boostTerms = &boostTerms - return q -} - -// Analyzer specifies the analyzer that will be used to analyze the text. -// Defaults to the analyzer associated with the field.
-func (q *MoreLikeThisQuery) Analyzer(analyzer string) *MoreLikeThisQuery { - q.analyzer = analyzer - return q -} - -// Boost sets the boost for this query. -func (q *MoreLikeThisQuery) Boost(boost float64) *MoreLikeThisQuery { - q.boost = &boost - return q -} - -// FailOnUnsupportedField indicates whether to fail or return no result -// when this query is run against a field which is not supported such as -// a binary/numeric field. -func (q *MoreLikeThisQuery) FailOnUnsupportedField(fail bool) *MoreLikeThisQuery { - q.failOnUnsupportedField = &fail - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *MoreLikeThisQuery) QueryName(queryName string) *MoreLikeThisQuery { - q.queryName = queryName - return q -} - -// Source creates the source for the MLT query. -// It may return an error if the caller forgot to specify any documents to -// be "liked" in the MoreLikeThisQuery. -func (q *MoreLikeThisQuery) Source() (interface{}, error) { - // { - // "match_all" : { ... } - // } - if len(q.docs) == 0 { - return nil, errors.New(`more_like_this requires some documents to be "liked"`) - } - - source := make(map[string]interface{}) - - params := make(map[string]interface{}) - source["more_like_this"] = params - - if len(q.fields) > 0 { - params["fields"] = q.fields - } - - var likes []interface{} - for _, doc := range q.docs { - src, err := doc.Source() - if err != nil { - return nil, err - } - likes = append(likes, src) - } - params["like"] = likes - - if len(q.unlikeDocs) > 0 { - var dontLikes []interface{} - for _, doc := range q.unlikeDocs { - src, err := doc.Source() - if err != nil { - return nil, err - } - dontLikes = append(dontLikes, src) - } - params["unlike"] = dontLikes - } - - if q.minimumShouldMatch != "" { - params["minimum_should_match"] = q.minimumShouldMatch - } - if q.minTermFreq != nil { - params["min_term_freq"] = *q.minTermFreq - } - if q.maxQueryTerms != nil { - params["max_query_terms"] = *q.maxQueryTerms - } - if len(q.stopWords) > 0 { - params["stop_words"] = q.stopWords - } - if q.minDocFreq != nil { - params["min_doc_freq"] = *q.minDocFreq - } - if q.maxDocFreq != nil { - params["max_doc_freq"] = *q.maxDocFreq - } - if q.minWordLength != nil { - params["min_word_length"] = *q.minWordLength - } - if q.maxWordLength != nil { - params["max_word_length"] = *q.maxWordLength - } - if q.boostTerms != nil { - params["boost_terms"] = *q.boostTerms - } - if q.boost != nil { - params["boost"] = *q.boost - } - if q.analyzer != "" { - params["analyzer"] = q.analyzer - } - if q.failOnUnsupportedField != nil { - params["fail_on_unsupported_field"] = *q.failOnUnsupportedField - } - if q.queryName != "" { - params["_name"] = q.queryName - } - if q.include != nil { - params["include"] = *q.include - } - - return source, nil -} - -// -- MoreLikeThisQueryItem -- - -// MoreLikeThisQueryItem represents a single item of a MoreLikeThisQuery -// to be "liked" or "unliked". -type MoreLikeThisQueryItem struct { - likeText string - - index string - typ string - id string - doc interface{} - fields []string - routing string - fsc *FetchSourceContext - version int64 - versionType string -} - -// NewMoreLikeThisQueryItem creates and initializes a MoreLikeThisQueryItem. -func NewMoreLikeThisQueryItem() *MoreLikeThisQueryItem { - return &MoreLikeThisQueryItem{ - version: -1, - } -} - -// LikeText represents a text to be "liked". 
-func (item *MoreLikeThisQueryItem) LikeText(likeText string) *MoreLikeThisQueryItem { - item.likeText = likeText - return item -} - -// Index represents the index of the item. -func (item *MoreLikeThisQueryItem) Index(index string) *MoreLikeThisQueryItem { - item.index = index - return item -} - -// Type represents the document type of the item. -// -// Deprecated: Types are in the process of being removed. -func (item *MoreLikeThisQueryItem) Type(typ string) *MoreLikeThisQueryItem { - item.typ = typ - return item -} - -// Id represents the document id of the item. -func (item *MoreLikeThisQueryItem) Id(id string) *MoreLikeThisQueryItem { - item.id = id - return item -} - -// Doc represents a raw document template for the item. -func (item *MoreLikeThisQueryItem) Doc(doc interface{}) *MoreLikeThisQueryItem { - item.doc = doc - return item -} - -// Fields represents the list of fields of the item. -func (item *MoreLikeThisQueryItem) Fields(fields ...string) *MoreLikeThisQueryItem { - item.fields = append(item.fields, fields...) - return item -} - -// Routing sets the routing associated with the item. -func (item *MoreLikeThisQueryItem) Routing(routing string) *MoreLikeThisQueryItem { - item.routing = routing - return item -} - -// FetchSourceContext represents the fetch source of the item which controls -// if and how _source should be returned. -func (item *MoreLikeThisQueryItem) FetchSourceContext(fsc *FetchSourceContext) *MoreLikeThisQueryItem { - item.fsc = fsc - return item -} - -// Version specifies the version of the item. -func (item *MoreLikeThisQueryItem) Version(version int64) *MoreLikeThisQueryItem { - item.version = version - return item -} - -// VersionType represents the version type of the item. -func (item *MoreLikeThisQueryItem) VersionType(versionType string) *MoreLikeThisQueryItem { - item.versionType = versionType - return item -} - -// Source returns the JSON-serializable fragment of the entity. -func (item *MoreLikeThisQueryItem) Source() (interface{}, error) { - if item.likeText != "" { - return item.likeText, nil - } - - source := make(map[string]interface{}) - - if item.index != "" { - source["_index"] = item.index - } - if item.typ != "" { - source["_type"] = item.typ - } - if item.id != "" { - source["_id"] = item.id - } - if item.doc != nil { - source["doc"] = item.doc - } - if len(item.fields) > 0 { - source["fields"] = item.fields - } - if item.routing != "" { - source["routing"] = item.routing - } - if item.fsc != nil { - src, err := item.fsc.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - if item.version >= 0 { - source["_version"] = item.version - } - if item.versionType != "" { - source["_version_type"] = item.versionType - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_multi_match.go b/vendor/github.com/olivere/elastic/v7/search_queries_multi_match.go deleted file mode 100644 index 48c5f28..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_multi_match.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "strings" -) - -// MultiMatchQuery builds on the MatchQuery to allow multi-field queries. 
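// Editor's sketch (invented index/ids/text, not part of this diff): "like"
// input can mix free text and concrete documents via MoreLikeThisQueryItem.
package main

import (
	"fmt"

	elastic "github.com/olivere/elastic/v7"
)

func main() {
	q := elastic.NewMoreLikeThisQuery().
		Field("title", "description").
		LikeText("Once upon a time").
		LikeItems(elastic.NewMoreLikeThisQueryItem().Index("imdb").Id("1")).
		MinTermFreq(1).
		MaxQueryTerms(12)
	// Source() errors if nothing was "liked", so check it.
	if src, err := q.Source(); err == nil {
		fmt.Println(src)
	}
}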
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-multi-match-query.html -type MultiMatchQuery struct { - text interface{} - fields []string - fieldBoosts map[string]*float64 - typ string // best_fields, boolean, most_fields, cross_fields, phrase, phrase_prefix - operator string // AND or OR - analyzer string - boost *float64 - slop *int - fuzziness string - prefixLength *int - maxExpansions *int - minimumShouldMatch string - rewrite string - fuzzyRewrite string - tieBreaker *float64 - lenient *bool - cutoffFrequency *float64 - zeroTermsQuery string - queryName string -} - -// MultiMatchQuery creates and initializes a new MultiMatchQuery. -func NewMultiMatchQuery(text interface{}, fields ...string) *MultiMatchQuery { - q := &MultiMatchQuery{ - text: text, - fields: make([]string, 0), - fieldBoosts: make(map[string]*float64), - } - q.fields = append(q.fields, fields...) - return q -} - -// Field adds a field to run the multi match against. -func (q *MultiMatchQuery) Field(field string) *MultiMatchQuery { - q.fields = append(q.fields, field) - return q -} - -// FieldWithBoost adds a field to run the multi match against with a specific boost. -func (q *MultiMatchQuery) FieldWithBoost(field string, boost float64) *MultiMatchQuery { - q.fields = append(q.fields, field) - q.fieldBoosts[field] = &boost - return q -} - -// Type can be "best_fields", "boolean", "most_fields", "cross_fields", -// "phrase", "phrase_prefix" or "bool_prefix" -func (q *MultiMatchQuery) Type(typ string) *MultiMatchQuery { - var zero = float64(0.0) - var one = float64(1.0) - - switch strings.ToLower(typ) { - default: // best_fields / boolean - q.typ = "best_fields" - q.tieBreaker = &zero - case "most_fields": - q.typ = "most_fields" - q.tieBreaker = &one - case "cross_fields": - q.typ = "cross_fields" - q.tieBreaker = &zero - case "phrase": - q.typ = "phrase" - q.tieBreaker = &zero - case "phrase_prefix": - q.typ = "phrase_prefix" - q.tieBreaker = &zero - case "bool_prefix": - q.typ = "bool_prefix" - q.tieBreaker = &zero - } - return q -} - -// Operator sets the operator to use when using boolean query. -// It can be either AND or OR (default). -func (q *MultiMatchQuery) Operator(operator string) *MultiMatchQuery { - q.operator = operator - return q -} - -// Analyzer sets the analyzer to use explicitly. It defaults to use explicit -// mapping config for the field, or, if not set, the default search analyzer. -func (q *MultiMatchQuery) Analyzer(analyzer string) *MultiMatchQuery { - q.analyzer = analyzer - return q -} - -// Boost sets the boost for this query. -func (q *MultiMatchQuery) Boost(boost float64) *MultiMatchQuery { - q.boost = &boost - return q -} - -// Slop sets the phrase slop if evaluated to a phrase query type. -func (q *MultiMatchQuery) Slop(slop int) *MultiMatchQuery { - q.slop = &slop - return q -} - -// Fuzziness sets the fuzziness used when evaluated to a fuzzy query type. -// It defaults to "AUTO". -func (q *MultiMatchQuery) Fuzziness(fuzziness string) *MultiMatchQuery { - q.fuzziness = fuzziness - return q -} - -// PrefixLength for the fuzzy process. -func (q *MultiMatchQuery) PrefixLength(prefixLength int) *MultiMatchQuery { - q.prefixLength = &prefixLength - return q -} - -// MaxExpansions is the number of term expansions to use when using fuzzy -// or prefix type query. It defaults to unbounded so it's recommended -// to set it to a reasonable value for faster execution. 
-func (q *MultiMatchQuery) MaxExpansions(maxExpansions int) *MultiMatchQuery { - q.maxExpansions = &maxExpansions - return q -} - -// MinimumShouldMatch represents the minimum number of optional should clauses -// to match. -func (q *MultiMatchQuery) MinimumShouldMatch(minimumShouldMatch string) *MultiMatchQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -func (q *MultiMatchQuery) Rewrite(rewrite string) *MultiMatchQuery { - q.rewrite = rewrite - return q -} - -func (q *MultiMatchQuery) FuzzyRewrite(fuzzyRewrite string) *MultiMatchQuery { - q.fuzzyRewrite = fuzzyRewrite - return q -} - -// TieBreaker for "best-match" disjunction queries (OR queries). -// The tie breaker capability allows documents that match more than one -// query clause (in this case on more than one field) to be scored better -// than documents that match only the best of the fields, without confusing -// this with the better case of two distinct matches in the multiple fields. -// -// A tie-breaker value of 1.0 is interpreted as a signal to score queries as -// "most-match" queries where all matching query clauses are considered for scoring. -func (q *MultiMatchQuery) TieBreaker(tieBreaker float64) *MultiMatchQuery { - q.tieBreaker = &tieBreaker - return q -} - -// Lenient indicates whether format based failures will be ignored. -func (q *MultiMatchQuery) Lenient(lenient bool) *MultiMatchQuery { - q.lenient = &lenient - return q -} - -// CutoffFrequency sets a cutoff value in [0..1] (or absolute number >=1) -// representing the maximum threshold of a terms document frequency to be -// considered a low frequency term. -func (q *MultiMatchQuery) CutoffFrequency(cutoff float64) *MultiMatchQuery { - q.cutoffFrequency = &cutoff - return q -} - -// ZeroTermsQuery can be "all" or "none". -func (q *MultiMatchQuery) ZeroTermsQuery(zeroTermsQuery string) *MultiMatchQuery { - q.zeroTermsQuery = zeroTermsQuery - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *MultiMatchQuery) QueryName(queryName string) *MultiMatchQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. 
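For reference, a minimal sketch of the MultiMatchQuery builder being removed here, using only methods defined in the deleted lines; the query text and field names are illustrative.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        q := elastic.NewMultiMatchQuery("quick brown fox", "subject", "message").
            FieldWithBoost("title", 2.0). // rendered as "title^2.000000" by Source()
            Type("best_fields").          // per Type() above, this also sets tie_breaker to 0
            Operator("and")

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        b, _ := json.Marshal(src)
        fmt.Println(string(b)) // {"multi_match":{"query":"quick brown fox","fields":[...],...}}
    }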
-func (q *MultiMatchQuery) Source() (interface{}, error) { - // - // { - // "multi_match" : { - // "query" : "this is a test", - // "fields" : [ "subject", "message" ] - // } - // } - - source := make(map[string]interface{}) - - multiMatch := make(map[string]interface{}) - source["multi_match"] = multiMatch - - multiMatch["query"] = q.text - - if len(q.fields) > 0 { - var fields []string - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - multiMatch["fields"] = fields - } - - if q.typ != "" { - multiMatch["type"] = q.typ - } - - if q.operator != "" { - multiMatch["operator"] = q.operator - } - if q.analyzer != "" { - multiMatch["analyzer"] = q.analyzer - } - if q.boost != nil { - multiMatch["boost"] = *q.boost - } - if q.slop != nil { - multiMatch["slop"] = *q.slop - } - if q.fuzziness != "" { - multiMatch["fuzziness"] = q.fuzziness - } - if q.prefixLength != nil { - multiMatch["prefix_length"] = *q.prefixLength - } - if q.maxExpansions != nil { - multiMatch["max_expansions"] = *q.maxExpansions - } - if q.minimumShouldMatch != "" { - multiMatch["minimum_should_match"] = q.minimumShouldMatch - } - if q.rewrite != "" { - multiMatch["rewrite"] = q.rewrite - } - if q.fuzzyRewrite != "" { - multiMatch["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.tieBreaker != nil { - multiMatch["tie_breaker"] = *q.tieBreaker - } - if q.lenient != nil { - multiMatch["lenient"] = *q.lenient - } - if q.cutoffFrequency != nil { - multiMatch["cutoff_frequency"] = *q.cutoffFrequency - } - if q.zeroTermsQuery != "" { - multiMatch["zero_terms_query"] = q.zeroTermsQuery - } - if q.queryName != "" { - multiMatch["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_nested.go b/vendor/github.com/olivere/elastic/v7/search_queries_nested.go deleted file mode 100644 index 211649d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_nested.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// NestedQuery allows to query nested objects / docs. -// The query is executed against the nested objects / docs as if they were -// indexed as separate docs (they are, internally) and resulting in the -// root parent doc (or parent nested mapping). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-nested-query.html -type NestedQuery struct { - query Query - path string - scoreMode string - boost *float64 - queryName string - innerHit *InnerHit - ignoreUnmapped *bool -} - -// NewNestedQuery creates and initializes a new NestedQuery. -func NewNestedQuery(path string, query Query) *NestedQuery { - return &NestedQuery{path: path, query: query} -} - -// ScoreMode specifies the score mode. -func (q *NestedQuery) ScoreMode(scoreMode string) *NestedQuery { - q.scoreMode = scoreMode - return q -} - -// Boost sets the boost for this query. 
-func (q *NestedQuery) Boost(boost float64) *NestedQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *NestedQuery) QueryName(queryName string) *NestedQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this nested query -// and reusing the defined path and query. -func (q *NestedQuery) InnerHit(innerHit *InnerHit) *NestedQuery { - q.innerHit = innerHit - return q -} - -// IgnoreUnmapped sets the ignore_unmapped option for the filter that ignores -// unmapped nested fields -func (q *NestedQuery) IgnoreUnmapped(value bool) *NestedQuery { - q.ignoreUnmapped = &value - return q -} - -// Source returns JSON for the query. -func (q *NestedQuery) Source() (interface{}, error) { - query := make(map[string]interface{}) - nq := make(map[string]interface{}) - query["nested"] = nq - - src, err := q.query.Source() - if err != nil { - return nil, err - } - nq["query"] = src - - nq["path"] = q.path - - if q.scoreMode != "" { - nq["score_mode"] = q.scoreMode - } - if q.boost != nil { - nq["boost"] = *q.boost - } - if q.queryName != "" { - nq["_name"] = q.queryName - } - if q.ignoreUnmapped != nil { - nq["ignore_unmapped"] = *q.ignoreUnmapped - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - nq["inner_hits"] = src - } - return query, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_parent_id.go b/vendor/github.com/olivere/elastic/v7/search_queries_parent_id.go deleted file mode 100644 index 46a4999..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_parent_id.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// ParentIdQuery can be used to find child documents which belong to a -// particular parent. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-parent-id-query.html -type ParentIdQuery struct { - typ string - id string - ignoreUnmapped *bool - boost *float64 - queryName string - innerHit *InnerHit -} - -// NewParentIdQuery creates and initializes a new parent_id query. -func NewParentIdQuery(typ, id string) *ParentIdQuery { - return &ParentIdQuery{ - typ: typ, - id: id, - } -} - -// Type sets the parent type. -func (q *ParentIdQuery) Type(typ string) *ParentIdQuery { - q.typ = typ - return q -} - -// Id sets the id. -func (q *ParentIdQuery) Id(id string) *ParentIdQuery { - q.id = id - return q -} - -// IgnoreUnmapped specifies whether unmapped types should be ignored. -// If set to false, the query fails when an unmapped type is found. -func (q *ParentIdQuery) IgnoreUnmapped(ignore bool) *ParentIdQuery { - q.ignoreUnmapped = &ignore - return q -} - -// Boost sets the boost for this query. -func (q *ParentIdQuery) Boost(boost float64) *ParentIdQuery { - q.boost = &boost - return q -} - -// QueryName specifies the query name for the filter that can be used when -// searching for matched filters per hit. -func (q *ParentIdQuery) QueryName(queryName string) *ParentIdQuery { - q.queryName = queryName - return q -} - -// InnerHit sets the inner hit definition in the scope of this query and -// reusing the defined type and query.
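A sketch of the NestedQuery removed above, wrapping a TermQuery (itself deleted later in this diff) against a nested path; all names are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        inner := elastic.NewTermQuery("comments.author", "alice")
        q := elastic.NewNestedQuery("comments", inner).
            ScoreMode("avg").    // how matching child docs contribute to the parent score
            IgnoreUnmapped(true) // don't fail if the nested field is unmapped

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }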
-func (q *ParentIdQuery) InnerHit(innerHit *InnerHit) *ParentIdQuery { - q.innerHit = innerHit - return q -} - -// Source returns JSON for the parent_id query. -func (q *ParentIdQuery) Source() (interface{}, error) { - // { - // "parent_id" : { - // "type" : "blog", - // "id" : "1" - // } - // } - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["parent_id"] = query - - query["type"] = q.typ - query["id"] = q.id - if q.boost != nil { - query["boost"] = *q.boost - } - if q.ignoreUnmapped != nil { - query["ignore_unmapped"] = *q.ignoreUnmapped - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.innerHit != nil { - src, err := q.innerHit.Source() - if err != nil { - return nil, err - } - query["inner_hits"] = src - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_percolator.go b/vendor/github.com/olivere/elastic/v7/search_queries_percolator.go deleted file mode 100644 index 5a459d0..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_percolator.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// PercolatorQuery can be used to match queries stored in an index. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-percolate-query.html -type PercolatorQuery struct { - field string - name string - documentType string // deprecated - documents []interface{} - indexedDocumentIndex string - indexedDocumentType string - indexedDocumentId string - indexedDocumentRouting string - indexedDocumentPreference string - indexedDocumentVersion *int64 -} - -// NewPercolatorQuery creates and initializes a new Percolator query. -func NewPercolatorQuery() *PercolatorQuery { - return &PercolatorQuery{} -} - -func (q *PercolatorQuery) Field(field string) *PercolatorQuery { - q.field = field - return q -} - -// Name used for identification purposes in "_percolator_document_slot" response -// field when multiple percolate queries have been specified in the main query. -func (q *PercolatorQuery) Name(name string) *PercolatorQuery { - q.name = name - return q -} - -// Deprecated: DocumentType is deprecated as of 6.0. -func (q *PercolatorQuery) DocumentType(typ string) *PercolatorQuery { - q.documentType = typ - return q -} - -func (q *PercolatorQuery) Document(docs ...interface{}) *PercolatorQuery { - q.documents = append(q.documents, docs...) - return q -} - -func (q *PercolatorQuery) IndexedDocumentIndex(index string) *PercolatorQuery { - q.indexedDocumentIndex = index - return q -} - -func (q *PercolatorQuery) IndexedDocumentType(typ string) *PercolatorQuery { - q.indexedDocumentType = typ - return q -} - -func (q *PercolatorQuery) IndexedDocumentId(id string) *PercolatorQuery { - q.indexedDocumentId = id - return q -} - -func (q *PercolatorQuery) IndexedDocumentRouting(routing string) *PercolatorQuery { - q.indexedDocumentRouting = routing - return q -} - -func (q *PercolatorQuery) IndexedDocumentPreference(preference string) *PercolatorQuery { - q.indexedDocumentPreference = preference - return q -} - -func (q *PercolatorQuery) IndexedDocumentVersion(version int64) *PercolatorQuery { - q.indexedDocumentVersion = &version - return q -} - -// Source returns JSON for the percolate query. 
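A sketch of the ParentIdQuery removed above; per its Source() implementation, only type and id are mandatory. The relation name and id are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        // Find children that belong to parent document "1".
        q := elastic.NewParentIdQuery("blog_tag", "1").IgnoreUnmapped(false)

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        // Map form of {"parent_id":{"type":"blog_tag","id":"1","ignore_unmapped":false}}.
        fmt.Println(src)
    }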
-func (q *PercolatorQuery) Source() (interface{}, error) { - if len(q.field) == 0 { - return nil, errors.New("elastic: Field is required in PercolatorQuery") - } - - // { - // "percolate" : { ... } - // } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["percolate"] = params - params["field"] = q.field - if q.documentType != "" { - params["document_type"] = q.documentType - } - if q.name != "" { - params["name"] = q.name - } - - switch len(q.documents) { - case 0: - case 1: - params["document"] = q.documents[0] - default: - params["documents"] = q.documents - } - - if s := q.indexedDocumentIndex; s != "" { - params["index"] = s - } - if s := q.indexedDocumentType; s != "" { - params["type"] = s - } - if s := q.indexedDocumentId; s != "" { - params["id"] = s - } - if s := q.indexedDocumentRouting; s != "" { - params["routing"] = s - } - if s := q.indexedDocumentPreference; s != "" { - params["preference"] = s - } - if v := q.indexedDocumentVersion; v != nil { - params["version"] = *v - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_prefix.go b/vendor/github.com/olivere/elastic/v7/search_queries_prefix.go deleted file mode 100644 index 8178aac..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_prefix.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PrefixQuery matches documents that have fields containing terms -// with a specified prefix (not analyzed). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-prefix-query.html -type PrefixQuery struct { - name string - prefix string - boost *float64 - rewrite string - queryName string -} - -// NewPrefixQuery creates and initializes a new PrefixQuery. -func NewPrefixQuery(name string, prefix string) *PrefixQuery { - return &PrefixQuery{name: name, prefix: prefix} -} - -// Boost sets the boost for this query. -func (q *PrefixQuery) Boost(boost float64) *PrefixQuery { - q.boost = &boost - return q -} - -func (q *PrefixQuery) Rewrite(rewrite string) *PrefixQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *PrefixQuery) QueryName(queryName string) *PrefixQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. -func (q *PrefixQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["prefix"] = query - - if q.boost == nil && q.rewrite == "" && q.queryName == "" { - query[q.name] = q.prefix - } else { - subQuery := make(map[string]interface{}) - subQuery["value"] = q.prefix - if q.boost != nil { - subQuery["boost"] = *q.boost - } - if q.rewrite != "" { - subQuery["rewrite"] = q.rewrite - } - if q.queryName != "" { - subQuery["_name"] = q.queryName - } - query[q.name] = subQuery - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_query_string.go b/vendor/github.com/olivere/elastic/v7/search_queries_query_string.go deleted file mode 100644 index f9baa21..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_query_string.go +++ /dev/null @@ -1,350 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. 
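A sketch of the PrefixQuery deleted just above; as its Source() shows, it emits the short {"prefix":{field:prefix}} form until an option such as Boost or Rewrite forces the expanded object form. Field and prefix are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        // Short form: {"prefix":{"user":"ki"}}
        q := elastic.NewPrefixQuery("user", "ki")

        // Expanded form once an option is set:
        // {"prefix":{"user":{"value":"ki","boost":1.5}}}
        boosted := elastic.NewPrefixQuery("user", "ki").Boost(1.5)

        for _, query := range []elastic.Query{q, boosted} {
            src, err := query.Source()
            if err != nil {
                panic(err)
            }
            fmt.Println(src)
        }
    }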
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// QueryStringQuery uses the query parser in order to parse its content. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-query-string-query.html -type QueryStringQuery struct { - queryString string - defaultField string - defaultOperator string - analyzer string - quoteAnalyzer string - quoteFieldSuffix string - allowLeadingWildcard *bool - lowercaseExpandedTerms *bool // Deprecated: Decision is now made by the analyzer - enablePositionIncrements *bool - analyzeWildcard *bool - locale string // Deprecated: Decision is now made by the analyzer - boost *float64 - fuzziness string - fuzzyPrefixLength *int - fuzzyMaxExpansions *int - fuzzyRewrite string - phraseSlop *int - fields []string - fieldBoosts map[string]*float64 - tieBreaker *float64 - rewrite string - minimumShouldMatch string - lenient *bool - queryName string - timeZone string - maxDeterminizedStates *int - escape *bool - typ string -} - -// NewQueryStringQuery creates and initializes a new QueryStringQuery. -func NewQueryStringQuery(queryString string) *QueryStringQuery { - return &QueryStringQuery{ - queryString: queryString, - fields: make([]string, 0), - fieldBoosts: make(map[string]*float64), - } -} - -// DefaultField specifies the field to run against when no prefix field -// is specified. Only relevant when not explicitly adding fields the query -// string will run against. -func (q *QueryStringQuery) DefaultField(defaultField string) *QueryStringQuery { - q.defaultField = defaultField - return q -} - -// Field adds a field to run the query string against. -func (q *QueryStringQuery) Field(field string) *QueryStringQuery { - q.fields = append(q.fields, field) - return q -} - -// Type sets how multiple fields should be combined to build textual part queries, -// e.g. "best_fields". -func (q *QueryStringQuery) Type(typ string) *QueryStringQuery { - q.typ = typ - return q -} - -// FieldWithBoost adds a field to run the query string against with a specific boost. -func (q *QueryStringQuery) FieldWithBoost(field string, boost float64) *QueryStringQuery { - q.fields = append(q.fields, field) - q.fieldBoosts[field] = &boost - return q -} - -// TieBreaker is used when more than one field is used with the query string, -// and combined queries are using dismax. -func (q *QueryStringQuery) TieBreaker(tieBreaker float64) *QueryStringQuery { - q.tieBreaker = &tieBreaker - return q -} - -// DefaultOperator sets the boolean operator of the query parser used to -// parse the query string. -// -// In default mode (OR) terms without any modifiers -// are considered optional, e.g. "capital of Hungary" is equal to -// "capital OR of OR Hungary". -// -// In AND mode, terms are considered to be in conjunction. The above mentioned -// query is then parsed as "capital AND of AND Hungary". -func (q *QueryStringQuery) DefaultOperator(operator string) *QueryStringQuery { - q.defaultOperator = operator - return q -} - -// Analyzer is an optional analyzer used to analyze the query string. -// Note, if a field has search analyzer defined for it, then it will be used -// automatically. Defaults to the smart search analyzer. 
-func (q *QueryStringQuery) Analyzer(analyzer string) *QueryStringQuery { - q.analyzer = analyzer - return q -} - -// QuoteAnalyzer is an optional analyzer to be used to analyze the query string -// for phrase searches. Note, if a field has search analyzer defined for it, -// then it will be used automatically. Defaults to the smart search analyzer. -func (q *QueryStringQuery) QuoteAnalyzer(quoteAnalyzer string) *QueryStringQuery { - q.quoteAnalyzer = quoteAnalyzer - return q -} - -// MaxDeterminizedState protects against too-difficult regular expression queries. -func (q *QueryStringQuery) MaxDeterminizedState(maxDeterminizedStates int) *QueryStringQuery { - q.maxDeterminizedStates = &maxDeterminizedStates - return q -} - -// AllowLeadingWildcard specifies whether leading wildcards should be allowed -// or not (defaults to true). -func (q *QueryStringQuery) AllowLeadingWildcard(allowLeadingWildcard bool) *QueryStringQuery { - q.allowLeadingWildcard = &allowLeadingWildcard - return q -} - -// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy -// and range queries are automatically lower-cased or not. Default is true. -// -// Deprecated: Decision is now made by the analyzer. -func (q *QueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *QueryStringQuery { - q.lowercaseExpandedTerms = &lowercaseExpandedTerms - return q -} - -// EnablePositionIncrements indicates whether to enable position increments -// in result query. Defaults to true. -// -// When set, result phrase and multi-phrase queries will be aware of position -// increments. Useful when e.g. a StopFilter increases the position increment -// of the token that follows an omitted token. -func (q *QueryStringQuery) EnablePositionIncrements(enablePositionIncrements bool) *QueryStringQuery { - q.enablePositionIncrements = &enablePositionIncrements - return q -} - -// Fuzziness sets the edit distance for fuzzy queries. Default is "AUTO". -func (q *QueryStringQuery) Fuzziness(fuzziness string) *QueryStringQuery { - q.fuzziness = fuzziness - return q -} - -// FuzzyPrefixLength sets the minimum prefix length for fuzzy queries. -// Default is 1. -func (q *QueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *QueryStringQuery { - q.fuzzyPrefixLength = &fuzzyPrefixLength - return q -} - -func (q *QueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *QueryStringQuery { - q.fuzzyMaxExpansions = &fuzzyMaxExpansions - return q -} - -func (q *QueryStringQuery) FuzzyRewrite(fuzzyRewrite string) *QueryStringQuery { - q.fuzzyRewrite = fuzzyRewrite - return q -} - -// PhraseSlop sets the default slop for phrases. If zero, then exact matches -// are required. Default value is zero. -func (q *QueryStringQuery) PhraseSlop(phraseSlop int) *QueryStringQuery { - q.phraseSlop = &phraseSlop - return q -} - -// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries. -func (q *QueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *QueryStringQuery { - q.analyzeWildcard = &analyzeWildcard - return q -} - -func (q *QueryStringQuery) Rewrite(rewrite string) *QueryStringQuery { - q.rewrite = rewrite - return q -} - -func (q *QueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *QueryStringQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -// Boost sets the boost for this query.
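A sketch of the QueryStringQuery builder being removed here; the query text uses Lucene syntax and the field names are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        q := elastic.NewQueryStringQuery("event_type:CREATED AND file_name:*.docx").
            DefaultField("message"). // used when a term carries no field prefix
            DefaultOperator("AND").  // terms are conjunctive rather than optional
            AnalyzeWildcard(true)    // analyze wildcard/prefix terms too

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }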
-func (q *QueryStringQuery) Boost(boost float64) *QueryStringQuery { - q.boost = &boost - return q -} - -// QuoteFieldSuffix is an optional field name suffix to automatically -// try and add to the field searched when using quoted text. -func (q *QueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *QueryStringQuery { - q.quoteFieldSuffix = quoteFieldSuffix - return q -} - -// Lenient indicates whether the query string parser should be lenient -// when parsing field values. It defaults to the index setting and if not -// set, defaults to false. -func (q *QueryStringQuery) Lenient(lenient bool) *QueryStringQuery { - q.lenient = &lenient - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *QueryStringQuery) QueryName(queryName string) *QueryStringQuery { - q.queryName = queryName - return q -} - -// Locale specifies the locale to be used for string conversions. -// -// Deprecated: Decision is now made by the analyzer. -func (q *QueryStringQuery) Locale(locale string) *QueryStringQuery { - q.locale = locale - return q -} - -// TimeZone can be used to automatically adjust to/from fields using a -// timezone. Only used with date fields, of course. -func (q *QueryStringQuery) TimeZone(timeZone string) *QueryStringQuery { - q.timeZone = timeZone - return q -} - -// Escape performs escaping of the query string. -func (q *QueryStringQuery) Escape(escape bool) *QueryStringQuery { - q.escape = &escape - return q -} - -// Source returns JSON for the query. -func (q *QueryStringQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["query_string"] = query - - query["query"] = q.queryString - - if q.defaultField != "" { - query["default_field"] = q.defaultField - } - - if len(q.fields) > 0 { - var fields []string - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - query["fields"] = fields - } - - if q.tieBreaker != nil { - query["tie_breaker"] = *q.tieBreaker - } - if q.defaultOperator != "" { - query["default_operator"] = q.defaultOperator - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.quoteAnalyzer != "" { - query["quote_analyzer"] = q.quoteAnalyzer - } - if q.maxDeterminizedStates != nil { - query["max_determinized_states"] = *q.maxDeterminizedStates - } - if q.allowLeadingWildcard != nil { - query["allow_leading_wildcard"] = *q.allowLeadingWildcard - } - if q.lowercaseExpandedTerms != nil { - query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms - } - if q.enablePositionIncrements != nil { - query["enable_position_increments"] = *q.enablePositionIncrements - } - if q.fuzziness != "" { - query["fuzziness"] = q.fuzziness - } - if q.boost != nil { - query["boost"] = *q.boost - } - if q.fuzzyPrefixLength != nil { - query["fuzzy_prefix_length"] = *q.fuzzyPrefixLength - } - if q.fuzzyMaxExpansions != nil { - query["fuzzy_max_expansions"] = *q.fuzzyMaxExpansions - } - if q.fuzzyRewrite != "" { - query["fuzzy_rewrite"] = q.fuzzyRewrite - } - if q.phraseSlop != nil { - query["phrase_slop"] = *q.phraseSlop - } - if q.analyzeWildcard != nil { - query["analyze_wildcard"] = *q.analyzeWildcard - } - if q.rewrite != "" { - query["rewrite"] = q.rewrite - } - if q.minimumShouldMatch != "" { - 
query["minimum_should_match"] = q.minimumShouldMatch - } - if q.quoteFieldSuffix != "" { - query["quote_field_suffix"] = q.quoteFieldSuffix - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.locale != "" { - query["locale"] = q.locale - } - if q.timeZone != "" { - query["time_zone"] = q.timeZone - } - if q.escape != nil { - query["escape"] = *q.escape - } - if q.typ != "" { - query["type"] = q.typ - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_range.go b/vendor/github.com/olivere/elastic/v7/search_queries_range.go deleted file mode 100644 index e3c0730..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_range.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// RangeQuery matches documents with fields that have terms within a certain range. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-range-query.html -type RangeQuery struct { - name string - from interface{} - to interface{} - timeZone string - includeLower bool - includeUpper bool - boost *float64 - queryName string - format string - relation string -} - -// NewRangeQuery creates and initializes a new RangeQuery. -func NewRangeQuery(name string) *RangeQuery { - return &RangeQuery{name: name, includeLower: true, includeUpper: true} -} - -// From indicates the from part of the RangeQuery. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) From(from interface{}) *RangeQuery { - q.from = from - return q -} - -// Gt indicates a greater-than value for the from part. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) Gt(from interface{}) *RangeQuery { - q.from = from - q.includeLower = false - return q -} - -// Gte indicates a greater-than-or-equal value for the from part. -// Use nil to indicate an unbounded from part. -func (q *RangeQuery) Gte(from interface{}) *RangeQuery { - q.from = from - q.includeLower = true - return q -} - -// To indicates the to part of the RangeQuery. -// Use nil to indicate an unbounded to part. -func (q *RangeQuery) To(to interface{}) *RangeQuery { - q.to = to - return q -} - -// Lt indicates a less-than value for the to part. -// Use nil to indicate an unbounded to part. -func (q *RangeQuery) Lt(to interface{}) *RangeQuery { - q.to = to - q.includeUpper = false - return q -} - -// Lte indicates a less-than-or-equal value for the to part. -// Use nil to indicate an unbounded to part. -func (q *RangeQuery) Lte(to interface{}) *RangeQuery { - q.to = to - q.includeUpper = true - return q -} - -// IncludeLower indicates whether the lower bound should be included or not. -// Defaults to true. -func (q *RangeQuery) IncludeLower(includeLower bool) *RangeQuery { - q.includeLower = includeLower - return q -} - -// IncludeUpper indicates whether the upper bound should be included or not. -// Defaults to true. -func (q *RangeQuery) IncludeUpper(includeUpper bool) *RangeQuery { - q.includeUpper = includeUpper - return q -} - -// Boost sets the boost for this query. -func (q *RangeQuery) Boost(boost float64) *RangeQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. 
-func (q *RangeQuery) QueryName(queryName string) *RangeQuery { - q.queryName = queryName - return q -} - -// TimeZone is used for date fields. In that case, we can adjust the -// from/to fields using a timezone. -func (q *RangeQuery) TimeZone(timeZone string) *RangeQuery { - q.timeZone = timeZone - return q -} - -// Format is used for date fields. In that case, we can set the format -// to be used instead of the mapper format. -func (q *RangeQuery) Format(format string) *RangeQuery { - q.format = format - return q -} - -// Relation is used for range fields, which can be one of -// "within", "contains", "intersects" (default) and "disjoint". -func (q *RangeQuery) Relation(relation string) *RangeQuery { - q.relation = relation - return q -} - -// Source returns JSON for the query. -func (q *RangeQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - rangeQ := make(map[string]interface{}) - source["range"] = rangeQ - - params := make(map[string]interface{}) - rangeQ[q.name] = params - - params["from"] = q.from - params["to"] = q.to - if q.timeZone != "" { - params["time_zone"] = q.timeZone - } - if q.format != "" { - params["format"] = q.format - } - if q.relation != "" { - params["relation"] = q.relation - } - if q.boost != nil { - params["boost"] = *q.boost - } - params["include_lower"] = q.includeLower - params["include_upper"] = q.includeUpper - - if q.queryName != "" { - rangeQ["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_raw_string.go b/vendor/github.com/olivere/elastic/v7/search_queries_raw_string.go deleted file mode 100644 index 3f9685c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_raw_string.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012-present Oliver Eilhard, John Stanford. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "encoding/json" - -// RawStringQuery can be used to treat a string representation of an ES query -// as a Query. Example usage: -// q := RawStringQuery("{\"match_all\":{}}") -// db.Search().Query(q).From(1).Size(100).Do() -type RawStringQuery string - -// NewRawStringQuery initializes a new RawStringQuery. -// It is the same as RawStringQuery(q). -func NewRawStringQuery(q string) RawStringQuery { - return RawStringQuery(q) -} - -// Source returns the JSON encoded body -func (q RawStringQuery) Source() (interface{}, error) { - var f interface{} - err := json.Unmarshal([]byte(q), &f) - return f, err -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_regexp.go b/vendor/github.com/olivere/elastic/v7/search_queries_regexp.go deleted file mode 100644 index 25a0c3d..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_regexp.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// RegexpQuery allows you to use regular expression term queries. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-regexp-query.html -type RegexpQuery struct { - name string - regexp string - flags string - boost *float64 - rewrite string - queryName string - maxDeterminizedStates *int -} - -// NewRegexpQuery creates and initializes a new RegexpQuery.
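A sketch of the RangeQuery removed above: Gte/Lt set the bounds and bound inclusiveness together, while Format and TimeZone apply to date fields. The field name and dates are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        // Half-open interval: event_timestamp >= from and < to.
        q := elastic.NewRangeQuery("event_timestamp").
            Gte("2020-04-01T00:00:00Z").
            Lt("2020-05-01T00:00:00Z").
            Format("strict_date_optional_time")

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }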
-func NewRegexpQuery(name string, regexp string) *RegexpQuery { - return &RegexpQuery{name: name, regexp: regexp} -} - -// Flags sets the regexp flags. -func (q *RegexpQuery) Flags(flags string) *RegexpQuery { - q.flags = flags - return q -} - -// MaxDeterminizedStates protects against complex regular expressions. -func (q *RegexpQuery) MaxDeterminizedStates(maxDeterminizedStates int) *RegexpQuery { - q.maxDeterminizedStates = &maxDeterminizedStates - return q -} - -// Boost sets the boost for this query. -func (q *RegexpQuery) Boost(boost float64) *RegexpQuery { - q.boost = &boost - return q -} - -func (q *RegexpQuery) Rewrite(rewrite string) *RegexpQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *RegexpQuery) QueryName(queryName string) *RegexpQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON-serializable query data. -func (q *RegexpQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["regexp"] = query - - x := make(map[string]interface{}) - x["value"] = q.regexp - if q.flags != "" { - x["flags"] = q.flags - } - if q.maxDeterminizedStates != nil { - x["max_determinized_states"] = *q.maxDeterminizedStates - } - if q.boost != nil { - x["boost"] = *q.boost - } - if q.rewrite != "" { - x["rewrite"] = q.rewrite - } - if q.queryName != "" { - x["name"] = q.queryName - } - query[q.name] = x - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_script.go b/vendor/github.com/olivere/elastic/v7/search_queries_script.go deleted file mode 100644 index d99b0c0..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_script.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// ScriptQuery allows to define scripts as filters. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-script-query.html -type ScriptQuery struct { - script *Script - queryName string -} - -// NewScriptQuery creates and initializes a new ScriptQuery. -func NewScriptQuery(script *Script) *ScriptQuery { - return &ScriptQuery{ - script: script, - } -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *ScriptQuery) QueryName(queryName string) *ScriptQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. -func (q *ScriptQuery) Source() (interface{}, error) { - if q.script == nil { - return nil, errors.New("ScriptQuery expected a script") - } - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["script"] = params - - src, err := q.script.Source() - if err != nil { - return nil, err - } - params["script"] = src - - if q.queryName != "" { - params["_name"] = q.queryName - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_script_score.go b/vendor/github.com/olivere/elastic/v7/search_queries_script_score.go deleted file mode 100644 index 7f00e0f..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_script_score.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. 
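A sketch of the RegexpQuery removed above; MaxDeterminizedStates bounds automaton expansion so an expensive pattern cannot blow up the query. Field and pattern are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        q := elastic.NewRegexpQuery("file_name", "report-[0-9]{4}\\.docx").
            MaxDeterminizedStates(10000) // cap automaton size for safety

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }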
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// ScriptScoreQuery uses a script to provide a custom score for returned documents. -// -// A ScriptScoreQuery query is useful if, for example, a scoring function is -// expensive and you only need to calculate the score of a filtered set of documents. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.4/query-dsl-script-score-query.html -type ScriptScoreQuery struct { - query Query - script *Script - minScore *float64 - boost *float64 - queryName string -} - -// NewScriptScoreQuery creates and initializes a new script_score query. -func NewScriptScoreQuery(query Query, script *Script) *ScriptScoreQuery { - return &ScriptScoreQuery{ - query: query, - script: script, - } -} - -// Query to be used in the ScriptScoreQuery. -func (q *ScriptScoreQuery) Query(query Query) *ScriptScoreQuery { - q.query = query - return q -} - -// Script to calculate the score. -func (q *ScriptScoreQuery) Script(script *Script) *ScriptScoreQuery { - q.script = script - return q -} - -// MinScore sets the minimum score. -func (q *ScriptScoreQuery) MinScore(minScore float64) *ScriptScoreQuery { - q.minScore = &minScore - return q -} - -// Boost sets the boost for this query. -func (q *ScriptScoreQuery) Boost(boost float64) *ScriptScoreQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter. -func (q *ScriptScoreQuery) QueryName(queryName string) *ScriptScoreQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the function score query. -func (q *ScriptScoreQuery) Source() (interface{}, error) { - // { - // "script_score" : { - // "query" : { - // "match" : { "message": "elasticsearch" } - // }, - // "script" : { - // "source" : "doc['likes'].value / 10" - // } - // } - // } - - source := make(map[string]interface{}) - query := make(map[string]interface{}) - source["script_score"] = query - - if q.query == nil { - return nil, errors.New("ScriptScoreQuery: Query is missing") - } - if q.script == nil { - return nil, errors.New("ScriptScoreQuery: Script is missing") - } - - if src, err := q.query.Source(); err != nil { - return nil, err - } else { - query["query"] = src - } - - if src, err := q.script.Source(); err != nil { - return nil, err - } else { - query["script"] = src - } - - if v := q.minScore; v != nil { - query["min_score"] = *v - } - - if v := q.boost; v != nil { - query["boost"] = *v - } - if q.queryName != "" { - query["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_simple_query_string.go b/vendor/github.com/olivere/elastic/v7/search_queries_simple_query_string.go deleted file mode 100644 index 230e041..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_simple_query_string.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" - "strings" -) - -// SimpleQueryStringQuery is a query that uses the SimpleQueryParser -// to parse its context. Unlike the regular query_string query, -// the simple_query_string query will never throw an exception, -// and discards invalid parts of the query. 
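A sketch of the ScriptScoreQuery removed above, mirroring the JSON example embedded in its Source(); NewTermQuery comes from a file deleted later in this diff, and NewScript is assumed from the same package.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        // Filter first, then score the survivors with a script.
        // NewScript is assumed from the same package.
        q := elastic.NewScriptScoreQuery(
            elastic.NewTermQuery("message", "elasticsearch"),
            elastic.NewScript("doc['likes'].value / 10"),
        ).MinScore(0.5) // drop documents scoring below 0.5

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }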
-// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-simple-query-string-query.html -type SimpleQueryStringQuery struct { - queryText string - analyzer string - quoteFieldSuffix string - defaultOperator string - fields []string - fieldBoosts map[string]*float64 - minimumShouldMatch string - flags string - boost *float64 - lowercaseExpandedTerms *bool // deprecated - lenient *bool - analyzeWildcard *bool - locale string // deprecated - queryName string - autoGenerateSynonymsPhraseQuery *bool - fuzzyPrefixLength int - fuzzyMaxExpansions int - fuzzyTranspositions *bool -} - -// NewSimpleQueryStringQuery creates and initializes a new SimpleQueryStringQuery. -func NewSimpleQueryStringQuery(text string) *SimpleQueryStringQuery { - return &SimpleQueryStringQuery{ - queryText: text, - fields: make([]string, 0), - fieldBoosts: make(map[string]*float64), - fuzzyPrefixLength: -1, - fuzzyMaxExpansions: -1, - } -} - -// Field adds a field to run the query against. -func (q *SimpleQueryStringQuery) Field(field string) *SimpleQueryStringQuery { - q.fields = append(q.fields, field) - return q -} - -// FieldWithBoost adds a field to run the query against with a specific boost. -func (q *SimpleQueryStringQuery) FieldWithBoost(field string, boost float64) *SimpleQueryStringQuery { - q.fields = append(q.fields, field) - q.fieldBoosts[field] = &boost - return q -} - -// Boost sets the boost for this query. -func (q *SimpleQueryStringQuery) Boost(boost float64) *SimpleQueryStringQuery { - q.boost = &boost - return q -} - -// QuoteFieldSuffix is an optional field name suffix to automatically -// try and add to the field searched when using quoted text. -func (q *SimpleQueryStringQuery) QuoteFieldSuffix(quoteFieldSuffix string) *SimpleQueryStringQuery { - q.quoteFieldSuffix = quoteFieldSuffix - return q -} - -// QueryName sets the query name for the filter that can be used when -// searching for matched_filters per hit. -func (q *SimpleQueryStringQuery) QueryName(queryName string) *SimpleQueryStringQuery { - q.queryName = queryName - return q -} - -// Analyzer specifies the analyzer to use for the query. -func (q *SimpleQueryStringQuery) Analyzer(analyzer string) *SimpleQueryStringQuery { - q.analyzer = analyzer - return q -} - -// DefaultOperator specifies the default operator for the query. -func (q *SimpleQueryStringQuery) DefaultOperator(defaultOperator string) *SimpleQueryStringQuery { - q.defaultOperator = defaultOperator - return q -} - -// Flags sets the flags for the query. -func (q *SimpleQueryStringQuery) Flags(flags string) *SimpleQueryStringQuery { - q.flags = flags - return q -} - -// LowercaseExpandedTerms indicates whether terms of wildcard, prefix, fuzzy -// and range queries are automatically lower-cased or not. Default is true. -// -// Deprecated: Decision is now made by the analyzer. -func (q *SimpleQueryStringQuery) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *SimpleQueryStringQuery { - q.lowercaseExpandedTerms = &lowercaseExpandedTerms - return q -} - -// Locale to be used in the query. -// -// Deprecated: Decision is now made by the analyzer. -func (q *SimpleQueryStringQuery) Locale(locale string) *SimpleQueryStringQuery { - q.locale = locale - return q -} - -// Lenient indicates whether the query string parser should be lenient -// when parsing field values. It defaults to the index setting and if not -// set, defaults to false.
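A sketch of the SimpleQueryStringQuery builder being removed here, using the same example query as the JSON comment in its Source(); unlike query_string, invalid syntax is discarded rather than raising an error.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        q := elastic.NewSimpleQueryStringQuery(`"fried eggs" +(eggplant | potato) -frittata`).
            Field("body").
            FieldWithBoost("title", 5.0). // rendered as "title^5.000000"
            DefaultOperator("and")

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        fmt.Println(src)
    }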
-func (q *SimpleQueryStringQuery) Lenient(lenient bool) *SimpleQueryStringQuery { - q.lenient = &lenient - return q -} - -// AnalyzeWildcard indicates whether to enable analysis on wildcard and prefix queries. -func (q *SimpleQueryStringQuery) AnalyzeWildcard(analyzeWildcard bool) *SimpleQueryStringQuery { - q.analyzeWildcard = &analyzeWildcard - return q -} - -// MinimumShouldMatch specifies the minimumShouldMatch to apply to the -// resulting query should that be a Boolean query. -func (q *SimpleQueryStringQuery) MinimumShouldMatch(minimumShouldMatch string) *SimpleQueryStringQuery { - q.minimumShouldMatch = minimumShouldMatch - return q -} - -// AutoGenerateSynonymsPhraseQuery indicates whether phrase queries should be -// automatically generated for multi terms synonyms. Defaults to true. -func (q *SimpleQueryStringQuery) AutoGenerateSynonymsPhraseQuery(enable bool) *SimpleQueryStringQuery { - q.autoGenerateSynonymsPhraseQuery = &enable - return q -} - -// FuzzyPrefixLength defines the prefix length in fuzzy queries. -func (q *SimpleQueryStringQuery) FuzzyPrefixLength(fuzzyPrefixLength int) *SimpleQueryStringQuery { - q.fuzzyPrefixLength = fuzzyPrefixLength - return q -} - -// FuzzyMaxExpansions defines the number of terms fuzzy queries will expand to. -func (q *SimpleQueryStringQuery) FuzzyMaxExpansions(fuzzyMaxExpansions int) *SimpleQueryStringQuery { - q.fuzzyMaxExpansions = fuzzyMaxExpansions - return q -} - -// FuzzyTranspositions defines whether to use transpositions in fuzzy queries. -func (q *SimpleQueryStringQuery) FuzzyTranspositions(fuzzyTranspositions bool) *SimpleQueryStringQuery { - q.fuzzyTranspositions = &fuzzyTranspositions - return q -} - -// Source returns JSON for the query. -func (q *SimpleQueryStringQuery) Source() (interface{}, error) { - // { - // "simple_query_string" : { - // "query" : "\"fried eggs\" +(eggplant | potato) -frittata", - // "analyzer" : "snowball", - // "fields" : ["body^5","_all"], - // "default_operator" : "and" - // } - // } - - source := make(map[string]interface{}) - - query := make(map[string]interface{}) - source["simple_query_string"] = query - - query["query"] = q.queryText - - if len(q.fields) > 0 { - var fields []string - for _, field := range q.fields { - if boost, found := q.fieldBoosts[field]; found { - if boost != nil { - fields = append(fields, fmt.Sprintf("%s^%f", field, *boost)) - } else { - fields = append(fields, field) - } - } else { - fields = append(fields, field) - } - } - query["fields"] = fields - } - - if q.flags != "" { - query["flags"] = q.flags - } - if q.analyzer != "" { - query["analyzer"] = q.analyzer - } - if q.defaultOperator != "" { - query["default_operator"] = strings.ToLower(q.defaultOperator) - } - if q.lowercaseExpandedTerms != nil { - query["lowercase_expanded_terms"] = *q.lowercaseExpandedTerms - } - if q.lenient != nil { - query["lenient"] = *q.lenient - } - if q.analyzeWildcard != nil { - query["analyze_wildcard"] = *q.analyzeWildcard - } - if q.locale != "" { - query["locale"] = q.locale - } - if q.queryName != "" { - query["_name"] = q.queryName - } - if q.minimumShouldMatch != "" { - query["minimum_should_match"] = q.minimumShouldMatch - } - if q.quoteFieldSuffix != "" { - query["quote_field_suffix"] = q.quoteFieldSuffix - } - if q.boost != nil { - query["boost"] = *q.boost - } - if v := q.autoGenerateSynonymsPhraseQuery; v != nil { - query["auto_generate_synonyms_phrase_query"] = *v - } - if v := q.fuzzyPrefixLength; v != -1 { - query["fuzzy_prefix_length"] = v - } - if v := q.fuzzyMaxExpansions;
v != -1 { - query["fuzzy_max_expansions"] = v - } - if v := q.fuzzyTranspositions; v != nil { - query["fuzzy_transpositions"] = *v - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_slice.go b/vendor/github.com/olivere/elastic/v7/search_queries_slice.go deleted file mode 100644 index 0eac784..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_slice.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// SliceQuery allows to partition the documents into several slices. -// It is used e.g. to slice scroll operations in Elasticsearch 5.0 or later. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#sliced-scroll -// for details. -type SliceQuery struct { - field string - id *int - max *int -} - -// NewSliceQuery creates a new SliceQuery. -func NewSliceQuery() *SliceQuery { - return &SliceQuery{} -} - -// Field is the name of the field to slice against (_uid by default). -func (s *SliceQuery) Field(field string) *SliceQuery { - s.field = field - return s -} - -// Id is the id of the slice. -func (s *SliceQuery) Id(id int) *SliceQuery { - s.id = &id - return s -} - -// Max is the maximum number of slices. -func (s *SliceQuery) Max(max int) *SliceQuery { - s.max = &max - return s -} - -// Source returns the JSON body. -func (s *SliceQuery) Source() (interface{}, error) { - m := make(map[string]interface{}) - if s.field != "" { - m["field"] = s.field - } - if s.id != nil { - m["id"] = *s.id - } - if s.max != nil { - m["max"] = *s.max - } - return m, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_term.go b/vendor/github.com/olivere/elastic/v7/search_queries_term.go deleted file mode 100644 index 832e67e..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_term.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermQuery finds documents that contain the exact term specified -// in the inverted index. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-term-query.html -type TermQuery struct { - name string - value interface{} - boost *float64 - queryName string -} - -// NewTermQuery creates and initializes a new TermQuery. -func NewTermQuery(name string, value interface{}) *TermQuery { - return &TermQuery{name: name, value: value} -} - -// Boost sets the boost for this query. -func (q *TermQuery) Boost(boost float64) *TermQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *TermQuery) QueryName(queryName string) *TermQuery { - q.queryName = queryName - return q -} - -// Source returns JSON for the query. 
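A sketch of the SliceQuery removed above, which partitions a scroll so several workers can consume it in parallel; attaching the slice to a scroll via the client's Scroll service is an assumed API and is shown only as a comment.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        // Two workers, each taking one slice of the same scroll.
        for id := 0; id < 2; id++ {
            slice := elastic.NewSliceQuery().Id(id).Max(2)
            src, err := slice.Source()
            if err != nil {
                panic(err)
            }
            fmt.Println(src) // {"id":<id>,"max":2}
            // Typically attached to a scroll, e.g. (assumed API):
            //   client.Scroll("some-index").Slice(slice).Do(ctx)
        }
    }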
-func (q *TermQuery) Source() (interface{}, error) { - // {"term":{"name":"value"}} - source := make(map[string]interface{}) - tq := make(map[string]interface{}) - source["term"] = tq - - if q.boost == nil && q.queryName == "" { - tq[q.name] = q.value - } else { - subQ := make(map[string]interface{}) - subQ["value"] = q.value - if q.boost != nil { - subQ["boost"] = *q.boost - } - if q.queryName != "" { - subQ["_name"] = q.queryName - } - tq[q.name] = subQ - } - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_terms.go b/vendor/github.com/olivere/elastic/v7/search_queries_terms.go deleted file mode 100644 index b8171cd..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_terms.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermsQuery filters documents that have fields that match any -// of the provided terms (not analyzed). -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-terms-query.html -type TermsQuery struct { - name string - values []interface{} - termsLookup *TermsLookup - queryName string - boost *float64 -} - -// NewTermsQuery creates and initializes a new TermsQuery. -func NewTermsQuery(name string, values ...interface{}) *TermsQuery { - q := &TermsQuery{ - name: name, - values: make([]interface{}, 0), - } - if len(values) > 0 { - q.values = append(q.values, values...) - } - return q -} - -// TermsLookup adds terms lookup details to the query. -func (q *TermsQuery) TermsLookup(lookup *TermsLookup) *TermsQuery { - q.termsLookup = lookup - return q -} - -// Boost sets the boost for this query. -func (q *TermsQuery) Boost(boost float64) *TermsQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *TermsQuery) QueryName(queryName string) *TermsQuery { - q.queryName = queryName - return q -} - -// Creates the query source for the term query. -func (q *TermsQuery) Source() (interface{}, error) { - // {"terms":{"name":["value1","value2"]}} - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["terms"] = params - - if q.termsLookup != nil { - src, err := q.termsLookup.Source() - if err != nil { - return nil, err - } - params[q.name] = src - } else { - params[q.name] = q.values - if q.boost != nil { - params["boost"] = *q.boost - } - if q.queryName != "" { - params["_name"] = q.queryName - } - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_terms_set.go b/vendor/github.com/olivere/elastic/v7/search_queries_terms_set.go deleted file mode 100644 index 6a33a2a..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_terms_set.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermsSetQuery returns any documents that match with at least -// one or more of the provided terms. The terms are not analyzed -// and thus must match exactly. 
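A sketch contrasting the TermQuery and TermsQuery removed here; both match exact, non-analyzed values, and the field and values are illustrative.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        exact := elastic.NewTermQuery("event_type", "CREATED")
        anyOf := elastic.NewTermsQuery("event_type", "CREATED", "MODIFIED")

        for _, q := range []elastic.Query{exact, anyOf} {
            src, err := q.Source()
            if err != nil {
                panic(err)
            }
            // {"term":{"event_type":"CREATED"}} and
            // {"terms":{"event_type":["CREATED","MODIFIED"]}} respectively.
            fmt.Println(src)
        }
    }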
The number of terms that must -// match varies per document and is either controlled by a -// minimum should match field or computed per document in a -// minimum should match script. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-terms-set-query.html -type TermsSetQuery struct { - name string - values []interface{} - minimumShouldMatchField string - minimumShouldMatchScript *Script - queryName string - boost *float64 -} - -// NewTermsSetQuery creates and initializes a new TermsSetQuery. -func NewTermsSetQuery(name string, values ...interface{}) *TermsSetQuery { - q := &TermsSetQuery{ - name: name, - values: make([]interface{}, 0), - } - if len(values) > 0 { - q.values = append(q.values, values...) - } - return q -} - -// MinimumShouldMatchField specifies the field to match. -func (q *TermsSetQuery) MinimumShouldMatchField(minimumShouldMatchField string) *TermsSetQuery { - q.minimumShouldMatchField = minimumShouldMatchField - return q -} - -// MinimumShouldMatchScript specifies the script to match. -func (q *TermsSetQuery) MinimumShouldMatchScript(minimumShouldMatchScript *Script) *TermsSetQuery { - q.minimumShouldMatchScript = minimumShouldMatchScript - return q -} - -// Boost sets the boost for this query. -func (q *TermsSetQuery) Boost(boost float64) *TermsSetQuery { - q.boost = &boost - return q -} - -// QueryName sets the query name for the filter that can be used -// when searching for matched_filters per hit -func (q *TermsSetQuery) QueryName(queryName string) *TermsSetQuery { - q.queryName = queryName - return q -} - -// Source creates the query source for the term query. -func (q *TermsSetQuery) Source() (interface{}, error) { - // {"terms_set":{"codes":{"terms":["abc","def"],"minimum_should_match_field":"required_matches"}}} - source := make(map[string]interface{}) - inner := make(map[string]interface{}) - params := make(map[string]interface{}) - inner[q.name] = params - source["terms_set"] = inner - - // terms - params["terms"] = q.values - - // minimum_should_match_field - if match := q.minimumShouldMatchField; match != "" { - params["minimum_should_match_field"] = match - } - - // minimum_should_match_script - if match := q.minimumShouldMatchScript; match != nil { - src, err := match.Source() - if err != nil { - return nil, err - } - params["minimum_should_match_script"] = src - } - - // Common parameters for all queries - if q.boost != nil { - params["boost"] = *q.boost - } - if q.queryName != "" { - params["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_type.go b/vendor/github.com/olivere/elastic/v7/search_queries_type.go deleted file mode 100644 index 61867c1..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_type.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TypeQuery filters documents matching the provided document / mapping type. -// -// For details, see: -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-type-query.html -type TypeQuery struct { - typ string -} - -func NewTypeQuery(typ string) *TypeQuery { - return &TypeQuery{typ: typ} -} - -// Source returns JSON for the query. 
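A sketch of the TermsSetQuery removed above, reproducing the JSON example from its Source() comment; required_matches names a per-document numeric field holding how many terms must match.

    package main

    import (
        "fmt"

        "github.com/olivere/elastic/v7"
    )

    func main() {
        q := elastic.NewTermsSetQuery("codes", "abc", "def").
            MinimumShouldMatchField("required_matches")

        src, err := q.Source()
        if err != nil {
            panic(err)
        }
        // {"terms_set":{"codes":{"terms":["abc","def"],
        //   "minimum_should_match_field":"required_matches"}}}
        fmt.Println(src)
    }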
-func (q *TypeQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - params := make(map[string]interface{}) - source["type"] = params - params["value"] = q.typ - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_wildcard.go b/vendor/github.com/olivere/elastic/v7/search_queries_wildcard.go deleted file mode 100644 index 33e4ea2..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_wildcard.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// WildcardQuery matches documents that have fields matching a wildcard -// expression (not analyzed). Supported wildcards are *, which matches -// any character sequence (including the empty one), and ?, which matches -// any single character. Note this query can be slow, as it needs to iterate -// over many terms. In order to prevent extremely slow wildcard queries, -// a wildcard term should not start with one of the wildcards * or ?. -// The wildcard query maps to Lucene WildcardQuery. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-wildcard-query.html -type WildcardQuery struct { - name string - wildcard string - boost *float64 - rewrite string - queryName string -} - -// NewWildcardQuery creates and initializes a new WildcardQuery. -func NewWildcardQuery(name, wildcard string) *WildcardQuery { - return &WildcardQuery{ - name: name, - wildcard: wildcard, - } -} - -// Boost sets the boost for this query. -func (q *WildcardQuery) Boost(boost float64) *WildcardQuery { - q.boost = &boost - return q -} - -func (q *WildcardQuery) Rewrite(rewrite string) *WildcardQuery { - q.rewrite = rewrite - return q -} - -// QueryName sets the name of this query. -func (q *WildcardQuery) QueryName(queryName string) *WildcardQuery { - q.queryName = queryName - return q -} - -// Source returns the JSON serializable body of this query. -func (q *WildcardQuery) Source() (interface{}, error) { - // { - // "wildcard" : { - // "user" : { - // "wildcard" : "ki*y", - // "boost" : 1.0 - // } - // } - - source := make(map[string]interface{}) - - query := make(map[string]interface{}) - source["wildcard"] = query - - wq := make(map[string]interface{}) - query[q.name] = wq - - wq["wildcard"] = q.wildcard - - if q.boost != nil { - wq["boost"] = *q.boost - } - if q.rewrite != "" { - wq["rewrite"] = q.rewrite - } - if q.queryName != "" { - wq["_name"] = q.queryName - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_queries_wrapper.go b/vendor/github.com/olivere/elastic/v7/search_queries_wrapper.go deleted file mode 100644 index 2a32d1e..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_queries_wrapper.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// WrapperQuery accepts any other query as base64 encoded string. -// -// For details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-wrapper-query.html. -type WrapperQuery struct { - source string -} - -// NewWrapperQuery creates and initializes a new WrapperQuery. 
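A sketch of the WildcardQuery above in an actual search, under stated assumptions: the initialized *elastic.Client and the "users" index are mine, and client.Search is the package's SearchService, which is not part of this diff.

    func searchWildcard(ctx context.Context, client *elastic.Client) error {
        // Avoid a leading * or ? in the pattern; such wildcards are slow.
        q := elastic.NewWildcardQuery("user", "ki*y").Boost(1.0)
        res, err := client.Search().Index("users").Query(q).Do(ctx)
        if err != nil {
            return err
        }
        fmt.Printf("%d hits\n", res.TotalHits())
        return nil
    }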
-func NewWrapperQuery(source string) *WrapperQuery { - return &WrapperQuery{source: source} -} - -// Source returns JSON for the query. -func (q *WrapperQuery) Source() (interface{}, error) { - // {"wrapper":{"query":"..."}} - source := make(map[string]interface{}) - tq := make(map[string]interface{}) - source["wrapper"] = tq - tq["query"] = q.source - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_request.go b/vendor/github.com/olivere/elastic/v7/search_request.go deleted file mode 100644 index 3a444b8..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_request.go +++ /dev/null @@ -1,586 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "encoding/json" - "strings" -) - -// SearchRequest combines a search request and its -// query details (see SearchSource). -// It is used in combination with MultiSearch. -type SearchRequest struct { - searchType string - indices []string - types []string - routing *string - preference *string - requestCache *bool - allowPartialSearchResults *bool - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - scroll string - source interface{} - searchSource *SearchSource - batchedReduceSize *int - maxConcurrentShardRequests *int - preFilterShardSize *int -} - -// NewSearchRequest creates a new search request. -func NewSearchRequest() *SearchRequest { - return &SearchRequest{ - searchSource: NewSearchSource(), - } -} - -// SearchType must be one of "dfs_query_then_fetch", "dfs_query_and_fetch", -// "query_then_fetch", or "query_and_fetch". -func (r *SearchRequest) SearchType(searchType string) *SearchRequest { - r.searchType = searchType - return r -} - -// SearchTypeDfsQueryThenFetch sets search type to "dfs_query_then_fetch". -func (r *SearchRequest) SearchTypeDfsQueryThenFetch() *SearchRequest { - return r.SearchType("dfs_query_then_fetch") -} - -// SearchTypeQueryThenFetch sets search type to "query_then_fetch". -func (r *SearchRequest) SearchTypeQueryThenFetch() *SearchRequest { - return r.SearchType("query_then_fetch") -} - -// Index specifies the indices to use in the request. -func (r *SearchRequest) Index(indices ...string) *SearchRequest { - r.indices = append(r.indices, indices...) - return r -} - -// HasIndices returns true if there are indices used, false otherwise. -func (r *SearchRequest) HasIndices() bool { - return len(r.indices) > 0 -} - -// Type specifies one or more types to be used. -// -// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to -// filter on a field on the document. -func (r *SearchRequest) Type(types ...string) *SearchRequest { - r.types = append(r.types, types...) - return r -} - -// Routing specifies the routing parameter. It is a comma-separated list. -func (r *SearchRequest) Routing(routing string) *SearchRequest { - r.routing = &routing - return r -} - -// Routings to be used in the request. -func (r *SearchRequest) Routings(routings ...string) *SearchRequest { - if routings != nil { - routings := strings.Join(routings, ",") - r.routing = &routings - } else { - r.routing = nil - } - return r -} - -// Preference to execute the search. Defaults to randomize across shards. 
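A sketch for WrapperQuery: since it accepts the wrapped query as a base64-encoded string, a raw JSON query is encoded first. The inner query JSON is an invented example; encoding/base64 is assumed to be imported.

    func wrapQuery() elastic.Query {
        inner := `{"term":{"user":"olivere"}}`
        encoded := base64.StdEncoding.EncodeToString([]byte(inner))
        return elastic.NewWrapperQuery(encoded) // {"wrapper":{"query":"<base64 blob>"}}
    }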
-// Can be set to "_local" to prefer local shards, "_primary" to execute -// only on primary shards, or a custom value, which guarantees that the -// same order will be used across different requests. -func (r *SearchRequest) Preference(preference string) *SearchRequest { - r.preference = &preference - return r -} - -// RequestCache specifies if this request should use the request cache -// or not, assuming that it can. By default, will default to the index -// level setting if request cache is enabled or not. -func (r *SearchRequest) RequestCache(requestCache bool) *SearchRequest { - r.requestCache = &requestCache - return r -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *SearchRequest) IgnoreUnavailable(ignoreUnavailable bool) *SearchRequest { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified). -func (s *SearchRequest) AllowNoIndices(allowNoIndices bool) *SearchRequest { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *SearchRequest) ExpandWildcards(expandWildcards string) *SearchRequest { - s.expandWildcards = expandWildcards - return s -} - -// Scroll, if set, will enable scrolling of the search request. -// Pass a timeout value, e.g. "2m" or "30s" as a value. -func (r *SearchRequest) Scroll(scroll string) *SearchRequest { - r.scroll = scroll - return r -} - -// SearchSource allows passing your own SearchSource, overriding -// all values set on the request (except Source). -func (r *SearchRequest) SearchSource(searchSource *SearchSource) *SearchRequest { - if searchSource == nil { - r.searchSource = NewSearchSource() - return r - } - r.searchSource = searchSource - return r -} - -// Source allows passing your own request body. It will have preference over -// all other properties set on the request. -func (r *SearchRequest) Source(source interface{}) *SearchRequest { - r.source = source - return r -} - -// Timeout value for the request, e.g. "30s" or "2m". -func (r *SearchRequest) Timeout(timeout string) *SearchRequest { - r.searchSource = r.searchSource.Timeout(timeout) - return r -} - -// TerminateAfter, when set, specifies an optional document count, -// upon collecting which the search query will terminate early. -func (r *SearchRequest) TerminateAfter(docs int) *SearchRequest { - r.searchSource = r.searchSource.TerminateAfter(docs) - return r -} - -// Query for the search. -func (r *SearchRequest) Query(query Query) *SearchRequest { - r.searchSource = r.searchSource.Query(query) - return r -} - -// PostFilter is a filter that will be executed after the query -// has been executed and only has affect on the search hits -// (not aggregations). This filter is always executed as last -// filtering mechanism. -func (r *SearchRequest) PostFilter(filter Query) *SearchRequest { - r.searchSource = r.searchSource.PostFilter(filter) - return r -} - -// MinScore below which documents are filtered out. -func (r *SearchRequest) MinScore(minScore float64) *SearchRequest { - r.searchSource = r.searchSource.MinScore(minScore) - return r -} - -// From index to start search from (default is 0). 
-func (r *SearchRequest) From(from int) *SearchRequest { - r.searchSource = r.searchSource.From(from) - return r -} - -// Size is the number of search hits to return (default is 10). -func (r *SearchRequest) Size(size int) *SearchRequest { - r.searchSource = r.searchSource.Size(size) - return r -} - -// Explain indicates whether to return an explanation for each hit. -func (r *SearchRequest) Explain(explain bool) *SearchRequest { - r.searchSource = r.searchSource.Explain(explain) - return r -} - -// Version indicates whether each hit should be returned with -// its version. -func (r *SearchRequest) Version(version bool) *SearchRequest { - r.searchSource = r.searchSource.Version(version) - return r -} - -// IndexBoost sets a boost a specific index will receive when -// the query is executed against it. -func (r *SearchRequest) IndexBoost(index string, boost float64) *SearchRequest { - r.searchSource = r.searchSource.IndexBoost(index, boost) - return r -} - -// Stats groups that this request will be aggregated under. -func (r *SearchRequest) Stats(statsGroup ...string) *SearchRequest { - r.searchSource = r.searchSource.Stats(statsGroup...) - return r -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (r *SearchRequest) FetchSource(fetchSource bool) *SearchRequest { - r.searchSource = r.searchSource.FetchSource(fetchSource) - return r -} - -// FetchSourceIncludeExclude specifies that _source should be returned -// with each hit, where "include" and "exclude" serve as a simple wildcard -// matcher that gets applied to its fields -// (e.g. include := []string{"obj1.*","obj2.*"}, exclude := []string{"description.*"}). -func (r *SearchRequest) FetchSourceIncludeExclude(include, exclude []string) *SearchRequest { - r.searchSource = r.searchSource.FetchSourceIncludeExclude(include, exclude) - return r -} - -// FetchSourceContext indicates how the _source should be fetched. -func (r *SearchRequest) FetchSourceContext(fsc *FetchSourceContext) *SearchRequest { - r.searchSource = r.searchSource.FetchSourceContext(fsc) - return r -} - -// DocValueField adds a docvalue based field to load and return. -// The field does not have to be stored, but it's recommended to use -// non analyzed or numeric fields. -func (r *SearchRequest) DocValueField(field string) *SearchRequest { - r.searchSource = r.searchSource.DocvalueField(field) - return r -} - -// DocValueFieldWithFormat adds a docvalue based field to load and return. -// The field does not have to be stored, but it's recommended to use -// non analyzed or numeric fields. -func (r *SearchRequest) DocValueFieldWithFormat(field DocvalueField) *SearchRequest { - r.searchSource = r.searchSource.DocvalueFieldWithFormat(field) - return r -} - -// DocValueFields adds one or more docvalue based field to load and return. -// The fields do not have to be stored, but it's recommended to use -// non analyzed or numeric fields. -func (r *SearchRequest) DocValueFields(fields ...string) *SearchRequest { - r.searchSource = r.searchSource.DocvalueFields(fields...) - return r -} - -// DocValueFieldsWithFormat adds one or more docvalue based field to load and return. -// The fields do not have to be stored, but it's recommended to use -// non analyzed or numeric fields. -func (r *SearchRequest) DocValueFieldsWithFormat(fields ...DocvalueField) *SearchRequest { - r.searchSource = r.searchSource.DocvalueFieldsWithFormat(fields...) 
- return r -} - -// StoredField adds a stored field to load and return -// (note, it must be stored) as part of the search request. -func (r *SearchRequest) StoredField(field string) *SearchRequest { - r.searchSource = r.searchSource.StoredField(field) - return r -} - -// NoStoredFields indicates that no fields should be loaded, -// resulting in only id and type to be returned per field. -func (r *SearchRequest) NoStoredFields() *SearchRequest { - r.searchSource = r.searchSource.NoStoredFields() - return r -} - -// StoredFields adds one or more stored fields to load and return -// (note, they must be stored) as part of the search request. -func (r *SearchRequest) StoredFields(fields ...string) *SearchRequest { - r.searchSource = r.searchSource.StoredFields(fields...) - return r -} - -// ScriptField adds a script based field to load and return. -// The field does not have to be stored, but it's recommended -// to use non analyzed or numeric fields. -func (r *SearchRequest) ScriptField(field *ScriptField) *SearchRequest { - r.searchSource = r.searchSource.ScriptField(field) - return r -} - -// ScriptFields adds one or more script based fields to load and return. -// The fields do not have to be stored, but it's recommended -// to use non analyzed or numeric fields. -func (r *SearchRequest) ScriptFields(fields ...*ScriptField) *SearchRequest { - r.searchSource = r.searchSource.ScriptFields(fields...) - return r -} - -// Sort adds a sort order. -func (r *SearchRequest) Sort(field string, ascending bool) *SearchRequest { - r.searchSource = r.searchSource.Sort(field, ascending) - return r -} - -// SortWithInfo adds a sort order. -func (r *SearchRequest) SortWithInfo(info SortInfo) *SearchRequest { - r.searchSource = r.searchSource.SortWithInfo(info) - return r -} - -// SortBy adds a sort order. -func (r *SearchRequest) SortBy(sorter ...Sorter) *SearchRequest { - r.searchSource = r.searchSource.SortBy(sorter...) - return r -} - -// SearchAfter sets the sort values that indicate which docs this -// request should "search after". -func (r *SearchRequest) SearchAfter(sortValues ...interface{}) *SearchRequest { - r.searchSource = r.searchSource.SearchAfter(sortValues...) - return r -} - -// Slice allows partitioning the documents in multiple slices. -// It is e.g. used to slice a scroll operation, supported in -// Elasticsearch 5.0 or later. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#sliced-scroll -// for details. -func (r *SearchRequest) Slice(sliceQuery Query) *SearchRequest { - r.searchSource = r.searchSource.Slice(sliceQuery) - return r -} - -// TrackScores is applied when sorting and controls if scores will be -// tracked as well. Defaults to false. -func (r *SearchRequest) TrackScores(trackScores bool) *SearchRequest { - r.searchSource = r.searchSource.TrackScores(trackScores) - return r -} - -// TrackTotalHits indicates if the total hit count for the query should be tracked. -// Defaults to true. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-track-total-hits.html -// for details. -func (r *SearchRequest) TrackTotalHits(trackTotalHits interface{}) *SearchRequest { - r.searchSource = r.searchSource.TrackTotalHits(trackTotalHits) - return r -} - -// Aggregation adds an aggregation to perform as part of the search. 
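A sketch of keyset pagination with SearchAfter as described above: sort deterministically, then seed the next page with the sort values of the previous page's last hit. The index, field, and function name are assumptions.

    func nextPage(lastSortValues ...interface{}) *elastic.SearchRequest {
        return elastic.NewSearchRequest().
            Index("file-events").
            Query(elastic.NewTermQuery("user", "olivere")).
            Sort("event_timestamp", true). // ascending; search_after needs a stable sort
            Size(100).
            SearchAfter(lastSortValues...)
    }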
-func (r *SearchRequest) Aggregation(name string, aggregation Aggregation) *SearchRequest { - r.searchSource = r.searchSource.Aggregation(name, aggregation) - return r -} - -// Highlight adds highlighting to the search. -func (r *SearchRequest) Highlight(highlight *Highlight) *SearchRequest { - r.searchSource = r.searchSource.Highlight(highlight) - return r -} - -// Suggester adds a suggester to the search. -func (r *SearchRequest) Suggester(suggester Suggester) *SearchRequest { - r.searchSource = r.searchSource.Suggester(suggester) - return r -} - -// Rescorer adds a rescorer to the search. -func (r *SearchRequest) Rescorer(rescore *Rescore) *SearchRequest { - r.searchSource = r.searchSource.Rescorer(rescore) - return r -} - -// ClearRescorers removes all rescorers from the search. -func (r *SearchRequest) ClearRescorers() *SearchRequest { - r.searchSource = r.searchSource.ClearRescorers() - return r -} - -// Profile specifies that this search source should activate the -// Profile API for queries made on it. -func (r *SearchRequest) Profile(profile bool) *SearchRequest { - r.searchSource = r.searchSource.Profile(profile) - return r -} - -// Collapse adds field collapsing. -func (r *SearchRequest) Collapse(collapse *CollapseBuilder) *SearchRequest { - r.searchSource = r.searchSource.Collapse(collapse) - return r -} - -// AllowPartialSearchResults indicates if this request should allow partial -// results. (If this method is not called, it defaults to the cluster level -// setting.) -func (r *SearchRequest) AllowPartialSearchResults(allow bool) *SearchRequest { - r.allowPartialSearchResults = &allow - return r -} - -// BatchedReduceSize specifies the number of shard results that should be -// reduced at once on the coordinating node. This value should be used -// as a protection mechanism to reduce the memory overhead per search request -// if the potential number of shards in the request can be large. -func (r *SearchRequest) BatchedReduceSize(size int) *SearchRequest { - r.batchedReduceSize = &size - return r -} - -// MaxConcurrentShardRequests sets the number of shard requests that should -// be executed concurrently. This value should be used as a protection -// mechanism to reduce the number of shard requests fired per high level -// search request. Searches that hit the entire cluster can be throttled -// with this number to reduce the cluster load. The default grows with -// the number of nodes in the cluster but is at most 256. -func (r *SearchRequest) MaxConcurrentShardRequests(size int) *SearchRequest { - r.maxConcurrentShardRequests = &size - return r -} - -// PreFilterShardSize sets a threshold that enforces a pre-filter roundtrip -// to pre-filter search shards based on query rewriting if the number of -// shards the search request expands to exceeds the threshold. -// This filter roundtrip can limit the number of shards significantly if, for -// instance, a shard cannot match any documents based on its rewrite -// method, i.e. if date filters are mandatory to match but the shard -// bounds and the query are disjoint. The default is 128. -func (r *SearchRequest) PreFilterShardSize(size int) *SearchRequest { - r.preFilterShardSize = &size - return r -} - -// header is used e.g. by MultiSearch to get information about the search header -// of one SearchRequest. 
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-multi-search.html -func (r *SearchRequest) header() interface{} { - h := make(map[string]interface{}) - if r.searchType != "" { - h["search_type"] = r.searchType - } - - switch len(r.indices) { - case 0: - case 1: - h["index"] = r.indices[0] - default: - h["indices"] = r.indices - } - - switch len(r.types) { - case 0: - case 1: - h["type"] = r.types[0] - default: - h["types"] = r.types - } - - if r.routing != nil && *r.routing != "" { - h["routing"] = *r.routing - } - if r.preference != nil && *r.preference != "" { - h["preference"] = *r.preference - } - if r.requestCache != nil { - h["request_cache"] = *r.requestCache - } - if r.ignoreUnavailable != nil { - h["ignore_unavailable"] = *r.ignoreUnavailable - } - if r.allowNoIndices != nil { - h["allow_no_indices"] = *r.allowNoIndices - } - if r.expandWildcards != "" { - h["expand_wildcards"] = r.expandWildcards - } - if v := r.allowPartialSearchResults; v != nil { - h["allow_partial_search_results"] = *v - } - if r.scroll != "" { - h["scroll"] = r.scroll - } - - return h -} - -// Body provides access to the search body of the request, as generated by the DSL. -// Notice that Body is read-only. You must not change the request body. -// -// Body is used e.g. by MultiSearch to get information about the search body -// of one SearchRequest. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-multi-search.html -func (r *SearchRequest) Body() (string, error) { - if r.source == nil { - // Default: No custom source specified - src, err := r.searchSource.Source() - if err != nil { - return "", err - } - body, err := json.Marshal(src) - if err != nil { - return "", err - } - return string(body), nil - } - switch t := r.source.(type) { - default: - body, err := json.Marshal(r.source) - if err != nil { - return "", err - } - return string(body), nil - case *SearchSource: - src, err := t.Source() - if err != nil { - return "", err - } - body, err := json.Marshal(src) - if err != nil { - return "", err - } - return string(body), nil - case json.RawMessage: - return string(t), nil - case *json.RawMessage: - return string(*t), nil - case string: - return t, nil - case *string: - if t != nil { - return *t, nil - } - return "{}", nil - } -} - -// sourceAsMap returns the search source as a map. It is used by Reindex. -func (r *SearchRequest) sourceAsMap() (interface{}, error) { - if r.source == nil { - // Default: No custom source specified - return r.searchSource.Source() - } - switch t := r.source.(type) { - default: - body, err := json.Marshal(r.source) - if err != nil { - return "", err - } - return RawStringQuery(body), nil - case *SearchSource: - return t.Source() - case json.RawMessage: - return RawStringQuery(string(t)), nil - case *json.RawMessage: - return RawStringQuery(string(*t)), nil - case string: - return RawStringQuery(t), nil - case *string: - if t != nil { - return RawStringQuery(*t), nil - } - return RawStringQuery("{}"), nil - } -} diff --git a/vendor/github.com/olivere/elastic/v7/search_shards.go b/vendor/github.com/olivere/elastic/v7/search_shards.go deleted file mode 100644 index 6e566ac..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_shards.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
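A sketch of the MultiSearch flow that header and Body above support: each SearchRequest contributes one header line and one body line to the _msearch payload. client.MultiSearch and SearchResult.TotalHits live elsewhere in this package and are assumed here; index and queries are invented.

    func multiSearch(ctx context.Context, client *elastic.Client) error {
        req1 := elastic.NewSearchRequest().Index("file-events").Query(elastic.NewTermQuery("user", "olivere"))
        req2 := elastic.NewSearchRequest().Index("file-events").Query(elastic.NewWildcardQuery("file.name", "*.docx"))
        res, err := client.MultiSearch().Add(req1, req2).Do(ctx)
        if err != nil {
            return err
        }
        for _, r := range res.Responses {
            fmt.Println(r.TotalHits())
        }
        return nil
    }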
- -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SearchShardsService returns the indices and shards that a search request would be executed against. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-shards.html -type SearchShardsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - routing string - local *bool - preference string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string -} - -// NewSearchShardsService creates a new SearchShardsService. -func NewSearchShardsService(client *Client) *SearchShardsService { - return &SearchShardsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SearchShardsService) Pretty(pretty bool) *SearchShardsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SearchShardsService) Human(human bool) *SearchShardsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SearchShardsService) ErrorTrace(errorTrace bool) *SearchShardsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SearchShardsService) FilterPath(filterPath ...string) *SearchShardsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SearchShardsService) Header(name string, value string) *SearchShardsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SearchShardsService) Headers(headers http.Header) *SearchShardsService { - s.headers = headers - return s -} - -// Index sets the names of the indices to restrict the results. -func (s *SearchShardsService) Index(index ...string) *SearchShardsService { - s.index = append(s.index, index...) - return s -} - -// Local indicates whether to read the cluster state locally in order to -// determine where shards are allocated, instead of using the master node's cluster state. -func (s *SearchShardsService) Local(local bool) *SearchShardsService { - s.local = &local - return s -} - -// Routing sets a specific routing value. -func (s *SearchShardsService) Routing(routing string) *SearchShardsService { - s.routing = routing - return s -} - -// Preference specifies the node or shard the operation should be performed on (default: random). -func (s *SearchShardsService) Preference(preference string) *SearchShardsService { - s.preference = preference - return s -} - -// IgnoreUnavailable indicates whether the specified concrete indices -// should be ignored when unavailable (missing or closed). -func (s *SearchShardsService) IgnoreUnavailable(ignoreUnavailable bool) *SearchShardsService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. 
(This includes `_all` string -// or when no indices have been specified). -func (s *SearchShardsService) AllowNoIndices(allowNoIndices bool) *SearchShardsService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *SearchShardsService) ExpandWildcards(expandWildcards string) *SearchShardsService { - s.expandWildcards = expandWildcards - return s -} - -// buildURL builds the URL for the operation. -func (s *SearchShardsService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/{index}/_search_shards", map[string]string{ - "index": strings.Join(s.index, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.local != nil { - params.Set("local", fmt.Sprintf("%v", *s.local)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if s.ignoreUnavailable != nil { - params.Set("ignore_unavailable", fmt.Sprintf("%v", *s.ignoreUnavailable)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SearchShardsService) Validate() error { - var invalid []string - if len(s.index) < 1 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SearchShardsService) Do(ctx context.Context) (*SearchShardsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SearchShardsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SearchShardsResponse is the response of SearchShardsService.Do. 
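A sketch using the SearchShardsService above to inspect shard placement for an index; the index name is an assumption, and the response fields come from SearchShardsResponse defined just below.

    func printShards(ctx context.Context, client *elastic.Client) error {
        res, err := elastic.NewSearchShardsService(client).
            Index("file-events").
            Local(true).
            Do(ctx)
        if err != nil {
            return err
        }
        for _, group := range res.Shards {
            for _, sh := range group {
                fmt.Printf("shard %d on node %s (primary=%v, state=%s)\n",
                    sh.Shard, sh.Node, sh.Primary, sh.State)
            }
        }
        return nil
    }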
-type SearchShardsResponse struct { - Nodes map[string]interface{} `json:"nodes"` - Indices map[string]interface{} `json:"indices"` - Shards [][]*SearchShardsResponseShardsInfo `json:"shards"` -} - -type SearchShardsResponseShardsInfo struct { - Index string `json:"index"` - Node string `json:"node"` - Primary bool `json:"primary"` - Shard uint `json:"shard"` - State string `json:"state"` - AllocationId *AllocationId `json:"allocation_id,omitempty"` - RelocatingNode string `json:"relocating_node"` - ExpectedShardSizeInBytes int64 `json:"expected_shard_size_in_bytes,omitempty"` - RecoverySource *RecoverySource `json:"recovery_source,omitempty"` - UnassignedInfo *UnassignedInfo `json:"unassigned_info,omitempty"` -} - -type RecoverySource struct { - Type string `json:"type"` - // TODO add missing fields here based on the Type -} - -type AllocationId struct { - Id string `json:"id"` - RelocationId string `json:"relocation_id,omitempty"` -} - -type UnassignedInfo struct { - Reason string `json:"reason"` - At *time.Time `json:"at,omitempty"` - FailedAttempts int `json:"failed_attempts,omitempty"` - Delayed bool `json:"delayed"` - Details string `json:"details,omitempty"` - AllocationStatus string `json:"allocation_status"` -} diff --git a/vendor/github.com/olivere/elastic/v7/search_source.go b/vendor/github.com/olivere/elastic/v7/search_source.go deleted file mode 100644 index 724fc1c..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_source.go +++ /dev/null @@ -1,592 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "fmt" -) - -// SearchSource enables users to build the search source. -// It resembles the SearchSourceBuilder in Elasticsearch. -type SearchSource struct { - query Query // query - postQuery Query // post_filter - sliceQuery Query // slice - from int // from - size int // size - explain *bool // explain - version *bool // version - seqNoAndPrimaryTerm *bool // seq_no_primary_term - sorters []Sorter // sort - trackScores *bool // track_scores - trackTotalHits interface{} // track_total_hits - searchAfterSortValues []interface{} // search_after - minScore *float64 // min_score - timeout string // timeout - terminateAfter *int // terminate_after - storedFieldNames []string // stored_fields - docvalueFields DocvalueFields // docvalue_fields - scriptFields []*ScriptField // script_fields - fetchSourceContext *FetchSourceContext // _source - aggregations map[string]Aggregation // aggregations / aggs - highlight *Highlight // highlight - globalSuggestText string - suggesters []Suggester // suggest - rescores []*Rescore // rescore - defaultRescoreWindowSize *int - indexBoosts map[string]float64 // indices_boost - stats []string // stats - innerHits map[string]*InnerHit - collapse *CollapseBuilder // collapse - profile bool // profile - // TODO extBuilders []SearchExtBuilder // ext -} - -// NewSearchSource initializes a new SearchSource. -func NewSearchSource() *SearchSource { - return &SearchSource{ - from: -1, - size: -1, - aggregations: make(map[string]Aggregation), - indexBoosts: make(map[string]float64), - innerHits: make(map[string]*InnerHit), - } -} - -// Query sets the query to use with this search source. -func (s *SearchSource) Query(query Query) *SearchSource { - s.query = query - return s -} - -// Profile specifies that this search source should activate the -// Profile API for queries made on it. 
-func (s *SearchSource) Profile(profile bool) *SearchSource { - s.profile = profile - return s -} - -// PostFilter will be executed after the query has been executed and -// only affects the search hits, not the aggregations. -// This filter is always executed as the last filtering mechanism. -func (s *SearchSource) PostFilter(postFilter Query) *SearchSource { - s.postQuery = postFilter - return s -} - -// Slice allows partitioning the documents in multiple slices. -// It is e.g. used to slice a scroll operation, supported in -// Elasticsearch 5.0 or later. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html#sliced-scroll -// for details. -func (s *SearchSource) Slice(sliceQuery Query) *SearchSource { - s.sliceQuery = sliceQuery - return s -} - -// From index to start the search from. Defaults to 0. -func (s *SearchSource) From(from int) *SearchSource { - s.from = from - return s -} - -// Size is the number of search hits to return. Defaults to 10. -func (s *SearchSource) Size(size int) *SearchSource { - s.size = size - return s -} - -// MinScore sets the minimum score below which docs will be filtered out. -func (s *SearchSource) MinScore(minScore float64) *SearchSource { - s.minScore = &minScore - return s -} - -// Explain indicates whether each search hit should be returned with -// an explanation of the hit (ranking). -func (s *SearchSource) Explain(explain bool) *SearchSource { - s.explain = &explain - return s -} - -// Version indicates whether each search hit should be returned with -// a version associated to it. -func (s *SearchSource) Version(version bool) *SearchSource { - s.version = &version - return s -} - -// SeqNoAndPrimaryTerm indicates whether SearchHits should be returned with the -// sequence number and primary term of the last modification of the document. -func (s *SearchSource) SeqNoAndPrimaryTerm(enabled bool) *SearchSource { - s.seqNoAndPrimaryTerm = &enabled - return s -} - -// Timeout controls how long a search is allowed to take, e.g. "1s" or "500ms". -func (s *SearchSource) Timeout(timeout string) *SearchSource { - s.timeout = timeout - return s -} - -// TimeoutInMillis controls how many milliseconds a search is allowed -// to take before it is canceled. -func (s *SearchSource) TimeoutInMillis(timeoutInMillis int) *SearchSource { - s.timeout = fmt.Sprintf("%dms", timeoutInMillis) - return s -} - -// TerminateAfter specifies the maximum number of documents to collect for -// each shard, upon reaching which the query execution will terminate early. -func (s *SearchSource) TerminateAfter(terminateAfter int) *SearchSource { - s.terminateAfter = &terminateAfter - return s -} - -// Sort adds a sort order. -func (s *SearchSource) Sort(field string, ascending bool) *SearchSource { - s.sorters = append(s.sorters, SortInfo{Field: field, Ascending: ascending}) - return s -} - -// SortWithInfo adds a sort order. -func (s *SearchSource) SortWithInfo(info SortInfo) *SearchSource { - s.sorters = append(s.sorters, info) - return s -} - -// SortBy adds a sort order. -func (s *SearchSource) SortBy(sorter ...Sorter) *SearchSource { - s.sorters = append(s.sorters, sorter...) - return s -} - -func (s *SearchSource) hasSort() bool { - return len(s.sorters) > 0 -} - -// TrackScores is applied when sorting and controls if scores will be -// tracked as well. Defaults to false. 
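A sketch of a SearchSource assembled from the builder methods above (Query, From, Size, Sort) plus TrackTotalHits just below; the field and index names are assumptions, and client.Search().SearchSource is the package's SearchService, not shown in this diff.

    func runSearch(ctx context.Context, client *elastic.Client) error {
        ss := elastic.NewSearchSource().
            Query(elastic.NewTermQuery("user", "olivere")).
            From(0).
            Size(25).
            Sort("event_timestamp", false). // newest first
            TrackTotalHits(true)
        _, err := client.Search().Index("file-events").SearchSource(ss).Do(ctx)
        return err
    }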
-func (s *SearchSource) TrackScores(trackScores bool) *SearchSource { - s.trackScores = &trackScores - return s -} - -// TrackTotalHits controls how the total number of hits should be tracked. -// Defaults to 10000 which will count the total hits accurately up to 10,000 hits. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-track-total-hits.html -// for details. -func (s *SearchSource) TrackTotalHits(trackTotalHits interface{}) *SearchSource { - s.trackTotalHits = trackTotalHits - return s -} - -// SearchAfter allows a different form of pagination by using a live cursor, -// using the results of the previous page to help the retrieval of the next. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-search-after.html -func (s *SearchSource) SearchAfter(sortValues ...interface{}) *SearchSource { - s.searchAfterSortValues = append(s.searchAfterSortValues, sortValues...) - return s -} - -// Aggregation adds an aggregation to perform as part of the search. -func (s *SearchSource) Aggregation(name string, aggregation Aggregation) *SearchSource { - s.aggregations[name] = aggregation - return s -} - -// DefaultRescoreWindowSize sets the rescore window size for rescores -// that don't specify their window. -func (s *SearchSource) DefaultRescoreWindowSize(defaultRescoreWindowSize int) *SearchSource { - s.defaultRescoreWindowSize = &defaultRescoreWindowSize - return s -} - -// Highlight adds highlighting to the search. -func (s *SearchSource) Highlight(highlight *Highlight) *SearchSource { - s.highlight = highlight - return s -} - -// Highlighter returns the highlighter. -func (s *SearchSource) Highlighter() *Highlight { - if s.highlight == nil { - s.highlight = NewHighlight() - } - return s.highlight -} - -// GlobalSuggestText defines the global text to use with all suggesters. -// This avoids repetition. -func (s *SearchSource) GlobalSuggestText(text string) *SearchSource { - s.globalSuggestText = text - return s -} - -// Suggester adds a suggester to the search. -func (s *SearchSource) Suggester(suggester Suggester) *SearchSource { - s.suggesters = append(s.suggesters, suggester) - return s -} - -// Rescorer adds a rescorer to the search. -func (s *SearchSource) Rescorer(rescore *Rescore) *SearchSource { - s.rescores = append(s.rescores, rescore) - return s -} - -// ClearRescorers removes all rescorers from the search. -func (s *SearchSource) ClearRescorers() *SearchSource { - s.rescores = make([]*Rescore, 0) - return s -} - -// FetchSource indicates whether the response should contain the stored -// _source for every hit. -func (s *SearchSource) FetchSource(fetchSource bool) *SearchSource { - if s.fetchSourceContext == nil { - s.fetchSourceContext = NewFetchSourceContext(fetchSource) - } else { - s.fetchSourceContext.SetFetchSource(fetchSource) - } - return s -} - -// FetchSourceContext indicates how the _source should be fetched. -func (s *SearchSource) FetchSourceContext(fetchSourceContext *FetchSourceContext) *SearchSource { - s.fetchSourceContext = fetchSourceContext - return s -} - -// FetchSourceIncludeExclude specifies that _source should be returned -// with each hit, where "include" and "exclude" serve as a simple wildcard -// matcher that gets applied to its fields -// (e.g. include := []string{"obj1.*","obj2.*"}, exclude := []string{"description.*"}). -func (s *SearchSource) FetchSourceIncludeExclude(include, exclude []string) *SearchSource { - s.fetchSourceContext = NewFetchSourceContext(true). 
- Include(include...). - Exclude(exclude...) - return s -} - -// NoStoredFields indicates that no fields should be loaded, resulting in only -// id and type to be returned per field. -func (s *SearchSource) NoStoredFields() *SearchSource { - s.storedFieldNames = []string{} - return s -} - -// StoredField adds a single field to load and return (note, must be stored) as -// part of the search request. If none are specified, the source of the -// document will be returned. -func (s *SearchSource) StoredField(storedFieldName string) *SearchSource { - s.storedFieldNames = append(s.storedFieldNames, storedFieldName) - return s -} - -// StoredFields sets the fields to load and return as part of the search request. -// If none are specified, the source of the document will be returned. -func (s *SearchSource) StoredFields(storedFieldNames ...string) *SearchSource { - s.storedFieldNames = append(s.storedFieldNames, storedFieldNames...) - return s -} - -// DocvalueField adds a single field to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) DocvalueField(fieldDataField string) *SearchSource { - s.docvalueFields = append(s.docvalueFields, DocvalueField{Field: fieldDataField}) - return s -} - -// DocvalueFieldWithFormat adds a single docvalue field to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) DocvalueFieldWithFormat(fieldDataFieldWithFormat DocvalueField) *SearchSource { - s.docvalueFields = append(s.docvalueFields, fieldDataFieldWithFormat) - return s -} - -// DocvalueFields adds one or more fields to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) DocvalueFields(docvalueFields ...string) *SearchSource { - for _, f := range docvalueFields { - s.docvalueFields = append(s.docvalueFields, DocvalueField{Field: f}) - } - return s -} - -// DocvalueFieldsWithFormat adds one or more docvalue fields to load from the field data cache -// and return as part of the search request. -func (s *SearchSource) DocvalueFieldsWithFormat(docvalueFields ...DocvalueField) *SearchSource { - s.docvalueFields = append(s.docvalueFields, docvalueFields...) - return s -} - -// ScriptField adds a single script field with the provided script. -func (s *SearchSource) ScriptField(scriptField *ScriptField) *SearchSource { - s.scriptFields = append(s.scriptFields, scriptField) - return s -} - -// ScriptFields adds one or more script fields with the provided scripts. -func (s *SearchSource) ScriptFields(scriptFields ...*ScriptField) *SearchSource { - s.scriptFields = append(s.scriptFields, scriptFields...) - return s -} - -// IndexBoost sets the boost that a specific index will receive when the -// query is executed against it. -func (s *SearchSource) IndexBoost(index string, boost float64) *SearchSource { - s.indexBoosts[index] = boost - return s -} - -// Stats groups this request will be aggregated under. -func (s *SearchSource) Stats(statsGroup ...string) *SearchSource { - s.stats = append(s.stats, statsGroup...) - return s -} - -// InnerHit adds an inner hit to return with the result. -func (s *SearchSource) InnerHit(name string, innerHit *InnerHit) *SearchSource { - s.innerHits[name] = innerHit - return s -} - -// Collapse adds field collapsing. -func (s *SearchSource) Collapse(collapse *CollapseBuilder) *SearchSource { - s.collapse = collapse - return s -} - -// Source returns the serializable JSON for the source builder. 
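A sketch of _source filtering with the methods above, reusing the include/exclude patterns from the FetchSourceIncludeExclude comment; the query, docvalue field, and function name are assumptions.

    func newSourceFilteredSearch() *elastic.SearchSource {
        return elastic.NewSearchSource().
            Query(elastic.NewTermQuery("user", "olivere")).
            FetchSourceIncludeExclude(
                []string{"obj1.*", "obj2.*"}, // include
                []string{"description.*"},    // exclude
            ).
            DocvalueField("event_timestamp")
    }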
-func (s *SearchSource) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if s.from != -1 { - source["from"] = s.from - } - if s.size != -1 { - source["size"] = s.size - } - if s.timeout != "" { - source["timeout"] = s.timeout - } - if s.terminateAfter != nil { - source["terminate_after"] = *s.terminateAfter - } - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - source["query"] = src - } - if s.postQuery != nil { - src, err := s.postQuery.Source() - if err != nil { - return nil, err - } - source["post_filter"] = src - } - if s.minScore != nil { - source["min_score"] = *s.minScore - } - if s.version != nil { - source["version"] = *s.version - } - if s.explain != nil { - source["explain"] = *s.explain - } - if s.profile { - source["profile"] = s.profile - } - if s.fetchSourceContext != nil { - src, err := s.fetchSourceContext.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - if s.storedFieldNames != nil { - switch len(s.storedFieldNames) { - case 1: - source["stored_fields"] = s.storedFieldNames[0] - default: - source["stored_fields"] = s.storedFieldNames - } - } - if len(s.docvalueFields) > 0 { - src, err := s.docvalueFields.Source() - if err != nil { - return nil, err - } - source["docvalue_fields"] = src - } - if len(s.scriptFields) > 0 { - sfmap := make(map[string]interface{}) - for _, scriptField := range s.scriptFields { - src, err := scriptField.Source() - if err != nil { - return nil, err - } - sfmap[scriptField.FieldName] = src - } - source["script_fields"] = sfmap - } - if len(s.sorters) > 0 { - var sortarr []interface{} - for _, sorter := range s.sorters { - src, err := sorter.Source() - if err != nil { - return nil, err - } - sortarr = append(sortarr, src) - } - source["sort"] = sortarr - } - if v := s.trackScores; v != nil { - source["track_scores"] = *v - } - if v := s.trackTotalHits; v != nil { - source["track_total_hits"] = v - } - if len(s.searchAfterSortValues) > 0 { - source["search_after"] = s.searchAfterSortValues - } - if s.sliceQuery != nil { - src, err := s.sliceQuery.Source() - if err != nil { - return nil, err - } - source["slice"] = src - } - if len(s.indexBoosts) > 0 { - source["indices_boost"] = s.indexBoosts - } - if len(s.aggregations) > 0 { - aggsMap := make(map[string]interface{}) - for name, aggregate := range s.aggregations { - src, err := aggregate.Source() - if err != nil { - return nil, err - } - aggsMap[name] = src - } - source["aggregations"] = aggsMap - } - if s.highlight != nil { - src, err := s.highlight.Source() - if err != nil { - return nil, err - } - source["highlight"] = src - } - if len(s.suggesters) > 0 { - suggesters := make(map[string]interface{}) - for _, s := range s.suggesters { - src, err := s.Source(false) - if err != nil { - return nil, err - } - suggesters[s.Name()] = src - } - if s.globalSuggestText != "" { - suggesters["text"] = s.globalSuggestText - } - source["suggest"] = suggesters - } - if len(s.rescores) > 0 { - // Strip empty rescores from request - var rescores []*Rescore - for _, r := range s.rescores { - if !r.IsEmpty() { - rescores = append(rescores, r) - } - } - if len(rescores) == 1 { - rescores[0].defaultRescoreWindowSize = s.defaultRescoreWindowSize - src, err := rescores[0].Source() - if err != nil { - return nil, err - } - source["rescore"] = src - } else { - var slice []interface{} - for _, r := range rescores { - r.defaultRescoreWindowSize = s.defaultRescoreWindowSize - src, err := r.Source() - if err != nil { - 
return nil, err - } - slice = append(slice, src) - } - source["rescore"] = slice - } - } - if len(s.stats) > 0 { - source["stats"] = s.stats - } - // TODO ext builders - - if s.collapse != nil { - src, err := s.collapse.Source() - if err != nil { - return nil, err - } - source["collapse"] = src - } - - if v := s.seqNoAndPrimaryTerm; v != nil { - source["seq_no_primary_term"] = *v - } - - if len(s.innerHits) > 0 { - // Top-level inner hits - // See http://www.elastic.co/guide/en/elasticsearch/reference/1.5/search-request-inner-hits.html#top-level-inner-hits - // "inner_hits": { - // "<inner_hits_name>": { - // "<path|type>": { - // "<nested-object-path-or-parent/child-type>": { - // <inner_hits_body>, - // [,"inner_hits" : { [<sub_inner_hits>]+ } ]? - // } - // } - // }, - // [,"<inner_hits_name_2>" : { ... } ]* - // } - m := make(map[string]interface{}) - for name, hit := range s.innerHits { - if hit.path != "" { - src, err := hit.Source() - if err != nil { - return nil, err - } - path := make(map[string]interface{}) - path[hit.path] = src - m[name] = map[string]interface{}{ - "path": path, - } - } else if hit.typ != "" { - src, err := hit.Source() - if err != nil { - return nil, err - } - typ := make(map[string]interface{}) - typ[hit.typ] = src - m[name] = map[string]interface{}{ - "type": typ, - } - } else { - // TODO the Java client throws here, because either path or typ must be specified - _ = m - } - } - source["inner_hits"] = m - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/search_terms_lookup.go b/vendor/github.com/olivere/elastic/v7/search_terms_lookup.go deleted file mode 100644 index f5d6541..0000000 --- a/vendor/github.com/olivere/elastic/v7/search_terms_lookup.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermsLookup encapsulates the parameters needed to fetch terms. -// -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/query-dsl-terms-query.html#query-dsl-terms-lookup. -type TermsLookup struct { - index string - typ string - id string - path string - routing string -} - -// NewTermsLookup creates and initializes a new TermsLookup. -func NewTermsLookup() *TermsLookup { - t := &TermsLookup{} - return t -} - -// Index name. -func (t *TermsLookup) Index(index string) *TermsLookup { - t.index = index - return t -} - -// Type name. -// -// Deprecated: Types are in the process of being removed. -func (t *TermsLookup) Type(typ string) *TermsLookup { - t.typ = typ - return t -} - -// Id to look up. -func (t *TermsLookup) Id(id string) *TermsLookup { - t.id = id - return t -} - -// Path to use for lookup. -func (t *TermsLookup) Path(path string) *TermsLookup { - t.path = path - return t -} - -// Routing value. -func (t *TermsLookup) Routing(routing string) *TermsLookup { - t.routing = routing - return t -} - -// Source creates the JSON source of the builder. 
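A sketch showing what SearchSource.Source above actually produces: a nested map that marshals to the request body. Only the query and size differ from their defaults here, so the output stays small; json and fmt imports are assumed.

    func printSearchBody() error {
        ss := elastic.NewSearchSource().
            Query(elastic.NewTermQuery("user", "olivere")).
            Size(10)
        src, err := ss.Source()
        if err != nil {
            return err
        }
        b, err := json.Marshal(src)
        if err != nil {
            return err
        }
        fmt.Println(string(b)) // {"query":{"term":{"user":"olivere"}},"size":10}
        return nil
    }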
-func (t *TermsLookup) Source() (interface{}, error) { - src := make(map[string]interface{}) - if t.index != "" { - src["index"] = t.index - } - if t.typ != "" { - src["type"] = t.typ - } - if t.id != "" { - src["id"] = t.id - } - if t.path != "" { - src["path"] = t.path - } - if t.routing != "" { - src["routing"] = t.routing - } - return src, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_create.go b/vendor/github.com/olivere/elastic/v7/snapshot_create.go deleted file mode 100644 index 0c3de06..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_create.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotCreateService is documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html. -type SnapshotCreateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - snapshot string - masterTimeout string - waitForCompletion *bool - bodyJson interface{} - bodyString string -} - -// NewSnapshotCreateService creates a new SnapshotCreateService. -func NewSnapshotCreateService(client *Client) *SnapshotCreateService { - return &SnapshotCreateService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotCreateService) Pretty(pretty bool) *SnapshotCreateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotCreateService) Human(human bool) *SnapshotCreateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotCreateService) ErrorTrace(errorTrace bool) *SnapshotCreateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotCreateService) FilterPath(filterPath ...string) *SnapshotCreateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotCreateService) Header(name string, value string) *SnapshotCreateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotCreateService) Headers(headers http.Header) *SnapshotCreateService { - s.headers = headers - return s -} - -// Repository is the repository name. -func (s *SnapshotCreateService) Repository(repository string) *SnapshotCreateService { - s.repository = repository - return s -} - -// Snapshot is the snapshot name. -func (s *SnapshotCreateService) Snapshot(snapshot string) *SnapshotCreateService { - s.snapshot = snapshot - return s -} - -// MasterTimeout is documented as: Explicit operation timeout for connection to master node. 
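A sketch of a terms lookup, combining the TermsLookup builder above with TermsQuery.TermsLookup from earlier in this diff: the terms are fetched from the followers field of document 2 in the users index. The index, id, and path values are assumptions.

    func newFollowerQuery() (interface{}, error) {
        lookup := elastic.NewTermsLookup().Index("users").Id("2").Path("followers")
        q := elastic.NewTermsQuery("user").TermsLookup(lookup)
        // {"terms":{"user":{"index":"users","id":"2","path":"followers"}}}
        return q.Source()
    }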
-func (s *SnapshotCreateService) MasterTimeout(masterTimeout string) *SnapshotCreateService { - s.masterTimeout = masterTimeout - return s -} - -// WaitForCompletion is documented as: Should this request wait until the operation has completed before returning. -func (s *SnapshotCreateService) WaitForCompletion(waitForCompletion bool) *SnapshotCreateService { - s.waitForCompletion = &waitForCompletion - return s -} - -// BodyJson is documented as: The snapshot definition. -func (s *SnapshotCreateService) BodyJson(body interface{}) *SnapshotCreateService { - s.bodyJson = body - return s -} - -// BodyString is documented as: The snapshot definition. -func (s *SnapshotCreateService) BodyString(body string) *SnapshotCreateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *SnapshotCreateService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_snapshot/{repository}/{snapshot}", map[string]string{ - "snapshot": s.snapshot, - "repository": s.repository, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.waitForCompletion; v != nil { - params.Set("wait_for_completion", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotCreateService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if s.snapshot == "" { - invalid = append(invalid, "Snapshot") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SnapshotCreateService) Do(ctx context.Context) (*SnapshotCreateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotCreateResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotShardFailure stores information about failures that occurred during shard snapshotting process. -type SnapshotShardFailure struct { - Index string `json:"index"` - IndexUUID string `json:"index_uuid"` - ShardID int `json:"shard_id"` - Reason string `json:"reason"` - NodeID string `json:"node_id"` - Status string `json:"status"` -} - -// SnapshotCreateResponse is the response of SnapshotCreateService.Do. -type SnapshotCreateResponse struct { - // Accepted indicates whether the request was accepted by elasticsearch. - // It's available when waitForCompletion is false. 
- Accepted *bool `json:"accepted"` - - // Snapshot is available when waitForCompletion is true. - Snapshot *Snapshot `json:"snapshot"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_create_repository.go b/vendor/github.com/olivere/elastic/v7/snapshot_create_repository.go deleted file mode 100644 index 5976b2a..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_create_repository.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotCreateRepositoryService creates a snapshot repository. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html -// for details. -type SnapshotCreateRepositoryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - masterTimeout string - timeout string - verify *bool - typ string - settings map[string]interface{} - bodyJson interface{} - bodyString string -} - -// NewSnapshotCreateRepositoryService creates a new SnapshotCreateRepositoryService. -func NewSnapshotCreateRepositoryService(client *Client) *SnapshotCreateRepositoryService { - return &SnapshotCreateRepositoryService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotCreateRepositoryService) Pretty(pretty bool) *SnapshotCreateRepositoryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotCreateRepositoryService) Human(human bool) *SnapshotCreateRepositoryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotCreateRepositoryService) ErrorTrace(errorTrace bool) *SnapshotCreateRepositoryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotCreateRepositoryService) FilterPath(filterPath ...string) *SnapshotCreateRepositoryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotCreateRepositoryService) Header(name string, value string) *SnapshotCreateRepositoryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotCreateRepositoryService) Headers(headers http.Header) *SnapshotCreateRepositoryService { - s.headers = headers - return s -} - -// Repository is the repository name. -func (s *SnapshotCreateRepositoryService) Repository(repository string) *SnapshotCreateRepositoryService { - s.repository = repository - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. 
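For orientation while reviewing these vendored removals: the SnapshotCreateService deleted above is driven through its fluent builder and Do(ctx). A minimal, self-contained sketch, assuming a reachable local cluster and an existing repository; the URL, repository name "my_repo", and snapshot name "snapshot_1" are illustrative, not values from this project:

package main

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

func main() {
	ctx := context.Background()
	// Connect to an assumed local cluster.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		panic(err)
	}
	// Create a snapshot and block until it completes, so the response
	// carries the full Snapshot document instead of just Accepted.
	res, err := elastic.NewSnapshotCreateService(client).
		Repository("my_repo").
		Snapshot("snapshot_1").
		WaitForCompletion(true).
		Do(ctx)
	if err != nil {
		panic(err)
	}
	if res.Snapshot != nil {
		fmt.Println("snapshot state:", res.Snapshot.State)
	}
}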
-func (s *SnapshotCreateRepositoryService) MasterTimeout(masterTimeout string) *SnapshotCreateRepositoryService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout is an explicit operation timeout. -func (s *SnapshotCreateRepositoryService) Timeout(timeout string) *SnapshotCreateRepositoryService { - s.timeout = timeout - return s -} - -// Verify indicates whether to verify the repository after creation. -func (s *SnapshotCreateRepositoryService) Verify(verify bool) *SnapshotCreateRepositoryService { - s.verify = &verify - return s -} - -// Type sets the snapshot repository type, e.g. "fs". -func (s *SnapshotCreateRepositoryService) Type(typ string) *SnapshotCreateRepositoryService { - s.typ = typ - return s -} - -// Settings sets all settings of the snapshot repository. -func (s *SnapshotCreateRepositoryService) Settings(settings map[string]interface{}) *SnapshotCreateRepositoryService { - s.settings = settings - return s -} - -// Setting sets a single setting of the snapshot repository. -func (s *SnapshotCreateRepositoryService) Setting(name string, value interface{}) *SnapshotCreateRepositoryService { - if s.settings == nil { - s.settings = make(map[string]interface{}) - } - s.settings[name] = value - return s -} - -// BodyJson is documented as: The repository definition. -func (s *SnapshotCreateRepositoryService) BodyJson(body interface{}) *SnapshotCreateRepositoryService { - s.bodyJson = body - return s -} - -// BodyString is documented as: The repository definition. -func (s *SnapshotCreateRepositoryService) BodyString(body string) *SnapshotCreateRepositoryService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *SnapshotCreateRepositoryService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{ - "repository": s.repository, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if v := s.verify; v != nil { - params.Set("verify", fmt.Sprint(*v)) - } - return path, params, nil -} - -// buildBody builds the body for the operation. -func (s *SnapshotCreateRepositoryService) buildBody() (interface{}, error) { - if s.bodyJson != nil { - return s.bodyJson, nil - } - if s.bodyString != "" { - return s.bodyString, nil - } - - body := map[string]interface{}{ - "type": s.typ, - } - if len(s.settings) > 0 { - body["settings"] = s.settings - } - return body, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotCreateRepositoryService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if s.bodyString == "" && s.bodyJson == nil && len(s.settings) == 0 { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation.
-func (s *SnapshotCreateRepositoryService) Do(ctx context.Context) (*SnapshotCreateRepositoryResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.buildBody() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotCreateRepositoryResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotCreateRepositoryResponse is the response of SnapshotCreateRepositoryService.Do. -type SnapshotCreateRepositoryResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_delete.go b/vendor/github.com/olivere/elastic/v7/snapshot_delete.go deleted file mode 100644 index 74cf09b..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_delete.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotDeleteService deletes a snapshot from a snapshot repository. -// It is documented at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html. -type SnapshotDeleteService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - snapshot string -} - -// NewSnapshotDeleteService creates a new SnapshotDeleteService. -func NewSnapshotDeleteService(client *Client) *SnapshotDeleteService { - return &SnapshotDeleteService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotDeleteService) Pretty(pretty bool) *SnapshotDeleteService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotDeleteService) Human(human bool) *SnapshotDeleteService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotDeleteService) ErrorTrace(errorTrace bool) *SnapshotDeleteService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotDeleteService) FilterPath(filterPath ...string) *SnapshotDeleteService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
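The SnapshotCreateRepositoryService completed above accepts either an explicit body (BodyJson/BodyString) or the Type/Setting builder consumed by buildBody. A hedged sketch registering a shared-filesystem repository; the names and path are illustrative, and an "fs" location must also be whitelisted in the cluster's path.repo setting:

// Assumes the imports and client setup from the first sketch.
func createFsRepository(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotCreateRepositoryService(client).
		Repository("my_repo").
		Type("fs").                          // repository type goes into body["type"]
		Setting("location", "/mnt/backups"). // collected into body["settings"]
		Verify(true).                        // verify the repository after creation
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("acknowledged:", res.Acknowledged)
	return nil
}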
-func (s *SnapshotDeleteService) Header(name string, value string) *SnapshotDeleteService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotDeleteService) Headers(headers http.Header) *SnapshotDeleteService { - s.headers = headers - return s -} - -// Repository is the repository name. -func (s *SnapshotDeleteService) Repository(repository string) *SnapshotDeleteService { - s.repository = repository - return s -} - -// Snapshot is the snapshot name. -func (s *SnapshotDeleteService) Snapshot(snapshot string) *SnapshotDeleteService { - s.snapshot = snapshot - return s -} - -// buildURL builds the URL for the operation. -func (s *SnapshotDeleteService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_snapshot/{repository}/{snapshot}", map[string]string{ - "repository": s.repository, - "snapshot": s.snapshot, - }) - if err != nil { - return "", url.Values{}, err - } - return path, url.Values{}, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotDeleteService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if s.snapshot == "" { - invalid = append(invalid, "Snapshot") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SnapshotDeleteService) Do(ctx context.Context) (*SnapshotDeleteResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotDeleteResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotDeleteResponse is the response of SnapshotDeleteService.Do. -type SnapshotDeleteResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_delete_repository.go b/vendor/github.com/olivere/elastic/v7/snapshot_delete_repository.go deleted file mode 100644 index 15b755c..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_delete_repository.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotDeleteRepositoryService deletes a snapshot repository. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html -// for details. 
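A matching sketch for the SnapshotDeleteService above, which simply issues DELETE /_snapshot/{repository}/{snapshot} (illustrative names; imports as in the first sketch):

func deleteSnapshot(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotDeleteService(client).
		Repository("my_repo").
		Snapshot("snapshot_1").
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("acknowledged:", res.Acknowledged)
	return nil
}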
-type SnapshotDeleteRepositoryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository []string - masterTimeout string - timeout string -} - -// NewSnapshotDeleteRepositoryService creates a new SnapshotDeleteRepositoryService. -func NewSnapshotDeleteRepositoryService(client *Client) *SnapshotDeleteRepositoryService { - return &SnapshotDeleteRepositoryService{ - client: client, - repository: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotDeleteRepositoryService) Pretty(pretty bool) *SnapshotDeleteRepositoryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotDeleteRepositoryService) Human(human bool) *SnapshotDeleteRepositoryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotDeleteRepositoryService) ErrorTrace(errorTrace bool) *SnapshotDeleteRepositoryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotDeleteRepositoryService) FilterPath(filterPath ...string) *SnapshotDeleteRepositoryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotDeleteRepositoryService) Header(name string, value string) *SnapshotDeleteRepositoryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotDeleteRepositoryService) Headers(headers http.Header) *SnapshotDeleteRepositoryService { - s.headers = headers - return s -} - -// Repository is the list of repository names. -func (s *SnapshotDeleteRepositoryService) Repository(repositories ...string) *SnapshotDeleteRepositoryService { - s.repository = append(s.repository, repositories...) - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *SnapshotDeleteRepositoryService) MasterTimeout(masterTimeout string) *SnapshotDeleteRepositoryService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout is an explicit operation timeout. -func (s *SnapshotDeleteRepositoryService) Timeout(timeout string) *SnapshotDeleteRepositoryService { - s.timeout = timeout - return s -} - -// buildURL builds the URL for the operation. 
-func (s *SnapshotDeleteRepositoryService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_snapshot/{repository}", map[string]string{ - "repository": strings.Join(s.repository, ","), - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotDeleteRepositoryService) Validate() error { - var invalid []string - if len(s.repository) == 0 { - invalid = append(invalid, "Repository") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SnapshotDeleteRepositoryService) Do(ctx context.Context) (*SnapshotDeleteRepositoryResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotDeleteRepositoryResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotDeleteRepositoryResponse is the response of SnapshotDeleteRepositoryService.Do. -type SnapshotDeleteRepositoryResponse struct { - Acknowledged bool `json:"acknowledged"` - ShardsAcknowledged bool `json:"shards_acknowledged"` - Index string `json:"index,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_get.go b/vendor/github.com/olivere/elastic/v7/snapshot_get.go deleted file mode 100644 index 02cf99e..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_get.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotGetService lists the snapshots on a repository -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html -// for details. -type SnapshotGetService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - snapshot []string - masterTimeout string - ignoreUnavailable *bool - verbose *bool -} - -// NewSnapshotGetService creates a new SnapshotGetService. 
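For the SnapshotDeleteRepositoryService above, Repository is variadic and the names are joined with commas into a single path segment. A short illustrative sketch:

func deleteRepositories(ctx context.Context, client *elastic.Client) error {
	// Unregisters the repositories; the snapshot data itself is not deleted.
	res, err := elastic.NewSnapshotDeleteRepositoryService(client).
		Repository("my_repo", "my_old_repo"). // illustrative names
		Do(ctx)
	if err != nil {
		return err
	}
	fmt.Println("acknowledged:", res.Acknowledged)
	return nil
}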
-func NewSnapshotGetService(client *Client) *SnapshotGetService { - return &SnapshotGetService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotGetService) Pretty(pretty bool) *SnapshotGetService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotGetService) Human(human bool) *SnapshotGetService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotGetService) ErrorTrace(errorTrace bool) *SnapshotGetService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotGetService) FilterPath(filterPath ...string) *SnapshotGetService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotGetService) Header(name string, value string) *SnapshotGetService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotGetService) Headers(headers http.Header) *SnapshotGetService { - s.headers = headers - return s -} - -// Repository is the repository name. -func (s *SnapshotGetService) Repository(repository string) *SnapshotGetService { - s.repository = repository - return s -} - -// Snapshot is the list of snapshot names. If not set, defaults to all snapshots. -func (s *SnapshotGetService) Snapshot(snapshots ...string) *SnapshotGetService { - s.snapshot = append(s.snapshot, snapshots...) - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *SnapshotGetService) MasterTimeout(masterTimeout string) *SnapshotGetService { - s.masterTimeout = masterTimeout - return s -} - -// IgnoreUnavailable specifies whether to ignore unavailable snapshots, defaults to false -func (s *SnapshotGetService) IgnoreUnavailable(ignoreUnavailable bool) *SnapshotGetService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Verbose specifies whether to show verbose snapshot info or only show the basic info found in the repository index blob -func (s *SnapshotGetService) Verbose(verbose bool) *SnapshotGetService { - s.verbose = &verbose - return s -} - -// buildURL builds the URL for the operation. 
-func (s *SnapshotGetService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.snapshot) > 0 { - path, err = uritemplates.Expand("/_snapshot/{repository}/{snapshot}", map[string]string{ - "repository": s.repository, - "snapshot": strings.Join(s.snapshot, ","), - }) - } else { - path, err = uritemplates.Expand("/_snapshot/{repository}/_all", map[string]string{ - "repository": s.repository, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - if v := s.verbose; v != nil { - params.Set("verbose", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotGetService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SnapshotGetService) Do(ctx context.Context) (*SnapshotGetResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotGetResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotGetResponse is the response of SnapshotGetService.Do. -type SnapshotGetResponse struct { - Snapshots []*Snapshot `json:"snapshots"` -} - -// Snapshot contains all information about a single snapshot -type Snapshot struct { - Snapshot string `json:"snapshot"` - UUID string `json:"uuid"` - VersionID int `json:"version_id"` - Version string `json:"version"` - Indices []string `json:"indices"` - State string `json:"state"` - Reason string `json:"reason"` - StartTime time.Time `json:"start_time"` - StartTimeInMillis int64 `json:"start_time_in_millis"` - EndTime time.Time `json:"end_time"` - EndTimeInMillis int64 `json:"end_time_in_millis"` - DurationInMillis int64 `json:"duration_in_millis"` - Failures []SnapshotShardFailure `json:"failures"` - Shards *ShardsInfo `json:"shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_get_repository.go b/vendor/github.com/olivere/elastic/v7/snapshot_get_repository.go deleted file mode 100644 index 2860356..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_get_repository.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
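A sketch for the SnapshotGetService above; when Snapshot(...) is omitted, buildURL expands to /_snapshot/{repository}/_all and every snapshot in the repository is returned (names illustrative):

func listSnapshots(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotGetService(client).
		Repository("my_repo").
		Verbose(true). // include state, timing, and shard details
		Do(ctx)
	if err != nil {
		return err
	}
	for _, snap := range res.Snapshots {
		fmt.Printf("%s: %s (%d indices)\n", snap.Snapshot, snap.State, len(snap.Indices))
	}
	return nil
}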
- -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotGetRepositoryService reads a snapshot repository. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html -// for details. -type SnapshotGetRepositoryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository []string - local *bool - masterTimeout string -} - -// NewSnapshotGetRepositoryService creates a new SnapshotGetRepositoryService. -func NewSnapshotGetRepositoryService(client *Client) *SnapshotGetRepositoryService { - return &SnapshotGetRepositoryService{ - client: client, - repository: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotGetRepositoryService) Pretty(pretty bool) *SnapshotGetRepositoryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotGetRepositoryService) Human(human bool) *SnapshotGetRepositoryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotGetRepositoryService) ErrorTrace(errorTrace bool) *SnapshotGetRepositoryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotGetRepositoryService) FilterPath(filterPath ...string) *SnapshotGetRepositoryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotGetRepositoryService) Header(name string, value string) *SnapshotGetRepositoryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotGetRepositoryService) Headers(headers http.Header) *SnapshotGetRepositoryService { - s.headers = headers - return s -} - -// Repository is the list of repository names. -func (s *SnapshotGetRepositoryService) Repository(repositories ...string) *SnapshotGetRepositoryService { - s.repository = append(s.repository, repositories...) - return s -} - -// Local indicates whether to return local information, i.e. do not retrieve the state from master node (default: false). -func (s *SnapshotGetRepositoryService) Local(local bool) *SnapshotGetRepositoryService { - s.local = &local - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *SnapshotGetRepositoryService) MasterTimeout(masterTimeout string) *SnapshotGetRepositoryService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. 
-func (s *SnapshotGetRepositoryService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.repository) > 0 { - path, err = uritemplates.Expand("/_snapshot/{repository}", map[string]string{ - "repository": strings.Join(s.repository, ","), - }) - } else { - path = "/_snapshot" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotGetRepositoryService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *SnapshotGetRepositoryService) Do(ctx context.Context) (SnapshotGetRepositoryResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret SnapshotGetRepositoryResponse - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotGetRepositoryResponse is the response of SnapshotGetRepositoryService.Do. -type SnapshotGetRepositoryResponse map[string]*SnapshotRepositoryMetaData - -// SnapshotRepositoryMetaData contains all information about -// a single snapshot repository. -type SnapshotRepositoryMetaData struct { - Type string `json:"type"` - Settings map[string]interface{} `json:"settings,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_restore.go b/vendor/github.com/olivere/elastic/v7/snapshot_restore.go deleted file mode 100644 index 1d0b8c0..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_restore.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotRestoreService restores a snapshot from a snapshot repository. -// -// It is documented at -// https://www.elastic.co/guide/en/elasticsearch/reference/7.1/modules-snapshots.html#_restore. 
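For the SnapshotGetRepositoryService above, the response is a map keyed by repository name; with no Repository(...) argument the request falls back to plain /_snapshot and lists all repositories. An illustrative sketch:

func showRepositories(ctx context.Context, client *elastic.Client) error {
	repos, err := elastic.NewSnapshotGetRepositoryService(client).Do(ctx)
	if err != nil {
		return err
	}
	for name, meta := range repos {
		fmt.Printf("%s: type=%s settings=%v\n", name, meta.Type, meta.Settings)
	}
	return nil
}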
-type SnapshotRestoreService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - snapshot string - masterTimeout string - waitForCompletion *bool - ignoreUnavailable *bool - partial *bool - includeAliases *bool - includeGlobalState *bool - bodyString string - renamePattern string - renameReplacement string - indices []string - indexSettings map[string]interface{} -} - -// NewSnapshotRestoreService creates a new SnapshotRestoreService. -func NewSnapshotRestoreService(client *Client) *SnapshotRestoreService { - return &SnapshotRestoreService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotRestoreService) Pretty(pretty bool) *SnapshotRestoreService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotRestoreService) Human(human bool) *SnapshotRestoreService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotRestoreService) ErrorTrace(errorTrace bool) *SnapshotRestoreService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotRestoreService) FilterPath(filterPath ...string) *SnapshotRestoreService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotRestoreService) Header(name string, value string) *SnapshotRestoreService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotRestoreService) Headers(headers http.Header) *SnapshotRestoreService { - s.headers = headers - return s -} - -// Repository name. -func (s *SnapshotRestoreService) Repository(repository string) *SnapshotRestoreService { - s.repository = repository - return s -} - -// Snapshot name. -func (s *SnapshotRestoreService) Snapshot(snapshot string) *SnapshotRestoreService { - s.snapshot = snapshot - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *SnapshotRestoreService) MasterTimeout(masterTimeout string) *SnapshotRestoreService { - s.masterTimeout = masterTimeout - return s -} - -// WaitForCompletion indicates whether this request should wait until the operation has -// completed before returning. -func (s *SnapshotRestoreService) WaitForCompletion(waitForCompletion bool) *SnapshotRestoreService { - s.waitForCompletion = &waitForCompletion - return s -} - -// Indices sets the name of the indices that should be restored from the snapshot. -func (s *SnapshotRestoreService) Indices(indices ...string) *SnapshotRestoreService { - s.indices = indices - return s -} - -// IncludeGlobalState allows the global cluster state to be restored, defaults to false. -func (s *SnapshotRestoreService) IncludeGlobalState(includeGlobalState bool) *SnapshotRestoreService { - s.includeGlobalState = &includeGlobalState - return s -} - -// RenamePattern helps rename indices on restore using regular expressions.
-func (s *SnapshotRestoreService) RenamePattern(renamePattern string) *SnapshotRestoreService { - s.renamePattern = renamePattern - return s -} - -// RenameReplacement, like RenamePattern, helps rename indices on restore using regular expressions. -func (s *SnapshotRestoreService) RenameReplacement(renameReplacement string) *SnapshotRestoreService { - s.renameReplacement = renameReplacement - return s -} - -// Partial indicates whether to restore indices that were partially snapshotted, defaults to false. -func (s *SnapshotRestoreService) Partial(partial bool) *SnapshotRestoreService { - s.partial = &partial - return s -} - -// BodyString allows the user to specify the body of the HTTP request manually. -func (s *SnapshotRestoreService) BodyString(body string) *SnapshotRestoreService { - s.bodyString = body - return s -} - -// IndexSettings sets the settings to be overwritten during the restore process. -func (s *SnapshotRestoreService) IndexSettings(indexSettings map[string]interface{}) *SnapshotRestoreService { - s.indexSettings = indexSettings - return s -} - -// IncludeAliases flags whether indices should be restored with their respective aliases, -// defaults to false. -func (s *SnapshotRestoreService) IncludeAliases(includeAliases bool) *SnapshotRestoreService { - s.includeAliases = &includeAliases - return s -} - -// IgnoreUnavailable specifies whether to ignore unavailable snapshots, defaults to false. -func (s *SnapshotRestoreService) IgnoreUnavailable(ignoreUnavailable bool) *SnapshotRestoreService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Do executes the operation. -func (s *SnapshotRestoreService) Do(ctx context.Context) (*SnapshotRestoreResponse, error) { - if err := s.Validate(); err != nil { - return nil, err - } - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - var body interface{} - if len(s.bodyString) > 0 { - body = s.bodyString - } else { - body = s.buildBody() - } - - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - ret := new(SnapshotRestoreResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// Validate checks if the operation is valid.
-func (s *SnapshotRestoreService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if s.snapshot == "" { - invalid = append(invalid, "Snapshot") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -func (s *SnapshotRestoreService) buildURL() (string, url.Values, error) { - path, err := uritemplates.Expand("/_snapshot/{repository}/{snapshot}/_restore", map[string]string{ - "snapshot": s.snapshot, - "repository": s.repository, - }) - if err != nil { - return "", url.Values{}, err - } - - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.waitForCompletion; v != nil { - params.Set("wait_for_completion", fmt.Sprint(*v)) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - return path, params, nil -} - -func (s *SnapshotRestoreService) buildBody() interface{} { - body := map[string]interface{}{} - - if s.includeGlobalState != nil { - body["include_global_state"] = *s.includeGlobalState - } - if s.partial != nil { - body["partial"] = *s.partial - } - if s.includeAliases != nil { - body["include_aliases"] = *s.includeAliases - } - if len(s.indices) > 0 { - body["indices"] = strings.Join(s.indices, ",") - } - if len(s.renamePattern) > 0 { - body["rename_pattern"] = s.renamePattern - } - if len(s.renameReplacement) > 0 { - body["rename_replacement"] = s.renameReplacement - } - if len(s.indexSettings) > 0 { - body["index_settings"] = s.indexSettings - } - return body -} - -// SnapshotRestoreResponse represents the response for SnapshotRestoreService.Do -type SnapshotRestoreResponse struct { - // Accepted indicates whether the request was accepted by Elasticsearch. - Accepted *bool `json:"accepted"` - - // Snapshot information. - Snapshot *RestoreInfo `json:"snapshot"` -} - -// RestoreInfo represents information about the restored snapshot. -type RestoreInfo struct { - Snapshot string `json:"snapshot"` - Indices []string `json:"indices"` - Shards ShardsInfo `json:"shards"` -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_status.go b/vendor/github.com/olivere/elastic/v7/snapshot_status.go deleted file mode 100644 index c03ca37..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_status.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotStatusService returns information about the status of a snapshot. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/modules-snapshots.html -// for details.
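A hedged usage sketch for the SnapshotRestoreService above, restoring one index under a new name via rename_pattern/rename_replacement (repository, snapshot, and index names are illustrative):

func restoreRenamed(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotRestoreService(client).
		Repository("my_repo").
		Snapshot("snapshot_1").
		Indices("file-events").           // restore only this index
		RenamePattern("(.+)").            // capture the original index name
		RenameReplacement("restored_$1"). // restore it as restored_file-events
		WaitForCompletion(true).
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Snapshot != nil {
		fmt.Println("restored indices:", res.Snapshot.Indices)
	}
	return nil
}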
-type SnapshotStatusService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - snapshot []string - masterTimeout string - ignoreUnavailable *bool -} - -// NewSnapshotStatusService creates a new SnapshotStatusService. -func NewSnapshotStatusService(client *Client) *SnapshotStatusService { - return &SnapshotStatusService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotStatusService) Pretty(pretty bool) *SnapshotStatusService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotStatusService) Human(human bool) *SnapshotStatusService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotStatusService) ErrorTrace(errorTrace bool) *SnapshotStatusService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotStatusService) FilterPath(filterPath ...string) *SnapshotStatusService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotStatusService) Header(name string, value string) *SnapshotStatusService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *SnapshotStatusService) Headers(headers http.Header) *SnapshotStatusService { - s.headers = headers - return s -} - -// Repository is the repository name. -func (s *SnapshotStatusService) Repository(repository string) *SnapshotStatusService { - s.repository = repository - return s -} - -// Snapshot is the list of snapshot names. If not set, defaults to all snapshots. -func (s *SnapshotStatusService) Snapshot(snapshots ...string) *SnapshotStatusService { - s.snapshot = append(s.snapshot, snapshots...) - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *SnapshotStatusService) MasterTimeout(masterTimeout string) *SnapshotStatusService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. 
-func (s *SnapshotStatusService) buildURL() (string, url.Values, error) { - var err error - var path string - - if s.repository != "" { - if len(s.snapshot) > 0 { - path, err = uritemplates.Expand("/_snapshot/{repository}/{snapshot}/_status", map[string]string{ - "repository": s.repository, - "snapshot": strings.Join(s.snapshot, ","), - }) - } else { - path, err = uritemplates.Expand("/_snapshot/{repository}/_status", map[string]string{ - "repository": s.repository, - }) - } - } else { - path, err = uritemplates.Expand("/_snapshot/_status", nil) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -// -// Validation only fails if snapshot names were provided but no repository was -// provided. -func (s *SnapshotStatusService) Validate() error { - if len(s.snapshot) > 0 && s.repository == "" { - return fmt.Errorf("snapshots were specified but repository is missing") - } - return nil -} - -// Do executes the operation. -func (s *SnapshotStatusService) Do(ctx context.Context) (*SnapshotStatusResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotStatusResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -type SnapshotStatusResponse struct { - Snapshots []SnapshotStatus `json:"snapshots"` -} - -type SnapshotStatus struct { - Snapshot string `json:"snapshot"` - Repository string `json:"repository"` - UUID string `json:"uuid"` - State string `json:"state"` - IncludeGlobalState bool `json:"include_global_state"` - ShardsStats SnapshotShardsStats `json:"shards_stats"` - Stats SnapshotStats `json:"stats"` - Indices map[string]SnapshotIndexStatus `json:"indices"` -} - -type SnapshotShardsStats struct { - Initializing int `json:"initializing"` - Started int `json:"started"` - Finalizing int `json:"finalizing"` - Done int `json:"done"` - Failed int `json:"failed"` - Total int `json:"total"` -} - -type SnapshotStats struct { - Incremental struct { - FileCount int `json:"file_count"` - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"incremental"` - - Processed struct { - FileCount int `json:"file_count"` - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"processed"` - - Total struct { - FileCount int `json:"file_count"` - Size string `json:"size"` - SizeInBytes int64 `json:"size_in_bytes"` - } `json:"total"` - - StartTime string `json:"start_time"` - StartTimeInMillis int64 `json:"start_time_in_millis"` - - Time string `json:"time"` - 
TimeInMillis int64 `json:"time_in_millis"` - - NumberOfFiles int `json:"number_of_files"` - ProcessedFiles int `json:"processed_files"` - - TotalSize string `json:"total_size"` - TotalSizeInBytes int64 `json:"total_size_in_bytes"` -} - -type SnapshotIndexStatus struct { - ShardsStats SnapshotShardsStats `json:"shards_stats"` - Stats SnapshotStats `json:"stats"` - Shards map[string]SnapshotIndexShardStatus `json:"shards"` -} - -type SnapshotIndexShardStatus struct { - Stage string `json:"stage"` // initializing, started, finalize, done, or failed - Stats SnapshotStats `json:"stats"` - Node string `json:"node"` - Reason string `json:"reason"` // reason for failure -} diff --git a/vendor/github.com/olivere/elastic/v7/snapshot_verify_repository.go b/vendor/github.com/olivere/elastic/v7/snapshot_verify_repository.go deleted file mode 100644 index ca8eab2..0000000 --- a/vendor/github.com/olivere/elastic/v7/snapshot_verify_repository.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// SnapshotVerifyRepositoryService verifies a snapshot repository. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-snapshots.html -// for details. -type SnapshotVerifyRepositoryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - repository string - masterTimeout string - timeout string -} - -// NewSnapshotVerifyRepositoryService creates a new SnapshotVerifyRepositoryService. -func NewSnapshotVerifyRepositoryService(client *Client) *SnapshotVerifyRepositoryService { - return &SnapshotVerifyRepositoryService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *SnapshotVerifyRepositoryService) Pretty(pretty bool) *SnapshotVerifyRepositoryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *SnapshotVerifyRepositoryService) Human(human bool) *SnapshotVerifyRepositoryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *SnapshotVerifyRepositoryService) ErrorTrace(errorTrace bool) *SnapshotVerifyRepositoryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *SnapshotVerifyRepositoryService) FilterPath(filterPath ...string) *SnapshotVerifyRepositoryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *SnapshotVerifyRepositoryService) Header(name string, value string) *SnapshotVerifyRepositoryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request.
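For the SnapshotStatusService whose response types appear above, a short sketch reporting shard-level progress of a running or finished snapshot (names illustrative):

func snapshotProgress(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotStatusService(client).
		Repository("my_repo").
		Snapshot("snapshot_1").
		Do(ctx)
	if err != nil {
		return err
	}
	for _, st := range res.Snapshots {
		fmt.Printf("%s: %s, shards %d/%d done\n",
			st.Snapshot, st.State, st.ShardsStats.Done, st.ShardsStats.Total)
	}
	return nil
}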
-func (s *SnapshotVerifyRepositoryService) Headers(headers http.Header) *SnapshotVerifyRepositoryService { - s.headers = headers - return s -} - -// Repository specifies the repository name. -func (s *SnapshotVerifyRepositoryService) Repository(repository string) *SnapshotVerifyRepositoryService { - s.repository = repository - return s -} - -// MasterTimeout is the explicit operation timeout for connection to master node. -func (s *SnapshotVerifyRepositoryService) MasterTimeout(masterTimeout string) *SnapshotVerifyRepositoryService { - s.masterTimeout = masterTimeout - return s -} - -// Timeout is an explicit operation timeout. -func (s *SnapshotVerifyRepositoryService) Timeout(timeout string) *SnapshotVerifyRepositoryService { - s.timeout = timeout - return s -} - -// buildURL builds the URL for the operation. -func (s *SnapshotVerifyRepositoryService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_snapshot/{repository}/_verify", map[string]string{ - "repository": s.repository, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *SnapshotVerifyRepositoryService) Validate() error { - var invalid []string - if s.repository == "" { - invalid = append(invalid, "Repository") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *SnapshotVerifyRepositoryService) Do(ctx context.Context) (*SnapshotVerifyRepositoryResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(SnapshotVerifyRepositoryResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// SnapshotVerifyRepositoryResponse is the response of SnapshotVerifyRepositoryService.Do. -type SnapshotVerifyRepositoryResponse struct { - Nodes map[string]*SnapshotVerifyRepositoryNode `json:"nodes"` -} - -type SnapshotVerifyRepositoryNode struct { - Name string `json:"name"` -} diff --git a/vendor/github.com/olivere/elastic/v7/sort.go b/vendor/github.com/olivere/elastic/v7/sort.go deleted file mode 100644 index 60cc0a1..0000000 --- a/vendor/github.com/olivere/elastic/v7/sort.go +++ /dev/null @@ -1,654 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// -- Sorter -- - -// Sorter is an interface for sorting strategies, e.g. ScoreSort or FieldSort. 
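And for the SnapshotVerifyRepositoryService completed above, a sketch that checks a repository is accessible from the cluster's nodes (repository name illustrative):

func verifyRepository(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewSnapshotVerifyRepositoryService(client).
		Repository("my_repo").
		Do(ctx)
	if err != nil {
		return err
	}
	for id, node := range res.Nodes {
		fmt.Printf("verified on node %s (%s)\n", node.Name, id)
	}
	return nil
}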
-// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-sort.html. -type Sorter interface { - Source() (interface{}, error) -} - -// -- SortInfo -- - -// SortInfo contains information about sorting a field. -type SortInfo struct { - Sorter - Field string - Ascending bool - Missing interface{} - IgnoreUnmapped *bool - UnmappedType string - SortMode string - NestedFilter Query // deprecated in 6.1 and replaced by Filter - Filter Query - NestedPath string // deprecated in 6.1 and replaced by Path - Path string - NestedSort *NestedSort // deprecated in 6.1 and replaced by Nested - Nested *NestedSort -} - -func (info SortInfo) Source() (interface{}, error) { - prop := make(map[string]interface{}) - if info.Ascending { - prop["order"] = "asc" - } else { - prop["order"] = "desc" - } - if info.Missing != nil { - prop["missing"] = info.Missing - } - if info.IgnoreUnmapped != nil { - prop["ignore_unmapped"] = *info.IgnoreUnmapped - } - if info.UnmappedType != "" { - prop["unmapped_type"] = info.UnmappedType - } - if info.SortMode != "" { - prop["mode"] = info.SortMode - } - if info.Filter != nil { - src, err := info.Filter.Source() - if err != nil { - return nil, err - } - prop["filter"] = src - } else if info.NestedFilter != nil { - src, err := info.NestedFilter.Source() - if err != nil { - return nil, err - } - prop["nested_filter"] = src // deprecated in 6.1 - } - if info.Path != "" { - prop["path"] = info.Path - } else if info.NestedPath != "" { - prop["nested_path"] = info.NestedPath // deprecated in 6.1 - } - if info.Nested != nil { - src, err := info.Nested.Source() - if err != nil { - return nil, err - } - prop["nested"] = src - } else if info.NestedSort != nil { - src, err := info.NestedSort.Source() - if err != nil { - return nil, err - } - prop["nested"] = src - } - source := make(map[string]interface{}) - source[info.Field] = prop - return source, nil -} - -// -- SortByDoc -- - -// SortByDoc sorts by the "_doc" field, as described in -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-scroll.html. -// -// Example: -// ss := elastic.NewSearchSource() -// ss = ss.SortBy(elastic.SortByDoc{}) -type SortByDoc struct { - Sorter -} - -// Source returns the JSON-serializable data. -func (s SortByDoc) Source() (interface{}, error) { - return "_doc", nil -} - -// -- ScoreSort -- - -// ScoreSort sorts by relevancy score. -type ScoreSort struct { - Sorter - ascending bool -} - -// NewScoreSort creates a new ScoreSort. -func NewScoreSort() *ScoreSort { - return &ScoreSort{ascending: false} // Descending by default! -} - -// Order defines whether sorting ascending (default) or descending. -func (s *ScoreSort) Order(ascending bool) *ScoreSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s *ScoreSort) Asc() *ScoreSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s *ScoreSort) Desc() *ScoreSort { - s.ascending = false - return s -} - -// Source returns the JSON-serializable data. -func (s *ScoreSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_score"] = x - if s.ascending { - x["order"] = "asc" - } else { - x["order"] = "desc" - } - return source, nil -} - -// -- FieldSort -- - -// FieldSort sorts by a given field. 
-type FieldSort struct { - Sorter - fieldName string - ascending bool - missing interface{} - unmappedType *string - sortMode *string - filter Query - path *string - nested *NestedSort -} - -// NewFieldSort creates a new FieldSort. -func NewFieldSort(fieldName string) *FieldSort { - return &FieldSort{ - fieldName: fieldName, - ascending: true, - } -} - -// FieldName specifies the name of the field to be used for sorting. -func (s *FieldSort) FieldName(fieldName string) *FieldSort { - s.fieldName = fieldName - return s -} - -// Order defines whether sorting ascending (default) or descending. -func (s *FieldSort) Order(ascending bool) *FieldSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s *FieldSort) Asc() *FieldSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s *FieldSort) Desc() *FieldSort { - s.ascending = false - return s -} - -// Missing sets the value to be used when a field is missing in a document. -// You can also use "_last" or "_first" to sort missing last or first -// respectively. -func (s *FieldSort) Missing(missing interface{}) *FieldSort { - s.missing = missing - return s -} - -// UnmappedType sets the type to use when the current field is not mapped -// in an index. -func (s *FieldSort) UnmappedType(typ string) *FieldSort { - s.unmappedType = &typ - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min, max, sum, and avg. -func (s *FieldSort) SortMode(sortMode string) *FieldSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -// Deprecated: Use Filter instead. -func (s *FieldSort) NestedFilter(nestedFilter Query) *FieldSort { - s.filter = nestedFilter - return s -} - -// Filter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s *FieldSort) Filter(filter Query) *FieldSort { - s.filter = filter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -// Deprecated: Use Path instead. -func (s *FieldSort) NestedPath(nestedPath string) *FieldSort { - s.path = &nestedPath - return s -} - -// Path is used if sorting occurs on a field that is inside a -// nested object. -func (s *FieldSort) Path(path string) *FieldSort { - s.path = &path - return s -} - -// NestedSort is available starting with 6.1 and will replace NestedFilter -// and NestedPath. -// Deprecated: Use Nested instead. -func (s *FieldSort) NestedSort(nestedSort *NestedSort) *FieldSort { - s.nested = nestedSort - return s -} - -// Nested is available starting with 6.1 and will replace Filter and Path. -func (s *FieldSort) Nested(nested *NestedSort) *FieldSort { - s.nested = nested - return s -} - -// Source returns the JSON-serializable data. 
-func (s *FieldSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source[s.fieldName] = x - if s.ascending { - x["order"] = "asc" - } else { - x["order"] = "desc" - } - if s.missing != nil { - x["missing"] = s.missing - } - if s.unmappedType != nil { - x["unmapped_type"] = *s.unmappedType - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.filter != nil { - src, err := s.filter.Source() - if err != nil { - return nil, err - } - x["filter"] = src - } - if s.path != nil { - x["path"] = *s.path - } - if s.nested != nil { - src, err := s.nested.Source() - if err != nil { - return nil, err - } - x["nested"] = src - } - return source, nil -} - -// -- GeoDistanceSort -- - -// GeoDistanceSort allows for sorting by geographic distance. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-sort.html#_geo_distance_sorting. -type GeoDistanceSort struct { - Sorter - fieldName string - points []*GeoPoint - geohashes []string - distanceType *string - unit string - ascending bool - sortMode *string - nestedFilter Query - nestedPath *string - nestedSort *NestedSort -} - -// NewGeoDistanceSort creates a new sorter for geo distances. -func NewGeoDistanceSort(fieldName string) *GeoDistanceSort { - return &GeoDistanceSort{ - fieldName: fieldName, - ascending: true, - } -} - -// FieldName specifies the name of the (geo) field to use for sorting. -func (s *GeoDistanceSort) FieldName(fieldName string) *GeoDistanceSort { - s.fieldName = fieldName - return s -} - -// Order defines whether sorting ascending (default) or descending. -func (s *GeoDistanceSort) Order(ascending bool) *GeoDistanceSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s *GeoDistanceSort) Asc() *GeoDistanceSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s *GeoDistanceSort) Desc() *GeoDistanceSort { - s.ascending = false - return s -} - -// Point specifies a point to create the range distance aggregations from. -func (s *GeoDistanceSort) Point(lat, lon float64) *GeoDistanceSort { - s.points = append(s.points, GeoPointFromLatLon(lat, lon)) - return s -} - -// Points specifies the geo point(s) to create the range distance aggregations from. -func (s *GeoDistanceSort) Points(points ...*GeoPoint) *GeoDistanceSort { - s.points = append(s.points, points...) - return s -} - -// GeoHashes specifies the geo point to create the range distance aggregations from. -func (s *GeoDistanceSort) GeoHashes(geohashes ...string) *GeoDistanceSort { - s.geohashes = append(s.geohashes, geohashes...) - return s -} - -// Unit specifies the distance unit to use. It defaults to km. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/common-options.html#distance-units -// for details. -func (s *GeoDistanceSort) Unit(unit string) *GeoDistanceSort { - s.unit = unit - return s -} - -// GeoDistance is an alias for DistanceType. -func (s *GeoDistanceSort) GeoDistance(geoDistance string) *GeoDistanceSort { - return s.DistanceType(geoDistance) -} - -// DistanceType describes how to compute the distance, e.g. "arc" or "plane". -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-request-sort.html#geo-sorting -// for details. 
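
A similar hedged sketch for the geo-distance sorter defined here, assuming a geo_point field named "location"; the coordinates and index name are illustrative:

    // Order results nearest-first relative to a reference point.
    geoSorter := elastic.NewGeoDistanceSort("location").
        Point(40.7128, -74.0060).
        Unit("km").
        DistanceType("arc").
        Asc()
    res, err := client.Search().Index("places").SortBy(geoSorter).Do(ctx)
    if err != nil {
        // handle the error
    }
    _ = res
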
-func (s *GeoDistanceSort) DistanceType(distanceType string) *GeoDistanceSort { - s.distanceType = &distanceType - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min, max, sum, and avg. -func (s *GeoDistanceSort) SortMode(sortMode string) *GeoDistanceSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s *GeoDistanceSort) NestedFilter(nestedFilter Query) *GeoDistanceSort { - s.nestedFilter = nestedFilter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -func (s *GeoDistanceSort) NestedPath(nestedPath string) *GeoDistanceSort { - s.nestedPath = &nestedPath - return s -} - -// NestedSort is available starting with 6.1 and will replace NestedFilter -// and NestedPath. -func (s *GeoDistanceSort) NestedSort(nestedSort *NestedSort) *GeoDistanceSort { - s.nestedSort = nestedSort - return s -} - -// Source returns the JSON-serializable data. -func (s *GeoDistanceSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_geo_distance"] = x - - // Points - var ptarr []interface{} - for _, pt := range s.points { - ptarr = append(ptarr, pt.Source()) - } - for _, geohash := range s.geohashes { - ptarr = append(ptarr, geohash) - } - x[s.fieldName] = ptarr - - if s.unit != "" { - x["unit"] = s.unit - } - if s.distanceType != nil { - x["distance_type"] = *s.distanceType - } - - if s.ascending { - x["order"] = "asc" - } else { - x["order"] = "desc" - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.nestedFilter != nil { - src, err := s.nestedFilter.Source() - if err != nil { - return nil, err - } - x["nested_filter"] = src - } - if s.nestedPath != nil { - x["nested_path"] = *s.nestedPath - } - if s.nestedSort != nil { - src, err := s.nestedSort.Source() - if err != nil { - return nil, err - } - x["nested"] = src - } - return source, nil -} - -// -- ScriptSort -- - -// ScriptSort sorts by a custom script. See -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/modules-scripting.html#modules-scripting -// for details about scripting. -type ScriptSort struct { - Sorter - script *Script - typ string - ascending bool - sortMode *string - nestedFilter Query - nestedPath *string - nestedSort *NestedSort -} - -// NewScriptSort creates and initializes a new ScriptSort. -// You must provide a script and a type, e.g. "string" or "number". -func NewScriptSort(script *Script, typ string) *ScriptSort { - return &ScriptSort{ - script: script, - typ: typ, - ascending: true, - } -} - -// Type sets the script type, which can be either "string" or "number". -func (s *ScriptSort) Type(typ string) *ScriptSort { - s.typ = typ - return s -} - -// Order defines whether sorting ascending (default) or descending. -func (s *ScriptSort) Order(ascending bool) *ScriptSort { - s.ascending = ascending - return s -} - -// Asc sets ascending sort order. -func (s *ScriptSort) Asc() *ScriptSort { - s.ascending = true - return s -} - -// Desc sets descending sort order. -func (s *ScriptSort) Desc() *ScriptSort { - s.ascending = false - return s -} - -// SortMode specifies what values to pick in case a document contains -// multiple values for the targeted sort field. Possible values are: -// min or max. 
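
For the script-based sorter, one short sketch; the inline Painless expression and its parameter are assumptions for illustration only:

    // Sort by a computed numeric value rather than a stored field.
    script := elastic.NewScript("doc['popularity'].value * params.factor").
        Param("factor", 1.2)
    scriptSorter := elastic.NewScriptSort(script, "number").Desc()
    // scriptSorter is then passed to SearchService.SortBy like any other Sorter.
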
-func (s *ScriptSort) SortMode(sortMode string) *ScriptSort { - s.sortMode = &sortMode - return s -} - -// NestedFilter sets a filter that nested objects should match with -// in order to be taken into account for sorting. -func (s *ScriptSort) NestedFilter(nestedFilter Query) *ScriptSort { - s.nestedFilter = nestedFilter - return s -} - -// NestedPath is used if sorting occurs on a field that is inside a -// nested object. -func (s *ScriptSort) NestedPath(nestedPath string) *ScriptSort { - s.nestedPath = &nestedPath - return s -} - -// NestedSort is available starting with 6.1 and will replace NestedFilter -// and NestedPath. -func (s *ScriptSort) NestedSort(nestedSort *NestedSort) *ScriptSort { - s.nestedSort = nestedSort - return s -} - -// Source returns the JSON-serializable data. -func (s *ScriptSort) Source() (interface{}, error) { - if s.script == nil { - return nil, errors.New("ScriptSort expected a script") - } - source := make(map[string]interface{}) - x := make(map[string]interface{}) - source["_script"] = x - - src, err := s.script.Source() - if err != nil { - return nil, err - } - x["script"] = src - - x["type"] = s.typ - - if s.ascending { - x["order"] = "asc" - } else { - x["order"] = "desc" - } - if s.sortMode != nil { - x["mode"] = *s.sortMode - } - if s.nestedFilter != nil { - src, err := s.nestedFilter.Source() - if err != nil { - return nil, err - } - x["nested_filter"] = src - } - if s.nestedPath != nil { - x["nested_path"] = *s.nestedPath - } - if s.nestedSort != nil { - src, err := s.nestedSort.Source() - if err != nil { - return nil, err - } - x["nested"] = src - } - return source, nil -} - -// -- NestedSort -- - -// NestedSort is used for fields that are inside a nested object. -// It takes a "path" argument and an optional nested filter that the -// nested objects should match with in order to be taken into account -// for sorting. -// -// NestedSort is available from 6.1 and replaces nestedFilter and nestedPath -// in the other sorters. -type NestedSort struct { - Sorter - path string - filter Query - nestedSort *NestedSort -} - -// NewNestedSort creates a new NestedSort. -func NewNestedSort(path string) *NestedSort { - return &NestedSort{path: path} -} - -// Filter sets the filter. -func (s *NestedSort) Filter(filter Query) *NestedSort { - s.filter = filter - return s -} - -// NestedSort embeds another level of nested sorting. -func (s *NestedSort) NestedSort(nestedSort *NestedSort) *NestedSort { - s.nestedSort = nestedSort - return s -} - -// Source returns the JSON-serializable data. -func (s *NestedSort) Source() (interface{}, error) { - source := make(map[string]interface{}) - - if s.path != "" { - source["path"] = s.path - } - if s.filter != nil { - src, err := s.filter.Source() - if err != nil { - return nil, err - } - source["filter"] = src - } - if s.nestedSort != nil { - src, err := s.nestedSort.Source() - if err != nil { - return nil, err - } - source["nested"] = src - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/suggest_field.go b/vendor/github.com/olivere/elastic/v7/suggest_field.go deleted file mode 100644 index 8405a6f..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggest_field.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
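
Closing out the sorters from sort.go above: nested sorting combines FieldSort with NestedSort, roughly as in this sketch (field names assumed):

    // Sort parent documents by a field inside nested "comments" objects,
    // considering only approved comments and taking the highest value.
    nested := elastic.NewNestedSort("comments").
        Filter(elastic.NewTermQuery("comments.approved", true))
    commentSorter := elastic.NewFieldSort("comments.likes").
        SortMode("max").
        Nested(nested).
        Desc()
    // commentSorter is likewise passed to SearchService.SortBy.
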
- -package elastic - -import ( - "encoding/json" - "errors" -) - -// SuggestField can be used by the caller to specify a suggest field -// at index time. For a detailed example, see e.g. -// https://www.elastic.co/blog/you-complete-me. -type SuggestField struct { - inputs []string - weight int - contextQueries []SuggesterContextQuery -} - -func NewSuggestField(input ...string) *SuggestField { - return &SuggestField{ - inputs: input, - weight: -1, - } -} - -func (f *SuggestField) Input(input ...string) *SuggestField { - if f.inputs == nil { - f.inputs = make([]string, 0) - } - f.inputs = append(f.inputs, input...) - return f -} - -func (f *SuggestField) Weight(weight int) *SuggestField { - f.weight = weight - return f -} - -func (f *SuggestField) ContextQuery(queries ...SuggesterContextQuery) *SuggestField { - f.contextQueries = append(f.contextQueries, queries...) - return f -} - -// MarshalJSON encodes SuggestField into JSON. -func (f *SuggestField) MarshalJSON() ([]byte, error) { - source := make(map[string]interface{}) - - if f.inputs != nil { - switch len(f.inputs) { - case 1: - source["input"] = f.inputs[0] - default: - source["input"] = f.inputs - } - } - - if f.weight >= 0 { - source["weight"] = f.weight - } - - switch len(f.contextQueries) { - case 0: - case 1: - src, err := f.contextQueries[0].Source() - if err != nil { - return nil, err - } - source["contexts"] = src - default: - ctxq := make(map[string]interface{}) - for _, query := range f.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - m, ok := src.(map[string]interface{}) - if !ok { - return nil, errors.New("SuggesterContextQuery must be of type map[string]interface{}") - } - for k, v := range m { - ctxq[k] = v - } - } - source["contexts"] = ctxq - } - - return json.Marshal(source) -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester.go b/vendor/github.com/olivere/elastic/v7/suggester.go deleted file mode 100644 index f7dc48f..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// Represents the generic suggester interface. -// A suggester's only purpose is to return the -// source of the query as a JSON-serializable -// object. Returning a map[string]interface{} -// will do. -type Suggester interface { - Name() string - Source(includeName bool) (interface{}, error) -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester_completion.go b/vendor/github.com/olivere/elastic/v7/suggester_completion.go deleted file mode 100644 index 668f0c2..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_completion.go +++ /dev/null @@ -1,358 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import "errors" - -// CompletionSuggester is a fast suggester for e.g. type-ahead completion. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-completion.html -// for more details. 
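
The suggest_field.go and suggester_completion.go removals pair at index time and query time. A self-contained sketch, assuming imports of context, fmt, and github.com/olivere/elastic/v7, plus an illustrative index and a completion-mapped field named "suggest":

    func suggestSongs(ctx context.Context, client *elastic.Client) error {
        // Index time: attach weighted completion inputs via SuggestField.
        _, err := client.Index().
            Index("music").
            Id("1").
            BodyJson(map[string]interface{}{
                "title":   "Nevermind",
                "suggest": elastic.NewSuggestField("Nevermind", "Nirvana").Weight(10),
            }).
            Do(ctx)
        if err != nil {
            return err
        }
        // Query time: prefix completion with duplicate filtering.
        suggester := elastic.NewCompletionSuggester("song-suggest").
            Field("suggest").
            Prefix("nir").
            SkipDuplicates(true).
            Size(5)
        res, err := client.Search().
            Index("music").
            Suggester(suggester).
            Do(ctx)
        if err != nil {
            return err
        }
        for _, suggestion := range res.Suggest["song-suggest"] {
            for _, opt := range suggestion.Options {
                fmt.Println(opt.Text)
            }
        }
        return nil
    }
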
-type CompletionSuggester struct { - Suggester - name string - text string - prefix string - regex string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - fuzzyOptions *FuzzyCompletionSuggesterOptions - regexOptions *RegexCompletionSuggesterOptions - skipDuplicates *bool -} - -// Creates a new completion suggester. -func NewCompletionSuggester(name string) *CompletionSuggester { - return &CompletionSuggester{ - name: name, - } -} - -func (q *CompletionSuggester) Name() string { - return q.name -} - -func (q *CompletionSuggester) Text(text string) *CompletionSuggester { - q.text = text - return q -} - -func (q *CompletionSuggester) Prefix(prefix string) *CompletionSuggester { - q.prefix = prefix - return q -} - -func (q *CompletionSuggester) PrefixWithEditDistance(prefix string, editDistance interface{}) *CompletionSuggester { - q.prefix = prefix - q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions().EditDistance(editDistance) - return q -} - -func (q *CompletionSuggester) PrefixWithOptions(prefix string, options *FuzzyCompletionSuggesterOptions) *CompletionSuggester { - q.prefix = prefix - q.fuzzyOptions = options - return q -} - -func (q *CompletionSuggester) FuzzyOptions(options *FuzzyCompletionSuggesterOptions) *CompletionSuggester { - q.fuzzyOptions = options - return q -} - -func (q *CompletionSuggester) Fuzziness(fuzziness interface{}) *CompletionSuggester { - if q.fuzzyOptions == nil { - q.fuzzyOptions = NewFuzzyCompletionSuggesterOptions() - } - q.fuzzyOptions = q.fuzzyOptions.EditDistance(fuzziness) - return q -} - -func (q *CompletionSuggester) Regex(regex string) *CompletionSuggester { - q.regex = regex - return q -} - -func (q *CompletionSuggester) RegexWithOptions(regex string, options *RegexCompletionSuggesterOptions) *CompletionSuggester { - q.regex = regex - q.regexOptions = options - return q -} - -func (q *CompletionSuggester) RegexOptions(options *RegexCompletionSuggesterOptions) *CompletionSuggester { - q.regexOptions = options - return q -} - -func (q *CompletionSuggester) SkipDuplicates(skipDuplicates bool) *CompletionSuggester { - q.skipDuplicates = &skipDuplicates - return q -} - -func (q *CompletionSuggester) Field(field string) *CompletionSuggester { - q.field = field - return q -} - -func (q *CompletionSuggester) Analyzer(analyzer string) *CompletionSuggester { - q.analyzer = analyzer - return q -} - -func (q *CompletionSuggester) Size(size int) *CompletionSuggester { - q.size = &size - return q -} - -func (q *CompletionSuggester) ShardSize(shardSize int) *CompletionSuggester { - q.shardSize = &shardSize - return q -} - -func (q *CompletionSuggester) ContextQuery(query SuggesterContextQuery) *CompletionSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *CompletionSuggester) ContextQueries(queries ...SuggesterContextQuery) *CompletionSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -// completionSuggesterRequest is necessary because the order in which -// the JSON elements are routed to Elasticsearch is relevant. -// We got into trouble when using plain maps because the text element -// needs to go before the completion element. -type completionSuggesterRequest struct { - Text string `json:"text,omitempty"` - Prefix string `json:"prefix,omitempty"` - Regex string `json:"regex,omitempty"` - Completion interface{} `json:"completion,omitempty"` -} - -// Source creates the JSON data for the completion suggester. 
-func (q *CompletionSuggester) Source(includeName bool) (interface{}, error) { - cs := &completionSuggesterRequest{} - - if q.text != "" { - cs.Text = q.text - } - if q.prefix != "" { - cs.Prefix = q.prefix - } - if q.regex != "" { - cs.Regex = q.regex - } - - suggester := make(map[string]interface{}) - cs.Completion = suggester - - if q.analyzer != "" { - suggester["analyzer"] = q.analyzer - } - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - if q.shardSize != nil { - suggester["shard_size"] = *q.shardSize - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["contexts"] = src - default: - ctxq := make(map[string]interface{}) - for _, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - // Merge the dictionary into ctxq - m, ok := src.(map[string]interface{}) - if !ok { - return nil, errors.New("elastic: context query is not a map") - } - for k, v := range m { - ctxq[k] = v - } - } - suggester["contexts"] = ctxq - } - - // Fuzzy options - if q.fuzzyOptions != nil { - src, err := q.fuzzyOptions.Source() - if err != nil { - return nil, err - } - suggester["fuzzy"] = src - } - - // Regex options - if q.regexOptions != nil { - src, err := q.regexOptions.Source() - if err != nil { - return nil, err - } - suggester["regex"] = src - } - - if q.skipDuplicates != nil { - suggester["skip_duplicates"] = *q.skipDuplicates - } - - // TODO(oe) Add completion-suggester specific parameters here - - if !includeName { - return cs, nil - } - - source := make(map[string]interface{}) - source[q.name] = cs - return source, nil -} - -// -- Fuzzy options -- - -// FuzzyCompletionSuggesterOptions represents the options for fuzzy completion suggester. -type FuzzyCompletionSuggesterOptions struct { - editDistance interface{} - transpositions *bool - minLength *int - prefixLength *int - unicodeAware *bool - maxDeterminizedStates *int -} - -// NewFuzzyCompletionSuggesterOptions initializes a new FuzzyCompletionSuggesterOptions instance. -func NewFuzzyCompletionSuggesterOptions() *FuzzyCompletionSuggesterOptions { - return &FuzzyCompletionSuggesterOptions{} -} - -// EditDistance specifies the maximum number of edits, e.g. a number like "1" or "2" -// or a string like "0..2" or ">5". -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/common-options.html#fuzziness -// for details. -func (o *FuzzyCompletionSuggesterOptions) EditDistance(editDistance interface{}) *FuzzyCompletionSuggesterOptions { - o.editDistance = editDistance - return o -} - -// Transpositions, if set to true, are counted as one change instead of two (defaults to true). -func (o *FuzzyCompletionSuggesterOptions) Transpositions(transpositions bool) *FuzzyCompletionSuggesterOptions { - o.transpositions = &transpositions - return o -} - -// MinLength represents the minimum length of the input before fuzzy suggestions are returned (defaults to 3). -func (o *FuzzyCompletionSuggesterOptions) MinLength(minLength int) *FuzzyCompletionSuggesterOptions { - o.minLength = &minLength - return o -} - -// PrefixLength represents the minimum length of the input, which is not checked for -// fuzzy alternatives (defaults to 1). 
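
And a fragment showing the fuzzy options defined here, tolerating a small edit distance in the typed prefix (names assumed as in the earlier sketches):

    // Fuzzy prefix completion: "nrvana" can still match "Nirvana".
    fuzzy := elastic.NewFuzzyCompletionSuggesterOptions().
        EditDistance(1).
        Transpositions(true)
    fuzzySuggester := elastic.NewCompletionSuggester("song-suggest-fuzzy").
        Field("suggest").
        PrefixWithOptions("nrvana", fuzzy)
    // fuzzySuggester is then passed to SearchService.Suggester as above.
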
-func (o *FuzzyCompletionSuggesterOptions) PrefixLength(prefixLength int) *FuzzyCompletionSuggesterOptions { - o.prefixLength = &prefixLength - return o -} - -// UnicodeAware, if true, all measurements (like fuzzy edit distance, transpositions, and lengths) -// are measured in Unicode code points instead of in bytes. This is slightly slower than -// raw bytes, so it is set to false by default. -func (o *FuzzyCompletionSuggesterOptions) UnicodeAware(unicodeAware bool) *FuzzyCompletionSuggesterOptions { - o.unicodeAware = &unicodeAware - return o -} - -// MaxDeterminizedStates is currently undocumented in Elasticsearch. It represents -// the maximum automaton states allowed for fuzzy expansion. -func (o *FuzzyCompletionSuggesterOptions) MaxDeterminizedStates(max int) *FuzzyCompletionSuggesterOptions { - o.maxDeterminizedStates = &max - return o -} - -// Source creates the JSON data. -func (o *FuzzyCompletionSuggesterOptions) Source() (interface{}, error) { - out := make(map[string]interface{}) - - if o.editDistance != nil { - out["fuzziness"] = o.editDistance - } - if o.transpositions != nil { - out["transpositions"] = *o.transpositions - } - if o.minLength != nil { - out["min_length"] = *o.minLength - } - if o.prefixLength != nil { - out["prefix_length"] = *o.prefixLength - } - if o.unicodeAware != nil { - out["unicode_aware"] = *o.unicodeAware - } - if o.maxDeterminizedStates != nil { - out["max_determinized_states"] = *o.maxDeterminizedStates - } - - return out, nil -} - -// -- Regex options -- - -// RegexCompletionSuggesterOptions represents the options for regex completion suggester. -type RegexCompletionSuggesterOptions struct { - flags interface{} // string or int - maxDeterminizedStates *int -} - -// NewRegexCompletionSuggesterOptions initializes a new RegexCompletionSuggesterOptions instance. -func NewRegexCompletionSuggesterOptions() *RegexCompletionSuggesterOptions { - return &RegexCompletionSuggesterOptions{} -} - -// Flags represents internal regex flags. -// Possible flags are ALL (default), ANYSTRING, COMPLEMENT, EMPTY, INTERSECTION, INTERVAL, or NONE. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-completion.html#regex -// for details. -func (o *RegexCompletionSuggesterOptions) Flags(flags interface{}) *RegexCompletionSuggesterOptions { - o.flags = flags - return o -} - -// MaxDeterminizedStates represents the maximum automaton states allowed for regex expansion. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-completion.html#regex -// for details. -func (o *RegexCompletionSuggesterOptions) MaxDeterminizedStates(max int) *RegexCompletionSuggesterOptions { - o.maxDeterminizedStates = &max - return o -} - -// Source creates the JSON data. -func (o *RegexCompletionSuggesterOptions) Source() (interface{}, error) { - out := make(map[string]interface{}) - - if o.flags != nil { - out["flags"] = o.flags - } - if o.maxDeterminizedStates != nil { - out["max_determinized_states"] = *o.maxDeterminizedStates - } - - return out, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester_context.go b/vendor/github.com/olivere/elastic/v7/suggester_context.go deleted file mode 100644 index fa6e445..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_context.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import "errors" - -// SuggesterContextQuery is used to define context information within -// a suggestion request. -type SuggesterContextQuery interface { - Source() (interface{}, error) -} - -// ContextSuggester is a fast suggester for e.g. type-ahead completion that supports filtering and boosting based on contexts. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/suggester-context.html -// for more details. -type ContextSuggester struct { - Suggester - name string - prefix string - field string - size *int - contextQueries []SuggesterContextQuery -} - -// Creates a new context suggester. -func NewContextSuggester(name string) *ContextSuggester { - return &ContextSuggester{ - name: name, - contextQueries: make([]SuggesterContextQuery, 0), - } -} - -func (q *ContextSuggester) Name() string { - return q.name -} - -func (q *ContextSuggester) Prefix(prefix string) *ContextSuggester { - q.prefix = prefix - return q -} - -func (q *ContextSuggester) Field(field string) *ContextSuggester { - q.field = field - return q -} - -func (q *ContextSuggester) Size(size int) *ContextSuggester { - q.size = &size - return q -} - -func (q *ContextSuggester) ContextQuery(query SuggesterContextQuery) *ContextSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *ContextSuggester) ContextQueries(queries ...SuggesterContextQuery) *ContextSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -// contextSuggesterRequest is necessary because the order in which -// the JSON elements are routed to Elasticsearch is relevant. -// We got into trouble when using plain maps because the text element -// needs to go before the completion element. -type contextSuggesterRequest struct { - Prefix string `json:"prefix"` - Completion interface{} `json:"completion"` -} - -// Creates the source for the context suggester. -func (q *ContextSuggester) Source(includeName bool) (interface{}, error) { - cs := &contextSuggesterRequest{} - - if q.prefix != "" { - cs.Prefix = q.prefix - } - - suggester := make(map[string]interface{}) - cs.Completion = suggester - - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["contexts"] = src - default: - ctxq := make(map[string]interface{}) - for _, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - // Merge the dictionary into ctxq - m, ok := src.(map[string]interface{}) - if !ok { - return nil, errors.New("elastic: context query is not a map") - } - for k, v := range m { - ctxq[k] = v - } - } - suggester["contexts"] = ctxq - } - - if !includeName { - return cs, nil - } - - source := make(map[string]interface{}) - source[q.name] = cs - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester_context_category.go b/vendor/github.com/olivere/elastic/v7/suggester_context_category.go deleted file mode 100644 index ccca301..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_context_category.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -// -- SuggesterCategoryMapping -- - -// SuggesterCategoryMapping provides a mapping for a category context in a suggester. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/suggester-context.html#_category_mapping. -type SuggesterCategoryMapping struct { - name string - fieldName string - defaultValues []string -} - -// NewSuggesterCategoryMapping creates a new SuggesterCategoryMapping. -func NewSuggesterCategoryMapping(name string) *SuggesterCategoryMapping { - return &SuggesterCategoryMapping{ - name: name, - defaultValues: make([]string, 0), - } -} - -func (q *SuggesterCategoryMapping) DefaultValues(values ...string) *SuggesterCategoryMapping { - q.defaultValues = append(q.defaultValues, values...) - return q -} - -func (q *SuggesterCategoryMapping) FieldName(fieldName string) *SuggesterCategoryMapping { - q.fieldName = fieldName - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterCategoryMapping) Source() (interface{}, error) { - source := make(map[string]interface{}) - - x := make(map[string]interface{}) - source[q.name] = x - - x["type"] = "category" - - switch len(q.defaultValues) { - case 0: - x["default"] = q.defaultValues - case 1: - x["default"] = q.defaultValues[0] - default: - x["default"] = q.defaultValues - } - - if q.fieldName != "" { - x["path"] = q.fieldName - } - return source, nil -} - -// -- SuggesterCategoryQuery -- - -// SuggesterCategoryQuery provides querying a category context in a suggester. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/suggester-context.html#_category_query. -type SuggesterCategoryQuery struct { - name string - values map[string]*int -} - -// NewSuggesterCategoryQuery creates a new SuggesterCategoryQuery. -func NewSuggesterCategoryQuery(name string, values ...string) *SuggesterCategoryQuery { - q := &SuggesterCategoryQuery{ - name: name, - values: make(map[string]*int), - } - - if len(values) > 0 { - q.Values(values...) - } - return q -} - -func (q *SuggesterCategoryQuery) Value(val string) *SuggesterCategoryQuery { - q.values[val] = nil - return q -} - -func (q *SuggesterCategoryQuery) ValueWithBoost(val string, boost int) *SuggesterCategoryQuery { - q.values[val] = &boost - return q -} - -func (q *SuggesterCategoryQuery) Values(values ...string) *SuggesterCategoryQuery { - for _, val := range values { - q.values[val] = nil - } - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterCategoryQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - switch len(q.values) { - case 0: - source[q.name] = make([]string, 0) - default: - contexts := make([]interface{}, 0) - for val, boost := range q.values { - context := make(map[string]interface{}) - context["context"] = val - if boost != nil { - context["boost"] = *boost - } - contexts = append(contexts, context) - } - source[q.name] = contexts - } - - return source, nil -} - -type SuggesterCategoryIndex struct { - name string - values []string -} - -// NewSuggesterCategoryIndex creates a new SuggesterCategoryIndex. -func NewSuggesterCategoryIndex(name string, values ...string) *SuggesterCategoryIndex { - q := &SuggesterCategoryIndex{ - name: name, - values: values, - } - return q -} - -func (q *SuggesterCategoryIndex) Values(values ...string) *SuggesterCategoryIndex { - q.values = append(q.values, values...) 
- return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterCategoryIndex) Source() (interface{}, error) { - source := make(map[string]interface{}) - - switch len(q.values) { - case 0: - source[q.name] = make([]string, 0) - case 1: - source[q.name] = q.values[0] - default: - source[q.name] = q.values - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester_context_geo.go b/vendor/github.com/olivere/elastic/v7/suggester_context_geo.go deleted file mode 100644 index 82ddd57..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_context_geo.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// -- SuggesterGeoMapping -- - -// SuggesterGeoMapping provides a mapping for a geolocation context in a suggester. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/suggester-context.html#_geo_location_mapping. -type SuggesterGeoMapping struct { - name string - defaultLocations []*GeoPoint - precision []string - neighbors *bool - fieldName string -} - -// NewSuggesterGeoMapping creates a new SuggesterGeoMapping. -func NewSuggesterGeoMapping(name string) *SuggesterGeoMapping { - return &SuggesterGeoMapping{ - name: name, - } -} - -func (q *SuggesterGeoMapping) DefaultLocations(locations ...*GeoPoint) *SuggesterGeoMapping { - q.defaultLocations = append(q.defaultLocations, locations...) - return q -} - -func (q *SuggesterGeoMapping) Precision(precision ...string) *SuggesterGeoMapping { - q.precision = append(q.precision, precision...) - return q -} - -func (q *SuggesterGeoMapping) Neighbors(neighbors bool) *SuggesterGeoMapping { - q.neighbors = &neighbors - return q -} - -func (q *SuggesterGeoMapping) FieldName(fieldName string) *SuggesterGeoMapping { - q.fieldName = fieldName - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterGeoMapping) Source() (interface{}, error) { - source := make(map[string]interface{}) - - x := make(map[string]interface{}) - source[q.name] = x - - x["type"] = "geo" - - if len(q.precision) > 0 { - x["precision"] = q.precision - } - if q.neighbors != nil { - x["neighbors"] = *q.neighbors - } - - switch len(q.defaultLocations) { - case 0: - case 1: - x["default"] = q.defaultLocations[0].Source() - default: - var arr []interface{} - for _, p := range q.defaultLocations { - arr = append(arr, p.Source()) - } - x["default"] = arr - } - - if q.fieldName != "" { - x["path"] = q.fieldName - } - return source, nil -} - -// -- SuggesterGeoQuery -- - -// SuggesterGeoQuery provides querying a geolocation context in a suggester. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/suggester-context.html#_geo_location_query -type SuggesterGeoQuery struct { - name string - location *GeoPoint - precision string - neighbours []string - boost *int -} - -// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. 
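
For the geo context query whose constructor follows, a sketch of filtering completion suggestions by location; the coordinates, precision, and field names are illustrative:

    // Suggest places near Berlin, boosting matches within the area.
    geoCtx := elastic.NewSuggesterGeoQuery("location", elastic.GeoPointFromLatLon(52.52, 13.40)).
        Precision("5km").
        Boost(2)
    placeSuggester := elastic.NewContextSuggester("place-suggest").
        Field("suggest").
        Prefix("ber").
        ContextQuery(geoCtx)
    // placeSuggester is attached via SearchService.Suggester, as above.
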
-func NewSuggesterGeoQuery(name string, location *GeoPoint) *SuggesterGeoQuery { - return &SuggesterGeoQuery{ - name: name, - location: location, - neighbours: make([]string, 0), - } -} - -func (q *SuggesterGeoQuery) Precision(precision string) *SuggesterGeoQuery { - q.precision = precision - return q -} - -func (q *SuggesterGeoQuery) Neighbours(neighbours ...string) *SuggesterGeoQuery { - q.neighbours = append(q.neighbours, neighbours...) - return q -} - -func (q *SuggesterGeoQuery) Boost(boost int) *SuggesterGeoQuery { - q.boost = &boost - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterGeoQuery) Source() (interface{}, error) { - source := make(map[string]interface{}) - - x := make(map[string]interface{}) - source[q.name] = x - - if q.location != nil { - x["context"] = q.location.Source() - } - if q.precision != "" { - x["precision"] = q.precision - } - if q.boost != nil { - x["boost"] = q.boost - } - switch len(q.neighbours) { - case 0: - case 1: - x["neighbours"] = q.neighbours[0] - default: - x["neighbours"] = q.neighbours - } - - return source, nil -} - -type SuggesterGeoIndex struct { - name string - locations []*GeoPoint -} - -// NewSuggesterGeoQuery creates a new SuggesterGeoQuery. -func NewSuggesterGeoIndex(name string) *SuggesterGeoIndex { - return &SuggesterGeoIndex{ - name: name, - } -} - -func (q *SuggesterGeoIndex) Locations(locations ...*GeoPoint) *SuggesterGeoIndex { - q.locations = append(q.locations, locations...) - return q -} - -// Source returns a map that will be used to serialize the context query as JSON. -func (q *SuggesterGeoIndex) Source() (interface{}, error) { - source := make(map[string]interface{}) - - switch len(q.locations) { - case 0: - source[q.name] = make([]string, 0) - case 1: - source[q.name] = q.locations[0].Source() - default: - var arr []interface{} - for _, p := range q.locations { - arr = append(arr, p.Source()) - } - source[q.name] = arr - } - - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/suggester_phrase.go b/vendor/github.com/olivere/elastic/v7/suggester_phrase.go deleted file mode 100644 index 82ce443..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_phrase.go +++ /dev/null @@ -1,550 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// PhraseSuggester provides an API to access word alternatives -// on a per token basis within a certain string distance. -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-phrase.html. -type PhraseSuggester struct { - Suggester - name string - text string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - // fields specific to a phrase suggester - maxErrors *float64 - separator *string - realWordErrorLikelihood *float64 - confidence *float64 - generators map[string][]CandidateGenerator - gramSize *int - smoothingModel SmoothingModel - forceUnigrams *bool - tokenLimit *int - preTag, postTag *string - collateQuery *Script - collatePreference *string - collateParams map[string]interface{} - collatePrune *bool -} - -// NewPhraseSuggester creates a new PhraseSuggester. 
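
Ahead of the PhraseSuggester constructor below, a "did you mean" sketch; the trigram field and the misspelled input text are assumptions:

    // Suggest a corrected phrase, highlighting the changed tokens.
    phraseSuggester := elastic.NewPhraseSuggester("did-you-mean").
        Field("title.trigram").
        Text("noble prize").
        Size(1).
        MaxErrors(2).
        Highlight("<em>", "</em>").
        CandidateGenerator(elastic.NewDirectCandidateGenerator("title.trigram").SuggestMode("always"))
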
-func NewPhraseSuggester(name string) *PhraseSuggester { - return &PhraseSuggester{ - name: name, - collateParams: make(map[string]interface{}), - } -} - -func (q *PhraseSuggester) Name() string { - return q.name -} - -func (q *PhraseSuggester) Text(text string) *PhraseSuggester { - q.text = text - return q -} - -func (q *PhraseSuggester) Field(field string) *PhraseSuggester { - q.field = field - return q -} - -func (q *PhraseSuggester) Analyzer(analyzer string) *PhraseSuggester { - q.analyzer = analyzer - return q -} - -func (q *PhraseSuggester) Size(size int) *PhraseSuggester { - q.size = &size - return q -} - -func (q *PhraseSuggester) ShardSize(shardSize int) *PhraseSuggester { - q.shardSize = &shardSize - return q -} - -func (q *PhraseSuggester) ContextQuery(query SuggesterContextQuery) *PhraseSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *PhraseSuggester) ContextQueries(queries ...SuggesterContextQuery) *PhraseSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -func (q *PhraseSuggester) GramSize(gramSize int) *PhraseSuggester { - if gramSize >= 1 { - q.gramSize = &gramSize - } - return q -} - -func (q *PhraseSuggester) MaxErrors(maxErrors float64) *PhraseSuggester { - q.maxErrors = &maxErrors - return q -} - -func (q *PhraseSuggester) Separator(separator string) *PhraseSuggester { - q.separator = &separator - return q -} - -func (q *PhraseSuggester) RealWordErrorLikelihood(realWordErrorLikelihood float64) *PhraseSuggester { - q.realWordErrorLikelihood = &realWordErrorLikelihood - return q -} - -func (q *PhraseSuggester) Confidence(confidence float64) *PhraseSuggester { - q.confidence = &confidence - return q -} - -func (q *PhraseSuggester) CandidateGenerator(generator CandidateGenerator) *PhraseSuggester { - if q.generators == nil { - q.generators = make(map[string][]CandidateGenerator) - } - typ := generator.Type() - if _, found := q.generators[typ]; !found { - q.generators[typ] = make([]CandidateGenerator, 0) - } - q.generators[typ] = append(q.generators[typ], generator) - return q -} - -func (q *PhraseSuggester) CandidateGenerators(generators ...CandidateGenerator) *PhraseSuggester { - for _, g := range generators { - q = q.CandidateGenerator(g) - } - return q -} - -func (q *PhraseSuggester) ClearCandidateGenerator() *PhraseSuggester { - q.generators = nil - return q -} - -func (q *PhraseSuggester) ForceUnigrams(forceUnigrams bool) *PhraseSuggester { - q.forceUnigrams = &forceUnigrams - return q -} - -func (q *PhraseSuggester) SmoothingModel(smoothingModel SmoothingModel) *PhraseSuggester { - q.smoothingModel = smoothingModel - return q -} - -func (q *PhraseSuggester) TokenLimit(tokenLimit int) *PhraseSuggester { - q.tokenLimit = &tokenLimit - return q -} - -func (q *PhraseSuggester) Highlight(preTag, postTag string) *PhraseSuggester { - q.preTag = &preTag - q.postTag = &postTag - return q -} - -func (q *PhraseSuggester) CollateQuery(collateQuery *Script) *PhraseSuggester { - q.collateQuery = collateQuery - return q -} - -func (q *PhraseSuggester) CollatePreference(collatePreference string) *PhraseSuggester { - q.collatePreference = &collatePreference - return q -} - -func (q *PhraseSuggester) CollateParams(collateParams map[string]interface{}) *PhraseSuggester { - q.collateParams = collateParams - return q -} - -func (q *PhraseSuggester) CollatePrune(collatePrune bool) *PhraseSuggester { - q.collatePrune = &collatePrune - return q -} - -// phraseSuggesterRequest is necessary because the order in which 
-// the JSON elements are routed to Elasticsearch is relevant. -// We got into trouble when using plain maps because the text element -// needs to go before the simple_phrase element. -type phraseSuggesterRequest struct { - Text string `json:"text"` - Phrase interface{} `json:"phrase"` -} - -// Source generates the source for the phrase suggester. -func (q *PhraseSuggester) Source(includeName bool) (interface{}, error) { - ps := &phraseSuggesterRequest{} - - if q.text != "" { - ps.Text = q.text - } - - suggester := make(map[string]interface{}) - ps.Phrase = suggester - - if q.analyzer != "" { - suggester["analyzer"] = q.analyzer - } - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - if q.shardSize != nil { - suggester["shard_size"] = *q.shardSize - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["contexts"] = src - default: - var ctxq []interface{} - for _, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - ctxq = append(ctxq, src) - } - suggester["contexts"] = ctxq - } - - // Phase-specified parameters - if q.realWordErrorLikelihood != nil { - suggester["real_word_error_likelihood"] = *q.realWordErrorLikelihood - } - if q.confidence != nil { - suggester["confidence"] = *q.confidence - } - if q.separator != nil { - suggester["separator"] = *q.separator - } - if q.maxErrors != nil { - suggester["max_errors"] = *q.maxErrors - } - if q.gramSize != nil { - suggester["gram_size"] = *q.gramSize - } - if q.forceUnigrams != nil { - suggester["force_unigrams"] = *q.forceUnigrams - } - if q.tokenLimit != nil { - suggester["token_limit"] = *q.tokenLimit - } - if q.generators != nil && len(q.generators) > 0 { - for typ, generators := range q.generators { - var arr []interface{} - for _, g := range generators { - src, err := g.Source() - if err != nil { - return nil, err - } - arr = append(arr, src) - } - suggester[typ] = arr - } - } - if q.smoothingModel != nil { - src, err := q.smoothingModel.Source() - if err != nil { - return nil, err - } - x := make(map[string]interface{}) - x[q.smoothingModel.Type()] = src - suggester["smoothing"] = x - } - if q.preTag != nil { - hl := make(map[string]string) - hl["pre_tag"] = *q.preTag - if q.postTag != nil { - hl["post_tag"] = *q.postTag - } - suggester["highlight"] = hl - } - if q.collateQuery != nil { - collate := make(map[string]interface{}) - suggester["collate"] = collate - if q.collateQuery != nil { - src, err := q.collateQuery.Source() - if err != nil { - return nil, err - } - collate["query"] = src - } - if q.collatePreference != nil { - collate["preference"] = *q.collatePreference - } - if len(q.collateParams) > 0 { - collate["params"] = q.collateParams - } - if q.collatePrune != nil { - collate["prune"] = *q.collatePrune - } - } - - if !includeName { - return ps, nil - } - - source := make(map[string]interface{}) - source[q.name] = ps - return source, nil -} - -// -- Smoothing models -- - -type SmoothingModel interface { - Type() string - Source() (interface{}, error) -} - -// StupidBackoffSmoothingModel implements a stupid backoff smoothing model. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. 
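
Continuing the phrase-suggester sketch from earlier, the smoothing models defined below attach via SmoothingModel; the discount value is illustrative:

    // Weight candidate n-grams with a stupid-backoff model.
    phraseSuggester = phraseSuggester.SmoothingModel(elastic.NewStupidBackoffSmoothingModel(0.4))
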
-type StupidBackoffSmoothingModel struct { - discount float64 -} - -func NewStupidBackoffSmoothingModel(discount float64) *StupidBackoffSmoothingModel { - return &StupidBackoffSmoothingModel{ - discount: discount, - } -} - -func (sm *StupidBackoffSmoothingModel) Type() string { - return "stupid_backoff" -} - -func (sm *StupidBackoffSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["discount"] = sm.discount - return source, nil -} - -// -- - -// LaplaceSmoothingModel implements a laplace smoothing model. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. -type LaplaceSmoothingModel struct { - alpha float64 -} - -func NewLaplaceSmoothingModel(alpha float64) *LaplaceSmoothingModel { - return &LaplaceSmoothingModel{ - alpha: alpha, - } -} - -func (sm *LaplaceSmoothingModel) Type() string { - return "laplace" -} - -func (sm *LaplaceSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["alpha"] = sm.alpha - return source, nil -} - -// -- - -// LinearInterpolationSmoothingModel implements a linear interpolation -// smoothing model. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. -type LinearInterpolationSmoothingModel struct { - trigramLamda float64 - bigramLambda float64 - unigramLambda float64 -} - -func NewLinearInterpolationSmoothingModel(trigramLamda, bigramLambda, unigramLambda float64) *LinearInterpolationSmoothingModel { - return &LinearInterpolationSmoothingModel{ - trigramLamda: trigramLamda, - bigramLambda: bigramLambda, - unigramLambda: unigramLambda, - } -} - -func (sm *LinearInterpolationSmoothingModel) Type() string { - return "linear_interpolation" -} - -func (sm *LinearInterpolationSmoothingModel) Source() (interface{}, error) { - source := make(map[string]interface{}) - source["trigram_lambda"] = sm.trigramLamda - source["bigram_lambda"] = sm.bigramLambda - source["unigram_lambda"] = sm.unigramLambda - return source, nil -} - -// -- CandidateGenerator -- - -type CandidateGenerator interface { - Type() string - Source() (interface{}, error) -} - -// DirectCandidateGenerator implements a direct candidate generator. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-phrase.html#_smoothing_models -// for details about smoothing models. 
-type DirectCandidateGenerator struct { - field string - preFilter *string - postFilter *string - suggestMode *string - accuracy *float64 - size *int - sort *string - stringDistance *string - maxEdits *int - maxInspections *int - maxTermFreq *float64 - prefixLength *int - minWordLength *int - minDocFreq *float64 -} - -func NewDirectCandidateGenerator(field string) *DirectCandidateGenerator { - return &DirectCandidateGenerator{ - field: field, - } -} - -func (g *DirectCandidateGenerator) Type() string { - return "direct_generator" -} - -func (g *DirectCandidateGenerator) Field(field string) *DirectCandidateGenerator { - g.field = field - return g -} - -func (g *DirectCandidateGenerator) PreFilter(preFilter string) *DirectCandidateGenerator { - g.preFilter = &preFilter - return g -} - -func (g *DirectCandidateGenerator) PostFilter(postFilter string) *DirectCandidateGenerator { - g.postFilter = &postFilter - return g -} - -func (g *DirectCandidateGenerator) SuggestMode(suggestMode string) *DirectCandidateGenerator { - g.suggestMode = &suggestMode - return g -} - -func (g *DirectCandidateGenerator) Accuracy(accuracy float64) *DirectCandidateGenerator { - g.accuracy = &accuracy - return g -} - -func (g *DirectCandidateGenerator) Size(size int) *DirectCandidateGenerator { - g.size = &size - return g -} - -func (g *DirectCandidateGenerator) Sort(sort string) *DirectCandidateGenerator { - g.sort = &sort - return g -} - -func (g *DirectCandidateGenerator) StringDistance(stringDistance string) *DirectCandidateGenerator { - g.stringDistance = &stringDistance - return g -} - -func (g *DirectCandidateGenerator) MaxEdits(maxEdits int) *DirectCandidateGenerator { - g.maxEdits = &maxEdits - return g -} - -func (g *DirectCandidateGenerator) MaxInspections(maxInspections int) *DirectCandidateGenerator { - g.maxInspections = &maxInspections - return g -} - -func (g *DirectCandidateGenerator) MaxTermFreq(maxTermFreq float64) *DirectCandidateGenerator { - g.maxTermFreq = &maxTermFreq - return g -} - -func (g *DirectCandidateGenerator) PrefixLength(prefixLength int) *DirectCandidateGenerator { - g.prefixLength = &prefixLength - return g -} - -func (g *DirectCandidateGenerator) MinWordLength(minWordLength int) *DirectCandidateGenerator { - g.minWordLength = &minWordLength - return g -} - -func (g *DirectCandidateGenerator) MinDocFreq(minDocFreq float64) *DirectCandidateGenerator { - g.minDocFreq = &minDocFreq - return g -} - -func (g *DirectCandidateGenerator) Source() (interface{}, error) { - source := make(map[string]interface{}) - if g.field != "" { - source["field"] = g.field - } - if g.suggestMode != nil { - source["suggest_mode"] = *g.suggestMode - } - if g.accuracy != nil { - source["accuracy"] = *g.accuracy - } - if g.size != nil { - source["size"] = *g.size - } - if g.sort != nil { - source["sort"] = *g.sort - } - if g.stringDistance != nil { - source["string_distance"] = *g.stringDistance - } - if g.maxEdits != nil { - source["max_edits"] = *g.maxEdits - } - if g.maxInspections != nil { - source["max_inspections"] = *g.maxInspections - } - if g.maxTermFreq != nil { - source["max_term_freq"] = *g.maxTermFreq - } - if g.prefixLength != nil { - source["prefix_length"] = *g.prefixLength - } - if g.minWordLength != nil { - source["min_word_length"] = *g.minWordLength - } - if g.minDocFreq != nil { - source["min_doc_freq"] = *g.minDocFreq - } - if g.preFilter != nil { - source["pre_filter"] = *g.preFilter - } - if g.postFilter != nil { - source["post_filter"] = *g.postFilter - } - return source, nil -} diff 
--git a/vendor/github.com/olivere/elastic/v7/suggester_term.go b/vendor/github.com/olivere/elastic/v7/suggester_term.go deleted file mode 100644 index ecb6d9e..0000000 --- a/vendor/github.com/olivere/elastic/v7/suggester_term.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -// TermSuggester suggests terms based on edit distance. -// For more details, see -// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-suggesters-term.html. -type TermSuggester struct { - Suggester - name string - text string - field string - analyzer string - size *int - shardSize *int - contextQueries []SuggesterContextQuery - - // fields specific to term suggester - suggestMode string - accuracy *float64 - sort string - stringDistance string - maxEdits *int - maxInspections *int - maxTermFreq *float64 - prefixLength *int - minWordLength *int - minDocFreq *float64 -} - -// NewTermSuggester creates a new TermSuggester. -func NewTermSuggester(name string) *TermSuggester { - return &TermSuggester{ - name: name, - } -} - -func (q *TermSuggester) Name() string { - return q.name -} - -func (q *TermSuggester) Text(text string) *TermSuggester { - q.text = text - return q -} - -func (q *TermSuggester) Field(field string) *TermSuggester { - q.field = field - return q -} - -func (q *TermSuggester) Analyzer(analyzer string) *TermSuggester { - q.analyzer = analyzer - return q -} - -func (q *TermSuggester) Size(size int) *TermSuggester { - q.size = &size - return q -} - -func (q *TermSuggester) ShardSize(shardSize int) *TermSuggester { - q.shardSize = &shardSize - return q -} - -func (q *TermSuggester) ContextQuery(query SuggesterContextQuery) *TermSuggester { - q.contextQueries = append(q.contextQueries, query) - return q -} - -func (q *TermSuggester) ContextQueries(queries ...SuggesterContextQuery) *TermSuggester { - q.contextQueries = append(q.contextQueries, queries...) - return q -} - -func (q *TermSuggester) SuggestMode(suggestMode string) *TermSuggester { - q.suggestMode = suggestMode - return q -} - -func (q *TermSuggester) Accuracy(accuracy float64) *TermSuggester { - q.accuracy = &accuracy - return q -} - -func (q *TermSuggester) Sort(sort string) *TermSuggester { - q.sort = sort - return q -} - -func (q *TermSuggester) StringDistance(stringDistance string) *TermSuggester { - q.stringDistance = stringDistance - return q -} - -func (q *TermSuggester) MaxEdits(maxEdits int) *TermSuggester { - q.maxEdits = &maxEdits - return q -} - -func (q *TermSuggester) MaxInspections(maxInspections int) *TermSuggester { - q.maxInspections = &maxInspections - return q -} - -func (q *TermSuggester) MaxTermFreq(maxTermFreq float64) *TermSuggester { - q.maxTermFreq = &maxTermFreq - return q -} - -func (q *TermSuggester) PrefixLength(prefixLength int) *TermSuggester { - q.prefixLength = &prefixLength - return q -} - -func (q *TermSuggester) MinWordLength(minWordLength int) *TermSuggester { - q.minWordLength = &minWordLength - return q -} - -func (q *TermSuggester) MinDocFreq(minDocFreq float64) *TermSuggester { - q.minDocFreq = &minDocFreq - return q -} - -// termSuggesterRequest is necessary because the order in which -// the JSON elements are routed to Elasticsearch is relevant. -// We got into trouble when using plain maps because the text element -// needs to go before the term element. 
-type termSuggesterRequest struct { - Text string `json:"text"` - Term interface{} `json:"term"` -} - -// Source generates the source for the term suggester. -func (q *TermSuggester) Source(includeName bool) (interface{}, error) { - // "suggest" : { - // "my-suggest-1" : { - // "text" : "the amsterdma meetpu", - // "term" : { - // "field" : "body" - // } - // }, - // "my-suggest-2" : { - // "text" : "the rottredam meetpu", - // "term" : { - // "field" : "title", - // } - // } - // } - ts := &termSuggesterRequest{} - if q.text != "" { - ts.Text = q.text - } - - suggester := make(map[string]interface{}) - ts.Term = suggester - - if q.analyzer != "" { - suggester["analyzer"] = q.analyzer - } - if q.field != "" { - suggester["field"] = q.field - } - if q.size != nil { - suggester["size"] = *q.size - } - if q.shardSize != nil { - suggester["shard_size"] = *q.shardSize - } - switch len(q.contextQueries) { - case 0: - case 1: - src, err := q.contextQueries[0].Source() - if err != nil { - return nil, err - } - suggester["contexts"] = src - default: - ctxq := make([]interface{}, len(q.contextQueries)) - for i, query := range q.contextQueries { - src, err := query.Source() - if err != nil { - return nil, err - } - ctxq[i] = src - } - suggester["contexts"] = ctxq - } - - // Specific to term suggester - if q.suggestMode != "" { - suggester["suggest_mode"] = q.suggestMode - } - if q.accuracy != nil { - suggester["accuracy"] = *q.accuracy - } - if q.sort != "" { - suggester["sort"] = q.sort - } - if q.stringDistance != "" { - suggester["string_distance"] = q.stringDistance - } - if q.maxEdits != nil { - suggester["max_edits"] = *q.maxEdits - } - if q.maxInspections != nil { - suggester["max_inspections"] = *q.maxInspections - } - if q.maxTermFreq != nil { - suggester["max_term_freq"] = *q.maxTermFreq - } - if q.prefixLength != nil { - suggester["prefix_length"] = *q.prefixLength - } - if q.minWordLength != nil { - suggester["min_word_length"] = *q.minWordLength - } - if q.minDocFreq != nil { - suggester["min_doc_freq"] = *q.minDocFreq - } - - if !includeName { - return ts, nil - } - - source := make(map[string]interface{}) - source[q.name] = ts - return source, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/tasks_cancel.go b/vendor/github.com/olivere/elastic/v7/tasks_cancel.go deleted file mode 100644 index 1294004..0000000 --- a/vendor/github.com/olivere/elastic/v7/tasks_cancel.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// TasksCancelService can cancel long-running tasks. -// It is supported as of Elasticsearch 2.3.0. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/tasks.html#task-cancellation -// for details. -type TasksCancelService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - taskId string - actions []string - nodeId []string - parentTaskId string -} - -// NewTasksCancelService creates a new TasksCancelService. 
-func NewTasksCancelService(client *Client) *TasksCancelService { - return &TasksCancelService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *TasksCancelService) Pretty(pretty bool) *TasksCancelService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *TasksCancelService) Human(human bool) *TasksCancelService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *TasksCancelService) ErrorTrace(errorTrace bool) *TasksCancelService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *TasksCancelService) FilterPath(filterPath ...string) *TasksCancelService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *TasksCancelService) Header(name string, value string) *TasksCancelService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *TasksCancelService) Headers(headers http.Header) *TasksCancelService { - s.headers = headers - return s -} - -// TaskId specifies the task to cancel. Notice that the caller is responsible -// for using the correct format, i.e. node_id:task_number, as specified in -// the REST API. -func (s *TasksCancelService) TaskId(taskId string) *TasksCancelService { - s.taskId = taskId - return s -} - -// TaskIdFromNodeAndId specifies the task to cancel. Set id to -1 for all tasks. -func (s *TasksCancelService) TaskIdFromNodeAndId(nodeId string, id int64) *TasksCancelService { - if id != -1 { - s.taskId = fmt.Sprintf("%s:%d", nodeId, id) - } - return s -} - -// Actions is a list of actions that should be cancelled. Leave empty to cancel all. -func (s *TasksCancelService) Actions(actions ...string) *TasksCancelService { - s.actions = append(s.actions, actions...) - return s -} - -// NodeId is a list of node IDs or names to limit the returned information; -// use `_local` to return information from the node you're connecting to, -// leave empty to get information from all nodes. -func (s *TasksCancelService) NodeId(nodeId ...string) *TasksCancelService { - s.nodeId = append(s.nodeId, nodeId...) - return s -} - -// ParentTaskId specifies to cancel tasks with specified parent task id. -// Notice that the caller is responsible for using the correct format, -// i.e. node_id:task_number, as specified in the REST API. -func (s *TasksCancelService) ParentTaskId(parentTaskId string) *TasksCancelService { - s.parentTaskId = parentTaskId - return s -} - -// ParentTaskIdFromNodeAndId specifies to cancel tasks with specified parent task id. -func (s *TasksCancelService) ParentTaskIdFromNodeAndId(nodeId string, id int64) *TasksCancelService { - if id != -1 { - s.parentTaskId = fmt.Sprintf("%s:%d", nodeId, id) - } - return s -} - -// buildURL builds the URL for the operation. 
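A hedged usage sketch for the builder above (the TasksCancel accessor on *Client, the node name, and the task number are assumptions for illustration, not code from this repo):

func exampleCancelTask(ctx context.Context, client *elastic.Client) error {
	// Cancel task 42 on node "node1"; leaving Actions empty cancels all of its actions.
	_, err := client.TasksCancel().
		TaskIdFromNodeAndId("node1", 42).
		Do(ctx)
	return err
}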
-func (s *TasksCancelService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if s.taskId != "" { - path, err = uritemplates.Expand("/_tasks/{task_id}/_cancel", map[string]string{ - "task_id": s.taskId, - }) - } else { - path = "/_tasks/_cancel" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.actions) > 0 { - params.Set("actions", strings.Join(s.actions, ",")) - } - if len(s.nodeId) > 0 { - params.Set("nodes", strings.Join(s.nodeId, ",")) - } - if s.parentTaskId != "" { - params.Set("parent_task_id", s.parentTaskId) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *TasksCancelService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *TasksCancelService) Do(ctx context.Context) (*TasksListResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(TasksListResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/tasks_get_task.go b/vendor/github.com/olivere/elastic/v7/tasks_get_task.go deleted file mode 100644 index d9f8a8b..0000000 --- a/vendor/github.com/olivere/elastic/v7/tasks_get_task.go +++ /dev/null @@ -1,169 +0,0 @@ -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// TasksGetTaskService retrieves the state of a task in the cluster. It is part of the Task Management API -// documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/tasks.html#_current_tasks_information. -type TasksGetTaskService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - taskId string - waitForCompletion *bool -} - -// NewTasksGetTaskService creates a new TasksGetTaskService. -func NewTasksGetTaskService(client *Client) *TasksGetTaskService { - return &TasksGetTaskService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *TasksGetTaskService) Pretty(pretty bool) *TasksGetTaskService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *TasksGetTaskService) Human(human bool) *TasksGetTaskService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. 
-func (s *TasksGetTaskService) ErrorTrace(errorTrace bool) *TasksGetTaskService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *TasksGetTaskService) FilterPath(filterPath ...string) *TasksGetTaskService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *TasksGetTaskService) Header(name string, value string) *TasksGetTaskService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *TasksGetTaskService) Headers(headers http.Header) *TasksGetTaskService { - s.headers = headers - return s -} - -// TaskId specifies the task to return. Notice that the caller is responsible -// for using the correct format, i.e. node_id:task_number, as specified in -// the REST API. -func (s *TasksGetTaskService) TaskId(taskId string) *TasksGetTaskService { - s.taskId = taskId - return s -} - -// TaskIdFromNodeAndId indicates to return the task on the given node with specified id. -func (s *TasksGetTaskService) TaskIdFromNodeAndId(nodeId string, id int64) *TasksGetTaskService { - s.taskId = fmt.Sprintf("%s:%d", nodeId, id) - return s -} - -// WaitForCompletion indicates whether to wait for the matching tasks -// to complete (default: false). -func (s *TasksGetTaskService) WaitForCompletion(waitForCompletion bool) *TasksGetTaskService { - s.waitForCompletion = &waitForCompletion - return s -} - -// buildURL builds the URL for the operation. -func (s *TasksGetTaskService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_tasks/{task_id}", map[string]string{ - "task_id": s.taskId, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.waitForCompletion; v != nil { - params.Set("wait_for_completion", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *TasksGetTaskService) Validate() error { - return nil -} - -// Do executes the operation. 
-func (s *TasksGetTaskService) Do(ctx context.Context) (*TasksGetTaskResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(TasksGetTaskResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - ret.Header = res.Header - return ret, nil -} - -type TasksGetTaskResponse struct { - Header http.Header `json:"-"` - Completed bool `json:"completed"` - Task *TaskInfo `json:"task,omitempty"` - Error *ErrorDetails `json:"error,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/tasks_list.go b/vendor/github.com/olivere/elastic/v7/tasks_list.go deleted file mode 100644 index dc1ad49..0000000 --- a/vendor/github.com/olivere/elastic/v7/tasks_list.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// TasksListService retrieves the list of currently executing tasks -// on one or more nodes in the cluster. It is part of the Task Management API -// documented at https://www.elastic.co/guide/en/elasticsearch/reference/7.0/tasks.html. -// -// It is supported as of Elasticsearch 2.3.0. -type TasksListService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - taskId []string - actions []string - detailed *bool - nodeId []string - parentTaskId string - waitForCompletion *bool - groupBy string -} - -// NewTasksListService creates a new TasksListService. -func NewTasksListService(client *Client) *TasksListService { - return &TasksListService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *TasksListService) Pretty(pretty bool) *TasksListService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *TasksListService) Human(human bool) *TasksListService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *TasksListService) ErrorTrace(errorTrace bool) *TasksListService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *TasksListService) FilterPath(filterPath ...string) *TasksListService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *TasksListService) Header(name string, value string) *TasksListService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request.
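As a sketch of the service whose Do method and response type appear above (the TasksGetTask accessor on *Client and the task id are illustrative assumptions):

func exampleWaitForTask(ctx context.Context, client *elastic.Client) (bool, error) {
	// Block until task node1:42 finishes, then report completion.
	resp, err := client.TasksGetTask().
		TaskId("node1:42").
		WaitForCompletion(true).
		Do(ctx)
	if err != nil {
		return false, err
	}
	return resp.Completed, nil
}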
-func (s *TasksListService) Headers(headers http.Header) *TasksListService { - s.headers = headers - return s -} - -// TaskId indicates to return the task(s) with specified id(s). -// Notice that the caller is responsible for using the correct format, -// i.e. node_id:task_number, as specified in the REST API. -func (s *TasksListService) TaskId(taskId ...string) *TasksListService { - s.taskId = append(s.taskId, taskId...) - return s -} - -// Actions is a list of actions that should be returned. Leave empty to return all. -func (s *TasksListService) Actions(actions ...string) *TasksListService { - s.actions = append(s.actions, actions...) - return s -} - -// Detailed indicates whether to return detailed task information (default: false). -func (s *TasksListService) Detailed(detailed bool) *TasksListService { - s.detailed = &detailed - return s -} - -// NodeId is a list of node IDs or names to limit the returned information; -// use `_local` to return information from the node you're connecting to, -// leave empty to get information from all nodes. -func (s *TasksListService) NodeId(nodeId ...string) *TasksListService { - s.nodeId = append(s.nodeId, nodeId...) - return s -} - -// ParentTaskId returns tasks with specified parent task id. -// Notice that the caller is responsible for using the correct format, -// i.e. node_id:task_number, as specified in the REST API. -func (s *TasksListService) ParentTaskId(parentTaskId string) *TasksListService { - s.parentTaskId = parentTaskId - return s -} - -// WaitForCompletion indicates whether to wait for the matching tasks -// to complete (default: false). -func (s *TasksListService) WaitForCompletion(waitForCompletion bool) *TasksListService { - s.waitForCompletion = &waitForCompletion - return s -} - -// GroupBy groups tasks by nodes or parent/child relationships. -// As of now, it can either be "nodes" (default) or "parents" or "none". -func (s *TasksListService) GroupBy(groupBy string) *TasksListService { - s.groupBy = groupBy - return s -} - -// buildURL builds the URL for the operation. -func (s *TasksListService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.taskId) > 0 { - path, err = uritemplates.Expand("/_tasks/{task_id}", map[string]string{ - "task_id": strings.Join(s.taskId, ","), - }) - } else { - path = "/_tasks" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.actions) > 0 { - params.Set("actions", strings.Join(s.actions, ",")) - } - if v := s.detailed; v != nil { - params.Set("detailed", fmt.Sprint(*v)) - } - if len(s.nodeId) > 0 { - params.Set("nodes", strings.Join(s.nodeId, ",")) - } - if s.parentTaskId != "" { - params.Set("parent_task_id", s.parentTaskId) - } - if v := s.waitForCompletion; v != nil { - params.Set("wait_for_completion", fmt.Sprint(*v)) - } - if s.groupBy != "" { - params.Set("group_by", s.groupBy) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *TasksListService) Validate() error { - return nil -} - -// Do executes the operation.
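A sketch of listing tasks with this service (the TasksList accessor on *Client and the action filter are illustrative; the response types it reads are defined just below, and the snippet assumes the context, fmt, and elastic imports):

func exampleListSearchTasks(ctx context.Context, client *elastic.Client) error {
	// List detailed info on currently running search actions, grouped per parent task.
	resp, err := client.TasksList().
		Actions("*search").
		Detailed(true).
		GroupBy("parents").
		Do(ctx)
	if err != nil {
		return err
	}
	for nodeID, node := range resp.Nodes {
		fmt.Printf("%s: %d tasks\n", nodeID, len(node.Tasks))
	}
	return nil
}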
-func (s *TasksListService) Do(ctx context.Context) (*TasksListResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(TasksListResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - ret.Header = res.Header - return ret, nil -} - -// TasksListResponse is the response of TasksListService.Do. -type TasksListResponse struct { - Header http.Header `json:"-"` - TaskFailures []*TaskOperationFailure `json:"task_failures"` - NodeFailures []*FailedNodeException `json:"node_failures"` - // Nodes returns the tasks per node. The key is the node id. - Nodes map[string]*DiscoveryNode `json:"nodes"` -} - -type TaskOperationFailure struct { - TaskId int64 `json:"task_id"` // this is a long in the Java source - NodeId string `json:"node_id"` - Status string `json:"status"` - Reason *ErrorDetails `json:"reason"` -} - -type FailedNodeException struct { - *ErrorDetails - NodeId string `json:"node_id"` -} - -type DiscoveryNode struct { - Name string `json:"name"` - TransportAddress string `json:"transport_address"` - Host string `json:"host"` - IP string `json:"ip"` - Roles []string `json:"roles"` // "master", "data", or "ingest" - Attributes map[string]interface{} `json:"attributes"` - // Tasks returns the tasks by its id (as a string). - Tasks map[string]*TaskInfo `json:"tasks"` -} - -// TaskInfo represents information about a currently running task. -type TaskInfo struct { - Node string `json:"node"` - Id int64 `json:"id"` // the task id (yes, this is a long in the Java source) - Type string `json:"type"` - Action string `json:"action"` - Status interface{} `json:"status"` // has separate implementations of Task.Status in Java for reindexing, replication, and "RawTaskStatus" - Description interface{} `json:"description"` // same as Status - StartTime string `json:"start_time"` - StartTimeInMillis int64 `json:"start_time_in_millis"` - RunningTime string `json:"running_time"` - RunningTimeInNanos int64 `json:"running_time_in_nanos"` - Cancellable bool `json:"cancellable"` - ParentTaskId string `json:"parent_task_id"` // like "YxJnVYjwSBm_AUbzddTajQ:12356" - Headers map[string]string `json:"headers"` -} - -// StartTaskResult is used in cases where a task gets started asynchronously and -// the operation simply returns a TaskID to watch for via the Task Management API. -type StartTaskResult struct { - Header http.Header `json:"-"` - TaskId string `json:"task"` -} diff --git a/vendor/github.com/olivere/elastic/v7/termvectors.go b/vendor/github.com/olivere/elastic/v7/termvectors.go deleted file mode 100644 index a196810..0000000 --- a/vendor/github.com/olivere/elastic/v7/termvectors.go +++ /dev/null @@ -1,520 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// TermvectorsService returns information and statistics on terms in the -// fields of a particular document.
The document could be stored in the -// index or artificially provided by the user. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-termvectors.html -// for documentation. -type TermvectorsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - index string - typ string - dfs *bool - doc interface{} - fieldStatistics *bool - fields []string - filter *TermvectorsFilterSettings - perFieldAnalyzer map[string]string - offsets *bool - parent string - payloads *bool - positions *bool - preference string - realtime *bool - routing string - termStatistics *bool - version interface{} - versionType string - bodyJson interface{} - bodyString string -} - -// NewTermvectorsService creates a new TermvectorsService. -func NewTermvectorsService(client *Client) *TermvectorsService { - return &TermvectorsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *TermvectorsService) Pretty(pretty bool) *TermvectorsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *TermvectorsService) Human(human bool) *TermvectorsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *TermvectorsService) ErrorTrace(errorTrace bool) *TermvectorsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *TermvectorsService) FilterPath(filterPath ...string) *TermvectorsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *TermvectorsService) Header(name string, value string) *TermvectorsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *TermvectorsService) Headers(headers http.Header) *TermvectorsService { - s.headers = headers - return s -} - -// Index in which the document resides. -func (s *TermvectorsService) Index(index string) *TermvectorsService { - s.index = index - return s -} - -// Type of the document. -// -// Deprecated: Types are in the process of being removed. -func (s *TermvectorsService) Type(typ string) *TermvectorsService { - s.typ = typ - return s -} - -// Id of the document. -func (s *TermvectorsService) Id(id string) *TermvectorsService { - s.id = id - return s -} - -// Dfs specifies if distributed frequencies should be returned instead of -// shard frequencies. -func (s *TermvectorsService) Dfs(dfs bool) *TermvectorsService { - s.dfs = &dfs - return s -} - -// Doc is the document to analyze. -func (s *TermvectorsService) Doc(doc interface{}) *TermvectorsService { - s.doc = doc - return s -} - -// FieldStatistics specifies if document count, sum of document frequencies -// and sum of total term frequencies should be returned. -func (s *TermvectorsService) FieldStatistics(fieldStatistics bool) *TermvectorsService { - s.fieldStatistics = &fieldStatistics - return s -} - -// Fields is a list of fields to return.
-func (s *TermvectorsService) Fields(fields ...string) *TermvectorsService { - if s.fields == nil { - s.fields = make([]string, 0) - } - s.fields = append(s.fields, fields...) - return s -} - -// Filter adds terms filter settings. -func (s *TermvectorsService) Filter(filter *TermvectorsFilterSettings) *TermvectorsService { - s.filter = filter - return s -} - -// PerFieldAnalyzer allows specifying a different analyzer than the one -// at the field. -func (s *TermvectorsService) PerFieldAnalyzer(perFieldAnalyzer map[string]string) *TermvectorsService { - s.perFieldAnalyzer = perFieldAnalyzer - return s -} - -// Offsets specifies if term offsets should be returned. -func (s *TermvectorsService) Offsets(offsets bool) *TermvectorsService { - s.offsets = &offsets - return s -} - -// Parent id of documents. -func (s *TermvectorsService) Parent(parent string) *TermvectorsService { - s.parent = parent - return s -} - -// Payloads specifies if term payloads should be returned. -func (s *TermvectorsService) Payloads(payloads bool) *TermvectorsService { - s.payloads = &payloads - return s -} - -// Positions specifies if term positions should be returned. -func (s *TermvectorsService) Positions(positions bool) *TermvectorsService { - s.positions = &positions - return s -} - -// Preference specifies the node or shard the operation -// should be performed on (default: random). -func (s *TermvectorsService) Preference(preference string) *TermvectorsService { - s.preference = preference - return s -} - -// Realtime specifies if request is real-time as opposed to -// near-real-time (default: true). -func (s *TermvectorsService) Realtime(realtime bool) *TermvectorsService { - s.realtime = &realtime - return s -} - -// Routing is a specific routing value. -func (s *TermvectorsService) Routing(routing string) *TermvectorsService { - s.routing = routing - return s -} - -// TermStatistics specifies if total term frequency and document frequency -// should be returned. -func (s *TermvectorsService) TermStatistics(termStatistics bool) *TermvectorsService { - s.termStatistics = &termStatistics - return s -} - -// Version an explicit version number for concurrency control. -func (s *TermvectorsService) Version(version interface{}) *TermvectorsService { - s.version = version - return s -} - -// VersionType specifies a version type ("internal", "external", or "external_gte"). -func (s *TermvectorsService) VersionType(versionType string) *TermvectorsService { - s.versionType = versionType - return s -} - -// BodyJson defines the body parameters. See documentation. -func (s *TermvectorsService) BodyJson(body interface{}) *TermvectorsService { - s.bodyJson = body - return s -} - -// BodyString defines the body parameters as a string. See documentation. -func (s *TermvectorsService) BodyString(body string) *TermvectorsService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation.
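A sketch of driving the builder above via the constructor from this file (the index, id, and field names are made up; the response fields it reads appear further below):

func exampleTermvectors(ctx context.Context, client *elastic.Client) error {
	// Fetch term offsets, positions, and term statistics for the "text" field of doc 1.
	resp, err := elastic.NewTermvectorsService(client).
		Index("tweets").
		Id("1").
		Fields("text").
		Offsets(true).
		Positions(true).
		TermStatistics(true).
		Do(ctx)
	if err != nil {
		return err
	}
	for field, info := range resp.TermVectors {
		fmt.Printf("%s: %d terms\n", field, len(info.Terms))
	}
	return nil
}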
-func (s *TermvectorsService) buildURL() (string, url.Values, error) { - var pathParam = map[string]string{ - "index": s.index, - } - path := "/{index}" - var err error - - if s.typ != "" { - pathParam["type"] = s.typ - path += "/{type}" - } else { - path += "/_termvectors" - } - if s.id != "" { - pathParam["id"] = s.id - path += "/{id}" - } - if s.typ != "" { - path += "/_termvectors" - } - - path, err = uritemplates.Expand(path, pathParam) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.dfs; v != nil { - params.Set("dfs", fmt.Sprint(*v)) - } - if v := s.fieldStatistics; v != nil { - params.Set("field_statistics", fmt.Sprint(*v)) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if v := s.offsets; v != nil { - params.Set("offsets", fmt.Sprint(*v)) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if v := s.payloads; v != nil { - params.Set("payloads", fmt.Sprint(*v)) - } - if v := s.positions; v != nil { - params.Set("positions", fmt.Sprint(*v)) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if v := s.realtime; v != nil { - params.Set("realtime", fmt.Sprint(*v)) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if v := s.termStatistics; v != nil { - params.Set("term_statistics", fmt.Sprint(*v)) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%v", s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *TermvectorsService) Validate() error { - var invalid []string - if s.index == "" { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *TermvectorsService) Do(ctx context.Context) (*TermvectorsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else if s.bodyString != "" { - body = s.bodyString - } else { - data := make(map[string]interface{}) - if s.doc != nil { - data["doc"] = s.doc - } - if len(s.perFieldAnalyzer) > 0 { - data["per_field_analyzer"] = s.perFieldAnalyzer - } - if s.filter != nil { - src, err := s.filter.Source() - if err != nil { - return nil, err - } - data["filter"] = src - } - if len(data) > 0 { - body = data - } - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(TermvectorsResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// -- Filter settings -- - -// TermvectorsFilterSettings adds additional filters to a Termvectors request.
-// It allows filtering terms based on their tf-idf scores. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-termvectors.html#_terms_filtering -// for more information. -type TermvectorsFilterSettings struct { - maxNumTerms *int64 - minTermFreq *int64 - maxTermFreq *int64 - minDocFreq *int64 - maxDocFreq *int64 - minWordLength *int64 - maxWordLength *int64 -} - -// NewTermvectorsFilterSettings creates and initializes a new TermvectorsFilterSettings struct. -func NewTermvectorsFilterSettings() *TermvectorsFilterSettings { - return &TermvectorsFilterSettings{} -} - -// MaxNumTerms specifies the maximum number of terms that must be returned per field. -func (fs *TermvectorsFilterSettings) MaxNumTerms(value int64) *TermvectorsFilterSettings { - fs.maxNumTerms = &value - return fs -} - -// MinTermFreq ignores words with less than this frequency in the source doc. -func (fs *TermvectorsFilterSettings) MinTermFreq(value int64) *TermvectorsFilterSettings { - fs.minTermFreq = &value - return fs -} - -// MaxTermFreq ignores words with more than this frequency in the source doc. -func (fs *TermvectorsFilterSettings) MaxTermFreq(value int64) *TermvectorsFilterSettings { - fs.maxTermFreq = &value - return fs -} - -// MinDocFreq ignores terms which do not occur in at least this many docs. -func (fs *TermvectorsFilterSettings) MinDocFreq(value int64) *TermvectorsFilterSettings { - fs.minDocFreq = &value - return fs -} - -// MaxDocFreq ignores terms which occur in more than this many docs. -func (fs *TermvectorsFilterSettings) MaxDocFreq(value int64) *TermvectorsFilterSettings { - fs.maxDocFreq = &value - return fs -} - -// MinWordLength specifies the minimum word length below which words will be ignored. -func (fs *TermvectorsFilterSettings) MinWordLength(value int64) *TermvectorsFilterSettings { - fs.minWordLength = &value - return fs -} - -// MaxWordLength specifies the maximum word length above which words will be ignored. -func (fs *TermvectorsFilterSettings) MaxWordLength(value int64) *TermvectorsFilterSettings { - fs.maxWordLength = &value - return fs -} - -// Source returns JSON for the query.
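A sketch combining these filter settings with the Termvectors service above (the thresholds and names are arbitrary examples):

func exampleFilteredTermvectors(client *elastic.Client) *elastic.TermvectorsService {
	// Keep at most 25 mid-frequency terms: seen at least twice in the doc,
	// but in no more than 100 documents overall.
	fs := elastic.NewTermvectorsFilterSettings().
		MinTermFreq(2).
		MaxDocFreq(100).
		MaxNumTerms(25)
	return elastic.NewTermvectorsService(client).
		Index("tweets").
		Id("1").
		Fields("text").
		Filter(fs)
}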
-func (fs *TermvectorsFilterSettings) Source() (interface{}, error) { - source := make(map[string]interface{}) - if fs.maxNumTerms != nil { - source["max_num_terms"] = *fs.maxNumTerms - } - if fs.minTermFreq != nil { - source["min_term_freq"] = *fs.minTermFreq - } - if fs.maxTermFreq != nil { - source["max_term_freq"] = *fs.maxTermFreq - } - if fs.minDocFreq != nil { - source["min_doc_freq"] = *fs.minDocFreq - } - if fs.maxDocFreq != nil { - source["max_doc_freq"] = *fs.maxDocFreq - } - if fs.minWordLength != nil { - source["min_word_length"] = *fs.minWordLength - } - if fs.maxWordLength != nil { - source["max_word_length"] = *fs.maxWordLength - } - return source, nil -} - -// -- Response types -- - -type TokenInfo struct { - StartOffset int64 `json:"start_offset"` - EndOffset int64 `json:"end_offset"` - Position int64 `json:"position"` - Payload string `json:"payload"` -} - -type TermsInfo struct { - DocFreq int64 `json:"doc_freq"` - Score float64 `json:"score"` - TermFreq int64 `json:"term_freq"` - Ttf int64 `json:"ttf"` - Tokens []TokenInfo `json:"tokens"` -} - -type FieldStatistics struct { - DocCount int64 `json:"doc_count"` - SumDocFreq int64 `json:"sum_doc_freq"` - SumTtf int64 `json:"sum_ttf"` -} - -type TermVectorsFieldInfo struct { - FieldStatistics FieldStatistics `json:"field_statistics"` - Terms map[string]TermsInfo `json:"terms"` -} - -// TermvectorsResponse is the response of TermvectorsService.Do. -type TermvectorsResponse struct { - Index string `json:"_index"` - Type string `json:"_type"` - Id string `json:"_id,omitempty"` - Version int `json:"_version"` - Found bool `json:"found"` - Took int64 `json:"took"` - TermVectors map[string]TermVectorsFieldInfo `json:"term_vectors"` -} diff --git a/vendor/github.com/olivere/elastic/v7/update.go b/vendor/github.com/olivere/elastic/v7/update.go deleted file mode 100644 index 468032e..0000000 --- a/vendor/github.com/olivere/elastic/v7/update.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// UpdateService updates a document in Elasticsearch. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update.html -// for details. -type UpdateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index string - typ string - id string - routing string - parent string - script *Script - fields []string - fsc *FetchSourceContext - version *int64 - versionType string - retryOnConflict *int - refresh string - waitForActiveShards string - upsert interface{} - scriptedUpsert *bool - docAsUpsert *bool - detectNoop *bool - doc interface{} - timeout string - ifSeqNo *int64 - ifPrimaryTerm *int64 -} - -// NewUpdateService creates the service to update documents in Elasticsearch. -func NewUpdateService(client *Client) *UpdateService { - return &UpdateService{ - client: client, - typ: "_doc", - fields: make([]string, 0), - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. 
-func (s *UpdateService) Pretty(pretty bool) *UpdateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *UpdateService) Human(human bool) *UpdateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *UpdateService) ErrorTrace(errorTrace bool) *UpdateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *UpdateService) FilterPath(filterPath ...string) *UpdateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *UpdateService) Header(name string, value string) *UpdateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *UpdateService) Headers(headers http.Header) *UpdateService { - s.headers = headers - return s -} - -// Index is the name of the Elasticsearch index (required). -func (s *UpdateService) Index(name string) *UpdateService { - s.index = name - return s -} - -// Type is the type of the document. -// -// Deprecated: Types are in the process of being removed. -func (s *UpdateService) Type(typ string) *UpdateService { - s.typ = typ - return s -} - -// Id is the identifier of the document to update (required). -func (s *UpdateService) Id(id string) *UpdateService { - s.id = id - return s -} - -// Routing specifies a specific routing value. -func (s *UpdateService) Routing(routing string) *UpdateService { - s.routing = routing - return s -} - -// Parent sets the id of the parent document. -func (s *UpdateService) Parent(parent string) *UpdateService { - s.parent = parent - return s -} - -// Script is the script definition. -func (s *UpdateService) Script(script *Script) *UpdateService { - s.script = script - return s -} - -// RetryOnConflict specifies how many times the operation should be retried -// when a conflict occurs (default: 0). -func (s *UpdateService) RetryOnConflict(retryOnConflict int) *UpdateService { - s.retryOnConflict = &retryOnConflict - return s -} - -// Fields is a list of fields to return in the response. -func (s *UpdateService) Fields(fields ...string) *UpdateService { - s.fields = make([]string, 0, len(fields)) - s.fields = append(s.fields, fields...) - return s -} - -// Version defines the explicit version number for concurrency control. -func (s *UpdateService) Version(version int64) *UpdateService { - s.version = &version - return s -} - -// VersionType is e.g. "internal". -func (s *UpdateService) VersionType(versionType string) *UpdateService { - s.versionType = versionType - return s -} - -// Refresh the index after performing the update. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. -func (s *UpdateService) Refresh(refresh string) *UpdateService { - s.refresh = refresh - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active before -// proceeding with the update operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value less than -// or equal to the total number of copies for the shard (number of replicas + 1). 
-func (s *UpdateService) WaitForActiveShards(waitForActiveShards string) *UpdateService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// Doc allows for updating a partial document. -func (s *UpdateService) Doc(doc interface{}) *UpdateService { - s.doc = doc - return s -} - -// Upsert can be used to index the document when it doesn't exist yet. -// Use this e.g. to initialize a document with a default value. -func (s *UpdateService) Upsert(doc interface{}) *UpdateService { - s.upsert = doc - return s -} - -// DocAsUpsert can be used to insert the document if it doesn't already exist. -func (s *UpdateService) DocAsUpsert(docAsUpsert bool) *UpdateService { - s.docAsUpsert = &docAsUpsert - return s -} - -// DetectNoop will instruct Elasticsearch to check if changes will occur -// when updating via Doc. If there aren't any changes, the request will -// turn into a no-op. -func (s *UpdateService) DetectNoop(detectNoop bool) *UpdateService { - s.detectNoop = &detectNoop - return s -} - -// ScriptedUpsert should be set to true if the referenced script -// (defined in Script or ScriptId) should be called to perform an insert. -// The default is false. -func (s *UpdateService) ScriptedUpsert(scriptedUpsert bool) *UpdateService { - s.scriptedUpsert = &scriptedUpsert - return s -} - -// Timeout is an explicit timeout for the operation, e.g. "1000", "1s" or "500ms". -func (s *UpdateService) Timeout(timeout string) *UpdateService { - s.timeout = timeout - return s -} - -// IfSeqNo indicates to only perform the update operation if the last -// operation that has changed the document has the specified sequence number. -func (s *UpdateService) IfSeqNo(seqNo int64) *UpdateService { - s.ifSeqNo = &seqNo - return s -} - -// IfPrimaryTerm indicates to only perform the update operation if the -// last operation that has changed the document has the specified primary term. -func (s *UpdateService) IfPrimaryTerm(primaryTerm int64) *UpdateService { - s.ifPrimaryTerm = &primaryTerm - return s -} - -// FetchSource asks Elasticsearch to return the updated _source in the response. -func (s *UpdateService) FetchSource(fetchSource bool) *UpdateService { - if s.fsc == nil { - s.fsc = NewFetchSourceContext(fetchSource) - } else { - s.fsc.SetFetchSource(fetchSource) - } - return s -} - -// FetchSourceContext indicates that _source should be returned in the response, -// allowing wildcard patterns to be defined via FetchSourceContext. -func (s *UpdateService) FetchSourceContext(fetchSourceContext *FetchSourceContext) *UpdateService { - s.fsc = fetchSourceContext - return s -} - -// url returns the URL part of the document request.
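A sketch of optimistic concurrency control with the setters above (the index name, partial doc, and seq-no/primary-term values are placeholders one would take from a prior read, not values from this repo):

func exampleGuardedUpdate(ctx context.Context, client *elastic.Client) error {
	// Apply a partial update only if the document is unchanged since it was read
	// (seqNo/primaryTerm would come from the previous Get or Index response).
	_, err := elastic.NewUpdateService(client).
		Index("events").
		Id("1").
		Doc(map[string]interface{}{"trusted": true}).
		IfSeqNo(10).
		IfPrimaryTerm(1).
		Do(ctx)
	return err
}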
-func (s *UpdateService) url() (string, url.Values, error) { - // Build url - var path string - var err error - if s.typ == "" || s.typ == "_doc" { - path, err = uritemplates.Expand("/{index}/_update/{id}", map[string]string{ - "index": s.index, - "id": s.id, - }) - } else { - path, err = uritemplates.Expand("/{index}/{type}/{id}/_update", map[string]string{ - "index": s.index, - "type": s.typ, - "id": s.id, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.routing != "" { - params.Set("routing", s.routing) - } - if s.parent != "" { - params.Set("parent", s.parent) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if len(s.fields) > 0 { - params.Set("fields", strings.Join(s.fields, ",")) - } - if s.version != nil { - params.Set("version", fmt.Sprintf("%d", *s.version)) - } - if s.versionType != "" { - params.Set("version_type", s.versionType) - } - if s.retryOnConflict != nil { - params.Set("retry_on_conflict", fmt.Sprintf("%v", *s.retryOnConflict)) - } - if v := s.ifSeqNo; v != nil { - params.Set("if_seq_no", fmt.Sprintf("%d", *v)) - } - if v := s.ifPrimaryTerm; v != nil { - params.Set("if_primary_term", fmt.Sprintf("%d", *v)) - } - return path, params, nil -} - -// body returns the body part of the document request. -func (s *UpdateService) body() (interface{}, error) { - source := make(map[string]interface{}) - - if s.script != nil { - src, err := s.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - - if v := s.scriptedUpsert; v != nil { - source["scripted_upsert"] = *v - } - - if s.upsert != nil { - source["upsert"] = s.upsert - } - - if s.doc != nil { - source["doc"] = s.doc - } - if v := s.docAsUpsert; v != nil { - source["doc_as_upsert"] = *v - } - if v := s.detectNoop; v != nil { - source["detect_noop"] = *v - } - if s.fsc != nil { - src, err := s.fsc.Source() - if err != nil { - return nil, err - } - source["_source"] = src - } - - return source, nil -} - -// Do executes the update operation. -func (s *UpdateService) Do(ctx context.Context) (*UpdateResponse, error) { - path, params, err := s.url() - if err != nil { - return nil, err - } - - // Get body of the request - body, err := s.body() - if err != nil { - return nil, err - } - - // Get response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return result - ret := new(UpdateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// UpdateResponse is the result of updating a document in Elasticsearch. 
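And a doc-as-upsert sketch using the same service (field and index names are illustrative; the UpdateResponse type it reads follows just below):

func exampleUpsert(ctx context.Context, client *elastic.Client) (string, error) {
	// Partial update that creates the document from the partial doc when missing.
	res, err := elastic.NewUpdateService(client).
		Index("events").
		Id("1").
		Doc(map[string]interface{}{"remote_activity": "unknown"}).
		DocAsUpsert(true).
		Do(ctx)
	if err != nil {
		return "", err
	}
	return res.Result, nil // "created", "updated", or "noop"
}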
-type UpdateResponse struct { - Index string `json:"_index,omitempty"` - Type string `json:"_type,omitempty"` - Id string `json:"_id,omitempty"` - Version int64 `json:"_version,omitempty"` - Result string `json:"result,omitempty"` - Shards *ShardsInfo `json:"_shards,omitempty"` - SeqNo int64 `json:"_seq_no,omitempty"` - PrimaryTerm int64 `json:"_primary_term,omitempty"` - Status int `json:"status,omitempty"` - ForcedRefresh bool `json:"forced_refresh,omitempty"` - GetResult *GetResult `json:"get,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/update_by_query.go b/vendor/github.com/olivere/elastic/v7/update_by_query.go deleted file mode 100644 index 8fdffd3..0000000 --- a/vendor/github.com/olivere/elastic/v7/update_by_query.go +++ /dev/null @@ -1,773 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// UpdateByQueryService is documented at https://www.elastic.co/guide/en/elasticsearch/plugins/master/plugins-reindex.html. -type UpdateByQueryService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - typ []string - script *Script - query Query - body interface{} - xSource []string - xSourceExclude []string - xSourceInclude []string - allowNoIndices *bool - analyzeWildcard *bool - analyzer string - conflicts string - defaultOperator string - docvalueFields []string - df string - expandWildcards string - explain *bool - fielddataFields []string - from *int - ignoreUnavailable *bool - lenient *bool - lowercaseExpandedTerms *bool - pipeline string - preference string - q string - refresh string - requestCache *bool - requestsPerSecond *int - routing []string - scroll string - scrollSize *int - searchTimeout string - searchType string - size *int - slices interface{} - sort []string - stats []string - storedFields []string - suggestField string - suggestMode string - suggestSize *int - suggestText string - terminateAfter *int - timeout string - trackScores *bool - version *bool - versionType *bool - waitForActiveShards string - waitForCompletion *bool -} - -// NewUpdateByQueryService creates a new UpdateByQueryService. -func NewUpdateByQueryService(client *Client) *UpdateByQueryService { - return &UpdateByQueryService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *UpdateByQueryService) Pretty(pretty bool) *UpdateByQueryService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *UpdateByQueryService) Human(human bool) *UpdateByQueryService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *UpdateByQueryService) ErrorTrace(errorTrace bool) *UpdateByQueryService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. 
-func (s *UpdateByQueryService) FilterPath(filterPath ...string) *UpdateByQueryService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *UpdateByQueryService) Header(name string, value string) *UpdateByQueryService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *UpdateByQueryService) Headers(headers http.Header) *UpdateByQueryService { - s.headers = headers - return s -} - -// Index is a list of index names to search; use `_all` or empty string to -// perform the operation on all indices. -func (s *UpdateByQueryService) Index(index ...string) *UpdateByQueryService { - s.index = append(s.index, index...) - return s -} - -// Type is a list of document types to search; leave empty to perform -// the operation on all types. -func (s *UpdateByQueryService) Type(typ ...string) *UpdateByQueryService { - s.typ = append(s.typ, typ...) - return s -} - -// Script sets an update script. -func (s *UpdateByQueryService) Script(script *Script) *UpdateByQueryService { - s.script = script - return s -} - -// Body specifies the body of the request. It overrides data being specified via -// SearchService or Script. -func (s *UpdateByQueryService) Body(body string) *UpdateByQueryService { - s.body = body - return s -} - -// XSource is true or false to return the _source field or not, -// or a list of fields to return. -func (s *UpdateByQueryService) XSource(xSource ...string) *UpdateByQueryService { - s.xSource = append(s.xSource, xSource...) - return s -} - -// XSourceExclude represents a list of fields to exclude from the returned _source field. -func (s *UpdateByQueryService) XSourceExclude(xSourceExclude ...string) *UpdateByQueryService { - s.xSourceExclude = append(s.xSourceExclude, xSourceExclude...) - return s -} - -// XSourceInclude represents a list of fields to extract and return from the _source field. -func (s *UpdateByQueryService) XSourceInclude(xSourceInclude ...string) *UpdateByQueryService { - s.xSourceInclude = append(s.xSourceInclude, xSourceInclude...) - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices expression -// resolves into no concrete indices. (This includes `_all` string or when -// no indices have been specified). -func (s *UpdateByQueryService) AllowNoIndices(allowNoIndices bool) *UpdateByQueryService { - s.allowNoIndices = &allowNoIndices - return s -} - -// AnalyzeWildcard specifies whether wildcard and prefix queries should be -// analyzed (default: false). -func (s *UpdateByQueryService) AnalyzeWildcard(analyzeWildcard bool) *UpdateByQueryService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer specifies the analyzer to use for the query string. -func (s *UpdateByQueryService) Analyzer(analyzer string) *UpdateByQueryService { - s.analyzer = analyzer - return s -} - -// Conflicts indicates what to do when the process detects version conflicts. -// Possible values are "proceed" and "abort". -func (s *UpdateByQueryService) Conflicts(conflicts string) *UpdateByQueryService { - s.conflicts = conflicts - return s -} - -// AbortOnVersionConflict aborts the request on version conflicts. -// It is an alias to setting Conflicts("abort"). -func (s *UpdateByQueryService) AbortOnVersionConflict() *UpdateByQueryService { - s.conflicts = "abort" - return s -} - -// ProceedOnVersionConflict proceeds with the request on version conflicts.
-// It is an alias to setting Conflicts("proceed"). -func (s *UpdateByQueryService) ProceedOnVersionConflict() *UpdateByQueryService { - s.conflicts = "proceed" - return s -} - -// DefaultOperator is the default operator for query string query (AND or OR). -func (s *UpdateByQueryService) DefaultOperator(defaultOperator string) *UpdateByQueryService { - s.defaultOperator = defaultOperator - return s -} - -// DF specifies the field to use as default where no field prefix is given in the query string. -func (s *UpdateByQueryService) DF(df string) *UpdateByQueryService { - s.df = df - return s -} - -// DocvalueFields specifies the list of fields to return as the docvalue representation of a field for each hit. -func (s *UpdateByQueryService) DocvalueFields(docvalueFields ...string) *UpdateByQueryService { - s.docvalueFields = docvalueFields - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *UpdateByQueryService) ExpandWildcards(expandWildcards string) *UpdateByQueryService { - s.expandWildcards = expandWildcards - return s -} - -// Explain specifies whether to return detailed information about score -// computation as part of a hit. -func (s *UpdateByQueryService) Explain(explain bool) *UpdateByQueryService { - s.explain = &explain - return s -} - -// FielddataFields is a list of fields to return as the field data -// representation of a field for each hit. -func (s *UpdateByQueryService) FielddataFields(fielddataFields ...string) *UpdateByQueryService { - s.fielddataFields = append(s.fielddataFields, fielddataFields...) - return s -} - -// From is the starting offset (default: 0). -func (s *UpdateByQueryService) From(from int) *UpdateByQueryService { - s.from = &from - return s -} - -// IgnoreUnavailable indicates whether specified concrete indices should be -// ignored when unavailable (missing or closed). -func (s *UpdateByQueryService) IgnoreUnavailable(ignoreUnavailable bool) *UpdateByQueryService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// Lenient specifies whether format-based query failures -// (such as providing text to a numeric field) should be ignored. -func (s *UpdateByQueryService) Lenient(lenient bool) *UpdateByQueryService { - s.lenient = &lenient - return s -} - -// LowercaseExpandedTerms specifies whether query terms should be lowercased. -func (s *UpdateByQueryService) LowercaseExpandedTerms(lowercaseExpandedTerms bool) *UpdateByQueryService { - s.lowercaseExpandedTerms = &lowercaseExpandedTerms - return s -} - -// Pipeline specifies the ingest pipeline to set on index requests made by this action (default: none). -func (s *UpdateByQueryService) Pipeline(pipeline string) *UpdateByQueryService { - s.pipeline = pipeline - return s -} - -// Preference specifies the node or shard the operation should be performed on -// (default: random). -func (s *UpdateByQueryService) Preference(preference string) *UpdateByQueryService { - s.preference = preference - return s -} - -// Q specifies the query in the Lucene query string syntax. -func (s *UpdateByQueryService) Q(q string) *UpdateByQueryService { - s.q = q - return s -} - -// Query sets a query definition using the Query DSL. -func (s *UpdateByQueryService) Query(query Query) *UpdateByQueryService { - s.query = query - return s -} - -// Refresh indicates whether the effected indexes should be refreshed. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-refresh.html -// for details. 
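A hedged sketch of a scripted update-by-query using the conflict helpers above (NewScript and NewTermQuery are other constructors in this library; the Do method is defined later in this file, and the index, query, and script here are illustrative only):

func exampleFlagTrusted(ctx context.Context, client *elastic.Client) error {
	// Mark every event from user "alice" as trusted, proceeding past version conflicts.
	script := elastic.NewScript("ctx._source.trusted = params.t").Param("t", true)
	_, err := elastic.NewUpdateByQueryService(client).
		Index("events").
		Query(elastic.NewTermQuery("user", "alice")).
		Script(script).
		ProceedOnVersionConflict().
		Do(ctx)
	return err
}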
-func (s *UpdateByQueryService) Refresh(refresh string) *UpdateByQueryService { - s.refresh = refresh - return s -} - -// RequestCache specifies if request cache should be used for this request -// or not, defaults to index level setting. -func (s *UpdateByQueryService) RequestCache(requestCache bool) *UpdateByQueryService { - s.requestCache = &requestCache - return s -} - -// RequestsPerSecond sets the throttle on this request in sub-requests per second. -// -1 means set no throttle, as does "unlimited", which is the only non-float this accepts. -func (s *UpdateByQueryService) RequestsPerSecond(requestsPerSecond int) *UpdateByQueryService { - s.requestsPerSecond = &requestsPerSecond - return s -} - -// Routing is a list of specific routing values. -func (s *UpdateByQueryService) Routing(routing ...string) *UpdateByQueryService { - s.routing = append(s.routing, routing...) - return s -} - -// Scroll specifies how long a consistent view of the index should be maintained -// for scrolled search. -func (s *UpdateByQueryService) Scroll(scroll string) *UpdateByQueryService { - s.scroll = scroll - return s -} - -// ScrollSize is the size on the scroll request powering the update_by_query. -func (s *UpdateByQueryService) ScrollSize(scrollSize int) *UpdateByQueryService { - s.scrollSize = &scrollSize - return s -} - -// SearchTimeout defines an explicit timeout for each search request. -// Defaults to no timeout. -func (s *UpdateByQueryService) SearchTimeout(searchTimeout string) *UpdateByQueryService { - s.searchTimeout = searchTimeout - return s -} - -// SearchType is the search operation type. Possible values are -// "query_then_fetch" and "dfs_query_then_fetch". -func (s *UpdateByQueryService) SearchType(searchType string) *UpdateByQueryService { - s.searchType = searchType - return s -} - -// Size represents the number of hits to return (default: 10). -func (s *UpdateByQueryService) Size(size int) *UpdateByQueryService { - s.size = &size - return s -} - -// Slices represents the number of slices (default: 1). -// It used to be a number, but can be set to "auto" as of 6.7. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-update-by-query.html#docs-update-by-query-slice -// for details. -func (s *UpdateByQueryService) Slices(slices interface{}) *UpdateByQueryService { - s.slices = slices - return s -} - -// Sort is a list of field:direction pairs. -func (s *UpdateByQueryService) Sort(sort ...string) *UpdateByQueryService { - s.sort = append(s.sort, sort...) - return s -} - -// SortByField adds a sort order. -func (s *UpdateByQueryService) SortByField(field string, ascending bool) *UpdateByQueryService { - if ascending { - s.sort = append(s.sort, fmt.Sprintf("%s:asc", field)) - } else { - s.sort = append(s.sort, fmt.Sprintf("%s:desc", field)) - } - return s -} - -// Stats specifies specific tag(s) of the request for logging and statistical purposes. -func (s *UpdateByQueryService) Stats(stats ...string) *UpdateByQueryService { - s.stats = append(s.stats, stats...) - return s -} - -// StoredFields specifies the list of stored fields to return as part of a hit. -func (s *UpdateByQueryService) StoredFields(storedFields ...string) *UpdateByQueryService { - s.storedFields = storedFields - return s -} - -// SuggestField specifies which field to use for suggestions. -func (s *UpdateByQueryService) SuggestField(suggestField string) *UpdateByQueryService { - s.suggestField = suggestField - return s -} - -// SuggestMode specifies the suggest mode.
Possible values are -// "missing", "popular", and "always". -func (s *UpdateByQueryService) SuggestMode(suggestMode string) *UpdateByQueryService { - s.suggestMode = suggestMode - return s -} - -// SuggestSize specifies how many suggestions to return in response. -func (s *UpdateByQueryService) SuggestSize(suggestSize int) *UpdateByQueryService { - s.suggestSize = &suggestSize - return s -} - -// SuggestText specifies the source text for which the suggestions should be returned. -func (s *UpdateByQueryService) SuggestText(suggestText string) *UpdateByQueryService { - s.suggestText = suggestText - return s -} - -// TerminateAfter indicates the maximum number of documents to collect -// for each shard, upon reaching which the query execution will terminate early. -func (s *UpdateByQueryService) TerminateAfter(terminateAfter int) *UpdateByQueryService { - s.terminateAfter = &terminateAfter - return s -} - -// Timeout is the time each individual bulk request should wait for shards -// that are unavailable. -func (s *UpdateByQueryService) Timeout(timeout string) *UpdateByQueryService { - s.timeout = timeout - return s -} - -// TimeoutInMillis sets the timeout in milliseconds. -func (s *UpdateByQueryService) TimeoutInMillis(timeoutInMillis int) *UpdateByQueryService { - s.timeout = fmt.Sprintf("%dms", timeoutInMillis) - return s -} - -// TrackScores indicates whether to calculate and return scores even if -// they are not used for sorting. -func (s *UpdateByQueryService) TrackScores(trackScores bool) *UpdateByQueryService { - s.trackScores = &trackScores - return s -} - -// Version specifies whether to return document version as part of a hit. -func (s *UpdateByQueryService) Version(version bool) *UpdateByQueryService { - s.version = &version - return s -} - -// VersionType indicates whether the document should increment the version number (internal) -// on hit or not (reindex). -func (s *UpdateByQueryService) VersionType(versionType bool) *UpdateByQueryService { - s.versionType = &versionType - return s -} - -// WaitForActiveShards sets the number of shard copies that must be active before proceeding -// with the update by query operation. Defaults to 1, meaning the primary shard only. -// Set to `all` for all shard copies, otherwise set to any non-negative value less than or equal -// to the total number of copies for the shard (number of replicas + 1). -func (s *UpdateByQueryService) WaitForActiveShards(waitForActiveShards string) *UpdateByQueryService { - s.waitForActiveShards = waitForActiveShards - return s -} - -// WaitForCompletion indicates if the request should block until the reindex is complete. -func (s *UpdateByQueryService) WaitForCompletion(waitForCompletion bool) *UpdateByQueryService { - s.waitForCompletion = &waitForCompletion - return s -} - -// buildURL builds the URL for the operation.
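Before the URL-building internals, here is how the fluent API above is normally driven: every setter returns the service, so a request is assembled as one chain and executed with Do (defined further down). A minimal sketch, assuming a reachable cluster; the index name, field names, and script contents are hypothetical:

```go
package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Client setup; the URL is an assumption for local testing.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}

	// Chain the setters documented above and execute synchronously.
	res, err := elastic.NewUpdateByQueryService(client).
		Index("ffsevents").                                   // required; Validate() rejects an empty index list
		Query(elastic.NewTermQuery("user.username", "jdoe")). // Query DSL body
		Script(elastic.NewScript("ctx._source.trusted = false")).
		ProceedOnVersionConflict(). // alias for Conflicts("proceed")
		Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated %d documents", res.Updated)
}
```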
-func (s *UpdateByQueryService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_update_by_query", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else { - path, err = uritemplates.Expand("/{index}/_update_by_query", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if len(s.xSource) > 0 { - params.Set("_source", strings.Join(s.xSource, ",")) - } - if len(s.xSourceExclude) > 0 { - params.Set("_source_excludes", strings.Join(s.xSourceExclude, ",")) - } - if len(s.xSourceInclude) > 0 { - params.Set("_source_includes", strings.Join(s.xSourceInclude, ",")) - } - if s.allowNoIndices != nil { - params.Set("allow_no_indices", fmt.Sprintf("%v", *s.allowNoIndices)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if v := s.analyzeWildcard; v != nil { - params.Set("analyze_wildcard", fmt.Sprint(*v)) - } - if s.conflicts != "" { - params.Set("conflicts", s.conflicts) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if s.df != "" { - params.Set("df", s.df) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if v := s.explain; v != nil { - params.Set("explain", fmt.Sprint(*v)) - } - if len(s.storedFields) > 0 { - params.Set("stored_fields", strings.Join(s.storedFields, ",")) - } - if len(s.docvalueFields) > 0 { - params.Set("docvalue_fields", strings.Join(s.docvalueFields, ",")) - } - if len(s.fielddataFields) > 0 { - params.Set("fielddata_fields", strings.Join(s.fielddataFields, ",")) - } - if s.from != nil { - params.Set("from", fmt.Sprintf("%d", *s.from)) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - if v := s.lenient; v != nil { - params.Set("lenient", fmt.Sprint(*v)) - } - if v := s.lowercaseExpandedTerms; v != nil { - params.Set("lowercase_expanded_terms", fmt.Sprint(*v)) - } - if s.pipeline != "" { - params.Set("pipeline", s.pipeline) - } - if s.preference != "" { - params.Set("preference", s.preference) - } - if s.q != "" { - params.Set("q", s.q) - } - if s.refresh != "" { - params.Set("refresh", s.refresh) - } - if v := s.requestCache; v != nil { - params.Set("request_cache", fmt.Sprint(*v)) - } - if len(s.routing) > 0 { - params.Set("routing", strings.Join(s.routing, ",")) - } - if s.scroll != "" { - params.Set("scroll", s.scroll) - } - if s.scrollSize != nil { - params.Set("scroll_size", fmt.Sprintf("%d", *s.scrollSize)) - } - if s.searchTimeout != "" { - params.Set("search_timeout", s.searchTimeout) - } - if s.searchType != "" { - params.Set("search_type", s.searchType) - } - if s.size != nil { - params.Set("size", fmt.Sprintf("%d", *s.size)) - } - if s.slices != nil { - params.Set("slices", fmt.Sprintf("%v", s.slices)) - } - if len(s.sort) > 0 { - params.Set("sort", strings.Join(s.sort, ",")) - } - if len(s.stats) > 0 { - params.Set("stats", strings.Join(s.stats, ",")) - } - if s.suggestField != "" { - 
params.Set("suggest_field", s.suggestField) - } - if s.suggestMode != "" { - params.Set("suggest_mode", s.suggestMode) - } - if s.suggestSize != nil { - params.Set("suggest_size", fmt.Sprintf("%v", *s.suggestSize)) - } - if s.suggestText != "" { - params.Set("suggest_text", s.suggestText) - } - if s.terminateAfter != nil { - params.Set("terminate_after", fmt.Sprintf("%v", *s.terminateAfter)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if v := s.trackScores; v != nil { - params.Set("track_scores", fmt.Sprint(*v)) - } - if v := s.version; v != nil { - params.Set("version", fmt.Sprint(*v)) - } - if v := s.versionType; v != nil { - params.Set("version_type", fmt.Sprint(*v)) - } - if s.waitForActiveShards != "" { - params.Set("wait_for_active_shards", s.waitForActiveShards) - } - if v := s.waitForCompletion; v != nil { - params.Set("wait_for_completion", fmt.Sprint(*v)) - } - if s.requestsPerSecond != nil { - params.Set("requests_per_second", fmt.Sprintf("%v", *s.requestsPerSecond)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *UpdateByQueryService) Validate() error { - var invalid []string - if len(s.index) == 0 { - invalid = append(invalid, "Index") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// getBody returns the body part of the document request. -func (s *UpdateByQueryService) getBody() (interface{}, error) { - if s.body != nil { - return s.body, nil - } - source := make(map[string]interface{}) - if s.script != nil { - src, err := s.script.Source() - if err != nil { - return nil, err - } - source["script"] = src - } - if s.query != nil { - src, err := s.query.Source() - if err != nil { - return nil, err - } - source["query"] = src - } - return source, nil -} - -// Do executes the operation. -func (s *UpdateByQueryService) Do(ctx context.Context) (*BulkIndexByScrollResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.getBody() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - IgnoreErrors: []int{http.StatusConflict}, - }) - if err != nil { - return nil, err - } - - // Return operation response (BulkIndexByScrollResponse is defined in DeleteByQuery) - ret := new(BulkIndexByScrollResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// DoAsync executes the update-by-query operation asynchronously by starting a new task. -// Callers need to use the Task Management API to watch the outcome of the reindexing -// operation. 
-func (s *UpdateByQueryService) DoAsync(ctx context.Context) (*StartTaskResult, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // DoAsync only makes sense with WaitForCompletion set to false - if s.waitForCompletion != nil && *s.waitForCompletion { - return nil, fmt.Errorf("cannot start a task with WaitForCompletion set to true") - } - f := false - s.waitForCompletion = &f - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - body, err := s.getBody() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - IgnoreErrors: []int{http.StatusConflict}, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(StartTaskResult) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} diff --git a/vendor/github.com/olivere/elastic/v7/uritemplates/LICENSE b/vendor/github.com/olivere/elastic/v7/uritemplates/LICENSE deleted file mode 100644 index de9c88c..0000000 --- a/vendor/github.com/olivere/elastic/v7/uritemplates/LICENSE +++ /dev/null @@ -1,18 +0,0 @@ -Copyright (c) 2013 Joshua Tacoma - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/olivere/elastic/v7/uritemplates/uritemplates.go b/vendor/github.com/olivere/elastic/v7/uritemplates/uritemplates.go deleted file mode 100644 index fdb9cb4..0000000 --- a/vendor/github.com/olivere/elastic/v7/uritemplates/uritemplates.go +++ /dev/null @@ -1,359 +0,0 @@ -// Copyright 2013 Joshua Tacoma. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uritemplates is a level 4 implementation of RFC 6570 (URI -// Template, http://tools.ietf.org/html/rfc6570).
-// -// To use uritemplates, parse a template string and expand it with a value -// map: -// -// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}") -// values := make(map[string]interface{}) -// values["user"] = "jtacoma" -// values["repo"] = "uritemplates" -// expanded, _ := template.Expand(values) -// fmt.Printf(expanded) -// -package uritemplates - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]") - reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]") - validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$") - hex = []byte("0123456789ABCDEF") -) - -func pctEncode(src []byte) []byte { - dst := make([]byte, len(src)*3) - for i, b := range src { - buf := dst[i*3 : i*3+3] - buf[0] = 0x25 - buf[1] = hex[b/16] - buf[2] = hex[b%16] - } - return dst -} - -func escape(s string, allowReserved bool) (escaped string) { - if allowReserved { - escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode)) - } else { - escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode)) - } - return escaped -} - -// A UriTemplate is a parsed representation of a URI template. -type UriTemplate struct { - raw string - parts []templatePart -} - -// Parse parses a URI template string into a UriTemplate object. -func Parse(rawtemplate string) (template *UriTemplate, err error) { - template = new(UriTemplate) - template.raw = rawtemplate - split := strings.Split(rawtemplate, "{") - template.parts = make([]templatePart, len(split)*2-1) - for i, s := range split { - if i == 0 { - if strings.Contains(s, "}") { - err = errors.New("unexpected }") - break - } - template.parts[i].raw = s - } else { - subsplit := strings.Split(s, "}") - if len(subsplit) != 2 { - err = errors.New("malformed template") - break - } - expression := subsplit[0] - template.parts[i*2-1], err = parseExpression(expression) - if err != nil { - break - } - template.parts[i*2].raw = subsplit[1] - } - } - if err != nil { - template = nil - } - return template, err -} - -type templatePart struct { - raw string - terms []templateTerm - first string - sep string - named bool - ifemp string - allowReserved bool -} - -type templateTerm struct { - name string - explode bool - truncate int -} - -func parseExpression(expression string) (result templatePart, err error) { - switch expression[0] { - case '+': - result.sep = "," - result.allowReserved = true - expression = expression[1:] - case '.': - result.first = "." - result.sep = "." - expression = expression[1:] - case '/': - result.first = "/" - result.sep = "/" - expression = expression[1:] - case ';': - result.first = ";" - result.sep = ";" - result.named = true - expression = expression[1:] - case '?': - result.first = "?" 
- result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '&': - result.first = "&" - result.sep = "&" - result.named = true - result.ifemp = "=" - expression = expression[1:] - case '#': - result.first = "#" - result.sep = "," - result.allowReserved = true - expression = expression[1:] - default: - result.sep = "," - } - rawterms := strings.Split(expression, ",") - result.terms = make([]templateTerm, len(rawterms)) - for i, raw := range rawterms { - result.terms[i], err = parseTerm(raw) - if err != nil { - break - } - } - return result, err -} - -func parseTerm(term string) (result templateTerm, err error) { - if strings.HasSuffix(term, "*") { - result.explode = true - term = term[:len(term)-1] - } - split := strings.Split(term, ":") - if len(split) == 1 { - result.name = term - } else if len(split) == 2 { - result.name = split[0] - var parsed int64 - parsed, err = strconv.ParseInt(split[1], 10, 0) - result.truncate = int(parsed) - } else { - err = errors.New("multiple colons in same term") - } - if !validname.MatchString(result.name) { - err = errors.New("not a valid name: " + result.name) - } - if result.explode && result.truncate > 0 { - err = errors.New("both explode and prefix modifers on same term") - } - return result, err -} - -// Expand expands a URI template with a set of values to produce a string. -func (self *UriTemplate) Expand(value interface{}) (string, error) { - values, ismap := value.(map[string]interface{}) - if !ismap { - if m, ismap := struct2map(value); !ismap { - return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.") - } else { - return self.Expand(m) - } - } - var buf bytes.Buffer - for _, p := range self.parts { - err := p.expand(&buf, values) - if err != nil { - return "", err - } - } - return buf.String(), nil -} - -func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error { - if len(self.raw) > 0 { - buf.WriteString(self.raw) - return nil - } - var zeroLen = buf.Len() - buf.WriteString(self.first) - var firstLen = buf.Len() - for _, term := range self.terms { - value, exists := values[term.name] - if !exists { - continue - } - if buf.Len() != firstLen { - buf.WriteString(self.sep) - } - switch v := value.(type) { - case string: - self.expandString(buf, term, v) - case []interface{}: - self.expandArray(buf, term, v) - case map[string]interface{}: - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, v) - default: - if m, ismap := struct2map(value); ismap { - if term.truncate > 0 { - return errors.New("cannot truncate a map expansion") - } - self.expandMap(buf, term, m) - } else { - str := fmt.Sprintf("%v", value) - self.expandString(buf, term, str) - } - } - } - if buf.Len() == firstLen { - original := buf.Bytes()[:zeroLen] - buf.Reset() - buf.Write(original) - } - return nil -} - -func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) { - if self.named { - buf.WriteString(name) - if empty { - buf.WriteString(self.ifemp) - } else { - buf.WriteString("=") - } - } -} - -func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) { - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - self.expandName(buf, t.name, len(s) == 0) - buf.WriteString(escape(s, self.allowReserved)) -} - -func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) { - if len(a) == 0 { - return - } else if !t.explode { - 
self.expandName(buf, t.name, false) - } - for i, value := range a { - if t.explode && i > 0 { - buf.WriteString(self.sep) - } else if i > 0 { - buf.WriteString(",") - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if len(s) > t.truncate && t.truncate > 0 { - s = s[:t.truncate] - } - if self.named && t.explode { - self.expandName(buf, t.name, len(s) == 0) - } - buf.WriteString(escape(s, self.allowReserved)) - } -} - -func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) { - if len(m) == 0 { - return - } - if !t.explode { - self.expandName(buf, t.name, len(m) == 0) - } - var firstLen = buf.Len() - for k, value := range m { - if firstLen != buf.Len() { - if t.explode { - buf.WriteString(self.sep) - } else { - buf.WriteString(",") - } - } - var s string - switch v := value.(type) { - case string: - s = v - default: - s = fmt.Sprintf("%v", v) - } - if t.explode { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune('=') - buf.WriteString(escape(s, self.allowReserved)) - } else { - buf.WriteString(escape(k, self.allowReserved)) - buf.WriteRune(',') - buf.WriteString(escape(s, self.allowReserved)) - } - } -} - -func struct2map(v interface{}) (map[string]interface{}, bool) { - value := reflect.ValueOf(v) - switch value.Type().Kind() { - case reflect.Ptr: - return struct2map(value.Elem().Interface()) - case reflect.Struct: - m := make(map[string]interface{}) - for i := 0; i < value.NumField(); i++ { - tag := value.Type().Field(i).Tag - var name string - if strings.Contains(string(tag), ":") { - name = tag.Get("uri") - } else { - name = strings.TrimSpace(string(tag)) - } - if len(name) == 0 { - name = value.Type().Field(i).Name - } - m[name] = value.Field(i).Interface() - } - return m, true - } - return nil, false -} diff --git a/vendor/github.com/olivere/elastic/v7/uritemplates/utils.go b/vendor/github.com/olivere/elastic/v7/uritemplates/utils.go deleted file mode 100644 index 399ef46..0000000 --- a/vendor/github.com/olivere/elastic/v7/uritemplates/utils.go +++ /dev/null @@ -1,13 +0,0 @@ -package uritemplates - -func Expand(path string, expansions map[string]string) (string, error) { - template, err := Parse(path) - if err != nil { - return "", err - } - values := make(map[string]interface{}) - for k, v := range expansions { - values[k] = v - } - return template.Expand(values) -} diff --git a/vendor/github.com/olivere/elastic/v7/validate.go b/vendor/github.com/olivere/elastic/v7/validate.go deleted file mode 100644 index 7ed9158..0000000 --- a/vendor/github.com/olivere/elastic/v7/validate.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// ValidateService allows a user to validate a potentially -// expensive query without executing it. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/search-validate.html. 
-type ValidateService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - index []string - typ []string - q string - explain *bool - rewrite *bool - allShards *bool - lenient *bool - analyzer string - df string - analyzeWildcard *bool - defaultOperator string - ignoreUnavailable *bool - allowNoIndices *bool - expandWildcards string - bodyJson interface{} - bodyString string -} - -// NewValidateService creates a new ValidateService. -func NewValidateService(client *Client) *ValidateService { - return &ValidateService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *ValidateService) Pretty(pretty bool) *ValidateService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *ValidateService) Human(human bool) *ValidateService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *ValidateService) ErrorTrace(errorTrace bool) *ValidateService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *ValidateService) FilterPath(filterPath ...string) *ValidateService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *ValidateService) Header(name string, value string) *ValidateService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *ValidateService) Headers(headers http.Header) *ValidateService { - s.headers = headers - return s -} - -// Index sets the names of the indices to use for search. -func (s *ValidateService) Index(index ...string) *ValidateService { - s.index = append(s.index, index...) - return s -} - -// Type adds search restrictions for a list of types. -// -// Deprecated: Types are in the process of being removed. Instead of using a type, prefer to -// filter on a field on the document. -func (s *ValidateService) Type(typ ...string) *ValidateService { - s.typ = append(s.typ, typ...) - return s -} - -// Lenient specifies whether format-based query failures -// (such as providing text to a numeric field) should be ignored. -func (s *ValidateService) Lenient(lenient bool) *ValidateService { - s.lenient = &lenient - return s -} - -// Query in the Lucene query string syntax. -func (s *ValidateService) Q(q string) *ValidateService { - s.q = q - return s -} - -// An explain parameter can be specified to get more detailed information about why a query failed. -func (s *ValidateService) Explain(explain *bool) *ValidateService { - s.explain = explain - return s -} - -// Provide a more detailed explanation showing the actual Lucene query that will be executed. -func (s *ValidateService) Rewrite(rewrite *bool) *ValidateService { - s.rewrite = rewrite - return s -} - -// Execute validation on all shards instead of one random shard per index. 
-func (s *ValidateService) AllShards(allShards *bool) *ValidateService { - s.allShards = allShards - return s -} - -// AnalyzeWildcard specifies whether wildcards and prefix queries -// in the query string query should be analyzed (default: false). -func (s *ValidateService) AnalyzeWildcard(analyzeWildcard bool) *ValidateService { - s.analyzeWildcard = &analyzeWildcard - return s -} - -// Analyzer is the analyzer for the query string query. -func (s *ValidateService) Analyzer(analyzer string) *ValidateService { - s.analyzer = analyzer - return s -} - -// Df is the default field for query string query (default: _all). -func (s *ValidateService) Df(df string) *ValidateService { - s.df = df - return s -} - -// DefaultOperator is the default operator for query string query (AND or OR). -func (s *ValidateService) DefaultOperator(defaultOperator string) *ValidateService { - s.defaultOperator = defaultOperator - return s -} - -// Query sets a query definition using the Query DSL. -func (s *ValidateService) Query(query Query) *ValidateService { - src, err := query.Source() - if err != nil { - // Do nothing in case of an error - return s - } - body := make(map[string]interface{}) - body["query"] = src - s.bodyJson = body - return s -} - -// IgnoreUnavailable indicates whether the specified concrete indices -// should be ignored when unavailable (missing or closed). -func (s *ValidateService) IgnoreUnavailable(ignoreUnavailable bool) *ValidateService { - s.ignoreUnavailable = &ignoreUnavailable - return s -} - -// AllowNoIndices indicates whether to ignore if a wildcard indices -// expression resolves into no concrete indices. (This includes `_all` string -// or when no indices have been specified). -func (s *ValidateService) AllowNoIndices(allowNoIndices bool) *ValidateService { - s.allowNoIndices = &allowNoIndices - return s -} - -// ExpandWildcards indicates whether to expand wildcard expression to -// concrete indices that are open, closed or both. -func (s *ValidateService) ExpandWildcards(expandWildcards string) *ValidateService { - s.expandWildcards = expandWildcards - return s -} - -// BodyJson sets the query definition using the Query DSL. -func (s *ValidateService) BodyJson(body interface{}) *ValidateService { - s.bodyJson = body - return s -} - -// BodyString sets the query definition using the Query DSL as a string. -func (s *ValidateService) BodyString(body string) *ValidateService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. 
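Driving these setters end to end looks roughly like the sketch below, before the buildURL internals that follow; it assumes an initialized *elastic.Client, and the index and field names are hypothetical. Note that Explain, Rewrite, and AllShards take *bool here, unlike the plain bool setters elsewhere in the package.

```go
import (
	"context"

	"github.com/olivere/elastic/v7"
)

// validateQuery checks a Query DSL body without executing it.
func validateQuery(ctx context.Context, client *elastic.Client) (bool, error) {
	explain := true
	res, err := elastic.NewValidateService(client).
		Index("ffsevents"). // hypothetical index
		Query(elastic.NewTermQuery("user.username", "jdoe")).
		Explain(&explain). // this service takes *bool
		Do(ctx)
	if err != nil {
		return false, err
	}
	// res.Explanations carries the details when the query is invalid.
	return res.Valid, nil
}
```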
-func (s *ValidateService) buildURL() (string, url.Values, error) { - var err error - var path string - // Build URL - if len(s.index) > 0 && len(s.typ) > 0 { - path, err = uritemplates.Expand("/{index}/{type}/_validate/query", map[string]string{ - "index": strings.Join(s.index, ","), - "type": strings.Join(s.typ, ","), - }) - } else if len(s.index) > 0 { - path, err = uritemplates.Expand("/{index}/_validate/query", map[string]string{ - "index": strings.Join(s.index, ","), - }) - } else { - path, err = uritemplates.Expand("/_validate/query", map[string]string{ - "type": strings.Join(s.typ, ","), - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.explain != nil { - params.Set("explain", fmt.Sprintf("%v", *s.explain)) - } - if s.rewrite != nil { - params.Set("rewrite", fmt.Sprintf("%v", *s.rewrite)) - } - if s.allShards != nil { - params.Set("all_shards", fmt.Sprintf("%v", *s.allShards)) - } - if s.defaultOperator != "" { - params.Set("default_operator", s.defaultOperator) - } - if v := s.lenient; v != nil { - params.Set("lenient", fmt.Sprint(*v)) - } - if s.q != "" { - params.Set("q", s.q) - } - if v := s.analyzeWildcard; v != nil { - params.Set("analyze_wildcard", fmt.Sprint(*v)) - } - if s.analyzer != "" { - params.Set("analyzer", s.analyzer) - } - if s.df != "" { - params.Set("df", s.df) - } - if v := s.allowNoIndices; v != nil { - params.Set("allow_no_indices", fmt.Sprint(*v)) - } - if s.expandWildcards != "" { - params.Set("expand_wildcards", s.expandWildcards) - } - if v := s.ignoreUnavailable; v != nil { - params.Set("ignore_unavailable", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *ValidateService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *ValidateService) Do(ctx context.Context) (*ValidateResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(ValidateResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// ValidateResponse is the response of ValidateService.Do. -type ValidateResponse struct { - Valid bool `json:"valid"` - Shards map[string]interface{} `json:"_shards"` - Explanations []interface{} `json:"explanations"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_ilm_delete_lifecycle.go b/vendor/github.com/olivere/elastic/v7/xpack_ilm_delete_lifecycle.go deleted file mode 100644 index bd60fde..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_ilm_delete_lifecycle.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. 
All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// See the documentation at -// https://www.elastic.co/guide/en/elasticsearch/reference/6.7/ilm-delete-lifecycle.html. -type XPackIlmDeleteLifecycleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - policy string - timeout string - masterTimeout string - flatSettings *bool - local *bool -} - -// NewXPackIlmDeleteLifecycleService creates a new XPackIlmDeleteLifecycleService. -func NewXPackIlmDeleteLifecycleService(client *Client) *XPackIlmDeleteLifecycleService { - return &XPackIlmDeleteLifecycleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackIlmDeleteLifecycleService) Pretty(pretty bool) *XPackIlmDeleteLifecycleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackIlmDeleteLifecycleService) Human(human bool) *XPackIlmDeleteLifecycleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackIlmDeleteLifecycleService) ErrorTrace(errorTrace bool) *XPackIlmDeleteLifecycleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackIlmDeleteLifecycleService) FilterPath(filterPath ...string) *XPackIlmDeleteLifecycleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackIlmDeleteLifecycleService) Header(name string, value string) *XPackIlmDeleteLifecycleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackIlmDeleteLifecycleService) Headers(headers http.Header) *XPackIlmDeleteLifecycleService { - s.headers = headers - return s -} - -// Policy is the name of the index lifecycle policy. -func (s *XPackIlmDeleteLifecycleService) Policy(policy string) *XPackIlmDeleteLifecycleService { - s.policy = policy - return s -} - -// Timeout is an explicit operation timeout. -func (s *XPackIlmDeleteLifecycleService) Timeout(timeout string) *XPackIlmDeleteLifecycleService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *XPackIlmDeleteLifecycleService) MasterTimeout(masterTimeout string) *XPackIlmDeleteLifecycleService { - s.masterTimeout = masterTimeout - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *XPackIlmDeleteLifecycleService) FlatSettings(flatSettings bool) *XPackIlmDeleteLifecycleService { - s.flatSettings = &flatSettings - return s -} - -// buildURL builds the URL for the operation.
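Once the policy name is set, deleting it is a single call; a minimal sketch before the buildURL details below, assuming an initialized *elastic.Client ("ffs-policy" is a hypothetical policy name):

```go
import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// deletePolicy removes an ILM policy by name.
func deletePolicy(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackIlmDeleteLifecycleService(client).
		Policy("ffs-policy").
		Do(ctx)
	if err != nil {
		return err
	}
	if !res.Acknowledged {
		return fmt.Errorf("delete of ILM policy %q was not acknowledged", "ffs-policy")
	}
	return nil
}
```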
-func (s *XPackIlmDeleteLifecycleService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - path, err = uritemplates.Expand("/_ilm/policy/{policy}", map[string]string{ - "policy": s.policy, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.flatSettings; v != nil { - params.Set("flat_settings", fmt.Sprint(*v)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackIlmDeleteLifecycleService) Validate() error { - var invalid []string - if s.policy == "" { - invalid = append(invalid, "Policy") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackIlmDeleteLifecycleService) Do(ctx context.Context) (*XPackIlmDeleteLifecycleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackIlmDeleteLifecycleResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackIlmDeleteLifecycleResponse is the response of XPackIlmDeleteLifecycleService.Do. -type XPackIlmDeleteLifecycleResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_ilm_get_lifecycle.go b/vendor/github.com/olivere/elastic/v7/xpack_ilm_get_lifecycle.go deleted file mode 100644 index 3a52c36..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_ilm_get_lifecycle.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// See the documentation at -// https://www.elastic.co/guide/en/elasticsearch/reference/6.7/ilm-get-lifecycle.html. -type XPackIlmGetLifecycleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - policy []string - timeout string - masterTimeout string - flatSettings *bool - local *bool -} - -// NewXPackIlmGetLifecycleService creates a new XPackIlmGetLifecycleService.
-func NewXPackIlmGetLifecycleService(client *Client) *XPackIlmGetLifecycleService { - return &XPackIlmGetLifecycleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackIlmGetLifecycleService) Pretty(pretty bool) *XPackIlmGetLifecycleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackIlmGetLifecycleService) Human(human bool) *XPackIlmGetLifecycleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackIlmGetLifecycleService) ErrorTrace(errorTrace bool) *XPackIlmGetLifecycleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackIlmGetLifecycleService) FilterPath(filterPath ...string) *XPackIlmGetLifecycleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackIlmGetLifecycleService) Header(name string, value string) *XPackIlmGetLifecycleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackIlmGetLifecycleService) Headers(headers http.Header) *XPackIlmGetLifecycleService { - s.headers = headers - return s -} - -// Policy adds one or more index lifecycle policy names to fetch. -func (s *XPackIlmGetLifecycleService) Policy(policies ...string) *XPackIlmGetLifecycleService { - s.policy = append(s.policy, policies...) - return s -} - -// Timeout is an explicit operation timeout. -func (s *XPackIlmGetLifecycleService) Timeout(timeout string) *XPackIlmGetLifecycleService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *XPackIlmGetLifecycleService) MasterTimeout(masterTimeout string) *XPackIlmGetLifecycleService { - s.masterTimeout = masterTimeout - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *XPackIlmGetLifecycleService) FlatSettings(flatSettings bool) *XPackIlmGetLifecycleService { - s.flatSettings = &flatSettings - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackIlmGetLifecycleService) buildURL() (string, url.Values, error) { - // Build URL - var err error - var path string - if len(s.policy) > 0 { - path, err = uritemplates.Expand("/_ilm/policy/{policy}", map[string]string{ - "policy": strings.Join(s.policy, ","), - }) - } else { - path = "/_ilm/policy" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.flatSettings; v != nil { - params.Set("flat_settings", fmt.Sprint(*v)) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.local; v != nil { - params.Set("local", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid.
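Because buildURL falls back to /_ilm/policy when no names are given, calling the service without a Policy filter lists every policy. A sketch, assuming an initialized *elastic.Client; Do (shown below) returns a map keyed by policy name:

```go
import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// listPolicies fetches all ILM policies and logs their versions.
func listPolicies(ctx context.Context, client *elastic.Client) error {
	policies, err := elastic.NewXPackIlmGetLifecycleService(client).Do(ctx)
	if err != nil {
		return err
	}
	for name, p := range policies {
		log.Printf("%s: version %d, modified %s", name, p.Version, p.ModifiedDate)
	}
	return nil
}
```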
-func (s *XPackIlmGetLifecycleService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *XPackIlmGetLifecycleService) Do(ctx context.Context) (map[string]*XPackIlmGetLifecycleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - var ret map[string]*XPackIlmGetLifecycleResponse - if err := s.client.decoder.Decode(res.Body, &ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackIlmGetLifecycleResponse is the response of XPackIlmGetLifecycleService.Do. -type XPackIlmGetLifecycleResponse struct { - Version int `json:"version,omitempty"` - ModifiedDate string `json:"modified_date,omitempty"` // e.g. "2019-10-03T17:43:42.720Z" - Policy map[string]interface{} `json:"policy,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_ilm_put_lifecycle.go b/vendor/github.com/olivere/elastic/v7/xpack_ilm_put_lifecycle.go deleted file mode 100644 index f2e8ba1..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_ilm_put_lifecycle.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-present Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// See the documentation at -// https://www.elastic.co/guide/en/elasticsearch/reference/6.7/ilm-put-lifecycle.html -type XPackIlmPutLifecycleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - policy string - timeout string - masterTimeout string - flatSettings *bool - bodyJson interface{} - bodyString string -} - -// NewXPackIlmPutLifecycleService creates a new XPackIlmPutLifecycleService. -func NewXPackIlmPutLifecycleService(client *Client) *XPackIlmPutLifecycleService { - return &XPackIlmPutLifecycleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackIlmPutLifecycleService) Pretty(pretty bool) *XPackIlmPutLifecycleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackIlmPutLifecycleService) Human(human bool) *XPackIlmPutLifecycleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackIlmPutLifecycleService) ErrorTrace(errorTrace bool) *XPackIlmPutLifecycleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackIlmPutLifecycleService) FilterPath(filterPath ...string) *XPackIlmPutLifecycleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
-func (s *XPackIlmPutLifecycleService) Header(name string, value string) *XPackIlmPutLifecycleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackIlmPutLifecycleService) Headers(headers http.Header) *XPackIlmPutLifecycleService { - s.headers = headers - return s -} - -// Policy is the name of the index lifecycle policy. -func (s *XPackIlmPutLifecycleService) Policy(policy string) *XPackIlmPutLifecycleService { - s.policy = policy - return s -} - -// Timeout is an explicit operation timeout. -func (s *XPackIlmPutLifecycleService) Timeout(timeout string) *XPackIlmPutLifecycleService { - s.timeout = timeout - return s -} - -// MasterTimeout specifies the timeout for connection to master. -func (s *XPackIlmPutLifecycleService) MasterTimeout(masterTimeout string) *XPackIlmPutLifecycleService { - s.masterTimeout = masterTimeout - return s -} - -// FlatSettings indicates whether to return settings in flat format (default: false). -func (s *XPackIlmPutLifecycleService) FlatSettings(flatSettings bool) *XPackIlmPutLifecycleService { - s.flatSettings = &flatSettings - return s -} - -// BodyJson sets the lifecycle policy definition. -func (s *XPackIlmPutLifecycleService) BodyJson(body interface{}) *XPackIlmPutLifecycleService { - s.bodyJson = body - return s -} - -// BodyString sets the lifecycle policy definition as a JSON string. -func (s *XPackIlmPutLifecycleService) BodyString(body string) *XPackIlmPutLifecycleService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackIlmPutLifecycleService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_ilm/policy/{policy}", map[string]string{ - "policy": s.policy, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.timeout != "" { - params.Set("timeout", s.timeout) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.flatSettings; v != nil { - params.Set("flat_settings", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackIlmPutLifecycleService) Validate() error { - var invalid []string - if s.policy == "" { - invalid = append(invalid, "Policy") - } - if s.bodyString == "" && s.bodyJson == nil { - invalid = append(invalid, "BodyJson") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation.
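Putting a policy pairs a name with a JSON body via BodyString or BodyJson and runs Do (implemented next). A sketch, assuming an initialized *elastic.Client; the policy name and the hot/delete phase definitions are hypothetical:

```go
import (
	"context"

	"github.com/olivere/elastic/v7"
)

// putPolicy creates or updates an ILM policy from a JSON string.
func putPolicy(ctx context.Context, client *elastic.Client) error {
	policy := `{
	  "policy": {
	    "phases": {
	      "hot":    {"actions": {"rollover": {"max_age": "7d"}}},
	      "delete": {"min_age": "30d", "actions": {"delete": {}}}
	    }
	  }
	}`
	_, err := elastic.NewXPackIlmPutLifecycleService(client).
		Policy("ffs-policy").
		BodyString(policy).
		Do(ctx)
	return err
}
```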
-func (s *XPackIlmPutLifecycleService) Do(ctx context.Context) (*XPackIlmPutLifecycleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackIlmPutLifecycleResponse) - if err := s.client.decoder.Decode(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackIlmPutLifecycleResponse is the response of XPackIlmPutLifecycleService.Do. -type XPackIlmPutLifecycleResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_info.go b/vendor/github.com/olivere/elastic/v7/xpack_info.go deleted file mode 100644 index d9cbeed..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_info.go +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackInfoService retrieves xpack info. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/info-api.html. -type XPackInfoService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers -} - -// NewXPackInfoService creates a new XPackInfoService. -func NewXPackInfoService(client *Client) *XPackInfoService { - return &XPackInfoService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackInfoService) Pretty(pretty bool) *XPackInfoService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackInfoService) Human(human bool) *XPackInfoService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackInfoService) ErrorTrace(errorTrace bool) *XPackInfoService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackInfoService) FilterPath(filterPath ...string) *XPackInfoService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackInfoService) Header(name string, value string) *XPackInfoService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackInfoService) Headers(headers http.Header) *XPackInfoService { - s.headers = headers - return s -} - -// buildURL builds the URL for the operation.
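The info service takes no required parameters; a sketch of reading the license and one feature flag from the response types defined below, assuming an initialized *elastic.Client:

```go
import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// clusterInfo fetches the X-Pack build, license, and feature flags.
func clusterInfo(ctx context.Context, client *elastic.Client) error {
	info, err := elastic.NewXPackInfoService(client).Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("license %s (%s), security enabled: %v",
		info.License.Type, info.License.Status, info.Features.Security.Enabled)
	return nil
}
```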
-func (s *XPackInfoService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_xpack", map[string]string{}) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackInfoService) Validate() error { - var invalid []string - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackInfoService) Do(ctx context.Context) (*XPackInfoServiceResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := XPackInfoServiceResponse{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return &ret, nil -} - -// XPackInfoServiceResponse is the response of XPackInfoService.Do. -type XPackInfoServiceResponse struct { - Build XPackInfoBuild `json:"build"` - License XPackInfoLicense `json:"license"` - Features XPackInfoFeatures `json:"features"` - Tagline string `json:"tagline"` -} - -// XPackInfoBuild is the xpack build info -type XPackInfoBuild struct { - Hash string `json:"hash"` - Date string `json:"date"` -} - -// XPackInfoLicense is the xpack license info -type XPackInfoLicense struct { - UID string `json:"uid"` - Type string `json:"type"` - Mode string `json:"mode"` - Status string `json:"status"` - ExpiryMilis int `json:"expiry_date_in_millis"` -} - -// XPackInfoFeatures is the xpack feature info object -type XPackInfoFeatures struct { - Graph XPackInfoGraph `json:"graph"` - Logstash XPackInfoLogstash `json:"logstash"` - MachineLearning XPackInfoML `json:"ml"` - Monitoring XPackInfoMonitoring `json:"monitoring"` - Rollup XPackInfoRollup `json:"rollup"` - Security XPackInfoSecurity `json:"security"` - Watcher XPackInfoWatcher `json:"watcher"` -} - -// XPackInfoGraph is the xpack graph plugin info -type XPackInfoGraph struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} - -// XPackInfoLogstash is the xpack logstash plugin info -type XPackInfoLogstash struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} - -// XPackInfoML is the xpack machine learning plugin info -type XPackInfoML struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` - NativeCodeInfo map[string]string `json:"native_code_info"` -} - -// XPackInfoMonitoring is the xpack monitoring plugin info -type XPackInfoMonitoring struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} - -// XPackInfoRollup is the xpack rollup plugin info -type XPackInfoRollup struct { - 
Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} - -// XPackInfoSecurity is the xpack security plugin info -type XPackInfoSecurity struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} - -// XPackInfoWatcher is the xpack watcher plugin info -type XPackInfoWatcher struct { - Description string `json:"description"` - Available bool `json:"available"` - Enabled bool `json:"enabled"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_change_password.go b/vendor/github.com/olivere/elastic/v7/xpack_security_change_password.go deleted file mode 100644 index 34174a1..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_change_password.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityChangePasswordService changes a native user's password. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.1/security-api-change-password.html. -type XPackSecurityChangePasswordService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - username string - password string - refresh string - body interface{} -} - -// NewXPackSecurityChangePasswordService creates a new XPackSecurityChangePasswordService. -func NewXPackSecurityChangePasswordService(client *Client) *XPackSecurityChangePasswordService { - return &XPackSecurityChangePasswordService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityChangePasswordService) Pretty(pretty bool) *XPackSecurityChangePasswordService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityChangePasswordService) Human(human bool) *XPackSecurityChangePasswordService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityChangePasswordService) ErrorTrace(errorTrace bool) *XPackSecurityChangePasswordService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityChangePasswordService) FilterPath(filterPath ...string) *XPackSecurityChangePasswordService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityChangePasswordService) Header(name string, value string) *XPackSecurityChangePasswordService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityChangePasswordService) Headers(headers http.Header) *XPackSecurityChangePasswordService { - s.headers = headers - return s -} - -// Username is name of the user to change. 
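The xpack_info.go file removed above exposed the cluster info API (GET /_xpack) through the same fluent-builder pattern as the other services in this diff. A minimal, hedged sketch of a call site follows; the cluster URL is illustrative, and the constructor and response fields are taken from the deleted code itself.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/olivere/elastic/v7"
)

func main() {
	// Illustrative local cluster; adjust the URL for a real deployment.
	client, err := elastic.NewClient(elastic.SetURL("http://localhost:9200"))
	if err != nil {
		log.Fatal(err)
	}
	// Issues GET /_xpack and decodes the build/license/feature summary.
	info, err := elastic.NewXPackInfoService(client).Do(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(info.License.Status, info.Tagline)
}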
-func (s *XPackSecurityChangePasswordService) Username(username string) *XPackSecurityChangePasswordService { - s.username = username - return s -} - -// Password is the new value of the password. -func (s *XPackSecurityChangePasswordService) Password(password string) *XPackSecurityChangePasswordService { - s.password = password - return s -} - -// Refresh, if "true" (the default), refreshes the affected shards to make this operation -// visible to search, if "wait_for" then wait for a refresh to make this operation visible -// to search, if "false" then do nothing with refreshes. -func (s *XPackSecurityChangePasswordService) Refresh(refresh string) *XPackSecurityChangePasswordService { - s.refresh = refresh - return s -} - -// Body specifies the password. Use a string or a type that will get serialized as JSON. -func (s *XPackSecurityChangePasswordService) Body(body interface{}) *XPackSecurityChangePasswordService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityChangePasswordService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_xpack/security/user/{username}/_password", map[string]string{ - "username": s.username, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.refresh; v != "" { - params.Set("refresh", v) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityChangePasswordService) Validate() error { - var invalid []string - if s.username == "" { - invalid = append(invalid, "Username") - } - if s.password == "" && s.body == nil { - invalid = append(invalid, "Body") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityChangePasswordService) Do(ctx context.Context) (*XPackSecurityChangeUserPasswordResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - var body interface{} - if s.body != nil { - body = s.body - } else { - body = map[string]interface{}{ - "password": s.password, - } - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityChangeUserPasswordResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityChangeUserPasswordResponse is the response of -// XPackSecurityChangePasswordService.Do. -// -// A successful call returns an empty JSON structure: {}.
-type XPackSecurityChangeUserPasswordResponse struct { -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role.go b/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role.go deleted file mode 100644 index d9c7cb9..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityDeleteRoleService deletes a role by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-delete-role.html. -type XPackSecurityDeleteRoleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string -} - -// NewXPackSecurityDeleteRoleService creates a new XPackSecurityDeleteRoleService. -func NewXPackSecurityDeleteRoleService(client *Client) *XPackSecurityDeleteRoleService { - return &XPackSecurityDeleteRoleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityDeleteRoleService) Pretty(pretty bool) *XPackSecurityDeleteRoleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityDeleteRoleService) Human(human bool) *XPackSecurityDeleteRoleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityDeleteRoleService) ErrorTrace(errorTrace bool) *XPackSecurityDeleteRoleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityDeleteRoleService) FilterPath(filterPath ...string) *XPackSecurityDeleteRoleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityDeleteRoleService) Header(name string, value string) *XPackSecurityDeleteRoleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityDeleteRoleService) Headers(headers http.Header) *XPackSecurityDeleteRoleService { - s.headers = headers - return s -} - -// Name is name of the role to delete. -func (s *XPackSecurityDeleteRoleService) Name(name string) *XPackSecurityDeleteRoleService { - s.name = name - return s -} - -// buildURL builds the URL for the operation.
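For reference, a minimal sketch of how the XPackSecurityChangePasswordService deleted above was typically driven; client is assumed to be an initialized *elastic.Client, and the username and password values are illustrative.

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// changePassword updates a native-realm user's password; when no explicit
// Body is set, Do serializes the password into the request body itself.
func changePassword(ctx context.Context, client *elastic.Client) error {
	_, err := elastic.NewXPackSecurityChangePasswordService(client).
		Username("jdoe").    // illustrative user
		Password("s3cr3t!"). // illustrative password
		Refresh("wait_for").
		Do(ctx)
	return err
}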
-func (s *XPackSecurityDeleteRoleService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityDeleteRoleService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityDeleteRoleService) Do(ctx context.Context) (*XPackSecurityDeleteRoleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityDeleteRoleResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityDeleteRoleResponse is the response of XPackSecurityDeleteRoleService.Do. -type XPackSecurityDeleteRoleResponse struct { - Found bool `json:"found"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role_mapping.go b/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role_mapping.go deleted file mode 100644 index 05b564b..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_role_mapping.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityDeleteRoleMappingService deletes a role mapping by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-delete-role-mapping.html. -type XPackSecurityDeleteRoleMappingService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string -} - -// NewXPackSecurityDeleteRoleMappingService creates a new XPackSecurityDeleteRoleMappingService. -func NewXPackSecurityDeleteRoleMappingService(client *Client) *XPackSecurityDeleteRoleMappingService { - return &XPackSecurityDeleteRoleMappingService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response.
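A hedged sketch of a call site for the XPackSecurityDeleteRoleService whose deletion completes above; the role name is a caller-supplied value, and client is assumed to be initialized.

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// deleteRole issues DELETE /_security/role/{name} and reports whether
// the role existed before deletion.
func deleteRole(ctx context.Context, client *elastic.Client, name string) (bool, error) {
	res, err := elastic.NewXPackSecurityDeleteRoleService(client).Name(name).Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Found, nil
}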
-func (s *XPackSecurityDeleteRoleMappingService) Pretty(pretty bool) *XPackSecurityDeleteRoleMappingService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityDeleteRoleMappingService) Human(human bool) *XPackSecurityDeleteRoleMappingService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityDeleteRoleMappingService) ErrorTrace(errorTrace bool) *XPackSecurityDeleteRoleMappingService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityDeleteRoleMappingService) FilterPath(filterPath ...string) *XPackSecurityDeleteRoleMappingService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityDeleteRoleMappingService) Header(name string, value string) *XPackSecurityDeleteRoleMappingService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityDeleteRoleMappingService) Headers(headers http.Header) *XPackSecurityDeleteRoleMappingService { - s.headers = headers - return s -} - -// Name is name of the role mapping to delete. -func (s *XPackSecurityDeleteRoleMappingService) Name(name string) *XPackSecurityDeleteRoleMappingService { - s.name = name - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityDeleteRoleMappingService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role_mapping/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityDeleteRoleMappingService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityDeleteRoleMappingService) Do(ctx context.Context) (*XPackSecurityDeleteRoleMappingResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityDeleteRoleMappingResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityDeleteRoleMappingResponse is the response of XPackSecurityDeleteRoleMappingService.Do. 
-type XPackSecurityDeleteRoleMappingResponse struct { - Found bool `json:"found"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_user.go b/vendor/github.com/olivere/elastic/v7/xpack_security_delete_user.go deleted file mode 100644 index 1e079f7..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_delete_user.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2012-2019 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityDeleteUserService deletes a user by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/security-api-delete-user.html. -type XPackSecurityDeleteUserService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - username string - refresh string -} - -// NewXPackSecurityDeleteUserService creates a new XPackSecurityDeleteUserService. -func NewXPackSecurityDeleteUserService(client *Client) *XPackSecurityDeleteUserService { - return &XPackSecurityDeleteUserService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityDeleteUserService) Pretty(pretty bool) *XPackSecurityDeleteUserService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityDeleteUserService) Human(human bool) *XPackSecurityDeleteUserService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityDeleteUserService) ErrorTrace(errorTrace bool) *XPackSecurityDeleteUserService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityDeleteUserService) FilterPath(filterPath ...string) *XPackSecurityDeleteUserService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityDeleteUserService) Header(name string, value string) *XPackSecurityDeleteUserService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityDeleteUserService) Headers(headers http.Header) *XPackSecurityDeleteUserService { - s.headers = headers - return s -} - -// Username is name of the user to delete. -func (s *XPackSecurityDeleteUserService) Username(username string) *XPackSecurityDeleteUserService { - s.username = username - return s -} - -// Refresh specifies if and how to wait for refreshing the shards after the request. -// Possible values are "true" (default), "false" and "wait_for", all of type string. -func (s *XPackSecurityDeleteUserService) Refresh(refresh string) *XPackSecurityDeleteUserService { - s.refresh = refresh - return s -} - -// buildURL builds the URL for the operation.
-func (s *XPackSecurityDeleteUserService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/user/{username}", map[string]string{ - "username": s.username, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.refresh; v != "" { - params.Set("refresh", v) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityDeleteUserService) Validate() error { - var invalid []string - if s.username == "" { - invalid = append(invalid, "Username") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityDeleteUserService) Do(ctx context.Context) (*XPackSecurityDeleteUserResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityDeleteUserResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityDeleteUserResponse is the response of XPackSecurityDeleteUserService.Do. -type XPackSecurityDeleteUserResponse struct { - Found bool `json:"found"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_disable_user.go b/vendor/github.com/olivere/elastic/v7/xpack_security_disable_user.go deleted file mode 100644 index 846c0e2..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_disable_user.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2012-2019 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityDisableUserService disables a user by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-user.html. -type XPackSecurityDisableUserService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - username string - refresh string -} - -// NewXPackSecurityDisableUserService creates a new XPackSecurityDisableUserService. -func NewXPackSecurityDisableUserService(client *Client) *XPackSecurityDisableUserService { - return &XPackSecurityDisableUserService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response.
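A sketch for the XPackSecurityDeleteUserService removed above, assuming an initialized client; note that Refresh accepts the string values "true", "false" and "wait_for", per the deleted doc comments.

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// deleteUser issues DELETE /_security/user/{username}; Found reports
// whether the user existed before deletion.
func deleteUser(ctx context.Context, client *elastic.Client, username string) (bool, error) {
	res, err := elastic.NewXPackSecurityDeleteUserService(client).
		Username(username).
		Refresh("wait_for").
		Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Found, nil
}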
-func (s *XPackSecurityDisableUserService) Pretty(pretty bool) *XPackSecurityDisableUserService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityDisableUserService) Human(human bool) *XPackSecurityDisableUserService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityDisableUserService) ErrorTrace(errorTrace bool) *XPackSecurityDisableUserService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityDisableUserService) FilterPath(filterPath ...string) *XPackSecurityDisableUserService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityDisableUserService) Header(name string, value string) *XPackSecurityDisableUserService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityDisableUserService) Headers(headers http.Header) *XPackSecurityDisableUserService { - s.headers = headers - return s -} - -// Username is name of the user to disable. -func (s *XPackSecurityDisableUserService) Username(username string) *XPackSecurityDisableUserService { - s.username = username - return s -} - -// Refresh specifies if and how to wait for refreshing the shards after the request. -// Possible values are "true" (default), "false" and "wait_for", all of type string. -func (s *XPackSecurityDisableUserService) Refresh(refresh string) *XPackSecurityDisableUserService { - s.refresh = refresh - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityDisableUserService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/user/{username}/_disable", map[string]string{ - "username": s.username, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.refresh; v != "" { - params.Set("refresh", v) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityDisableUserService) Validate() error { - var invalid []string - if s.username == "" { - invalid = append(invalid, "Username") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *XPackSecurityDisableUserService) Do(ctx context.Context) (*XPackSecurityDisableUserResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityDisableUserResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityDisableUserResponse is the response of XPackSecurityDisableUserService.Do. -type XPackSecurityDisableUserResponse struct { -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_enable_user.go b/vendor/github.com/olivere/elastic/v7/xpack_security_enable_user.go deleted file mode 100644 index c04b917..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_enable_user.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2012-2019 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityEnableUserService enables a user by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-user.html. -type XPackSecurityEnableUserService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - username string - refresh string -} - -// NewXPackSecurityEnableUserService creates a new XPackSecurityEnableUserService. -func NewXPackSecurityEnableUserService(client *Client) *XPackSecurityEnableUserService { - return &XPackSecurityEnableUserService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityEnableUserService) Pretty(pretty bool) *XPackSecurityEnableUserService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityEnableUserService) Human(human bool) *XPackSecurityEnableUserService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityEnableUserService) ErrorTrace(errorTrace bool) *XPackSecurityEnableUserService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityEnableUserService) FilterPath(filterPath ...string) *XPackSecurityEnableUserService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityEnableUserService) Header(name string, value string) *XPackSecurityEnableUserService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request.
-func (s *XPackSecurityEnableUserService) Headers(headers http.Header) *XPackSecurityEnableUserService { - s.headers = headers - return s -} - -// Username is name of the user to enable. -func (s *XPackSecurityEnableUserService) Username(username string) *XPackSecurityEnableUserService { - s.username = username - return s -} - -// Refresh specifies if and how to wait for refreshing the shards after the request. -// Possible values are "true" (default), "false" and "wait_for", all of type string. -func (s *XPackSecurityEnableUserService) Refresh(refresh string) *XPackSecurityEnableUserService { - s.refresh = refresh - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityEnableUserService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/user/{username}/_enable", map[string]string{ - "username": s.username, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.refresh; v != "" { - params.Set("refresh", v) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityEnableUserService) Validate() error { - var invalid []string - if s.username == "" { - invalid = append(invalid, "Username") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityEnableUserService) Do(ctx context.Context) (*XPackSecurityEnableUserResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityEnableUserResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityEnableUserResponse is the response of XPackSecurityEnableUserService.Do. -type XPackSecurityEnableUserResponse struct { -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_get_role.go b/vendor/github.com/olivere/elastic/v7/xpack_security_get_role.go deleted file mode 100644 index c2ab0fd..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_get_role.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityGetRoleService retrieves a role by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-role.html.
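The disable and enable services above are mirror images of each other (PUT .../_disable versus .../_enable), so call sites often wrap them in a single helper; a hedged sketch under the usual initialized-client assumption:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// setUserEnabled toggles a user between the enabled and disabled states.
func setUserEnabled(ctx context.Context, client *elastic.Client, username string, enabled bool) error {
	var err error
	if enabled {
		_, err = elastic.NewXPackSecurityEnableUserService(client).Username(username).Do(ctx)
	} else {
		_, err = elastic.NewXPackSecurityDisableUserService(client).Username(username).Do(ctx)
	}
	return err
}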
-type XPackSecurityGetRoleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string -} - -// NewXPackSecurityGetRoleService creates a new XPackSecurityGetRoleService. -func NewXPackSecurityGetRoleService(client *Client) *XPackSecurityGetRoleService { - return &XPackSecurityGetRoleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityGetRoleService) Pretty(pretty bool) *XPackSecurityGetRoleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityGetRoleService) Human(human bool) *XPackSecurityGetRoleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityGetRoleService) ErrorTrace(errorTrace bool) *XPackSecurityGetRoleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityGetRoleService) FilterPath(filterPath ...string) *XPackSecurityGetRoleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityGetRoleService) Header(name string, value string) *XPackSecurityGetRoleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityGetRoleService) Headers(headers http.Header) *XPackSecurityGetRoleService { - s.headers = headers - return s -} - -// Name is name of the role to retrieve. -func (s *XPackSecurityGetRoleService) Name(name string) *XPackSecurityGetRoleService { - s.name = name - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityGetRoleService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityGetRoleService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *XPackSecurityGetRoleService) Do(ctx context.Context) (*XPackSecurityGetRoleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := XPackSecurityGetRoleResponse{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return &ret, nil -} - -// XPackSecurityGetRoleResponse is the response of XPackSecurityGetRoleService.Do. -type XPackSecurityGetRoleResponse map[string]XPackSecurityRole - -// XPackSecurityRole is the role object. -// -// The Java source for this struct is defined here: -// https://github.com/elastic/elasticsearch/blob/6.7/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java -type XPackSecurityRole struct { - Cluster []string `json:"cluster"` - Indices []XPackSecurityIndicesPermissions `json:"indices"` - Applications []XPackSecurityApplicationPrivileges `json:"applications"` - RunAs []string `json:"run_as"` - Global map[string]interface{} `json:"global"` - Metadata map[string]interface{} `json:"metadata"` - TransientMetadata map[string]interface{} `json:"transient_metadata"` -} - -// XPackSecurityApplicationPrivileges is the application privileges object -type XPackSecurityApplicationPrivileges struct { - Application string `json:"application"` - Privileges []string `json:"privileges"` - Ressources []string `json:"resources"` -} - -// XPackSecurityIndicesPermissions is the indices permission object -type XPackSecurityIndicesPermissions struct { - Names []string `json:"names"` - Privileges []string `json:"privileges"` - FieldSecurity interface{} `json:"field_security,omitempty"` - Query string `json:"query"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_get_role_mapping.go b/vendor/github.com/olivere/elastic/v7/xpack_security_get_role_mapping.go deleted file mode 100644 index 24230be..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_get_role_mapping.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityGetRoleMappingService retrieves a role mapping by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-role-mapping.html. -type XPackSecurityGetRoleMappingService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string -} - -// NewXPackSecurityGetRoleMappingService creates a new XPackSecurityGetRoleMappingService. 
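Since XPackSecurityGetRoleResponse is a map keyed by role name, callers range over the dereferenced response; a minimal sketch (the role name is whatever the caller passes in):

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// printRole fetches one role and summarizes its cluster and index privileges.
func printRole(ctx context.Context, client *elastic.Client, name string) error {
	res, err := elastic.NewXPackSecurityGetRoleService(client).Name(name).Do(ctx)
	if err != nil {
		return err
	}
	// The response maps role name to its descriptor.
	for roleName, role := range *res {
		fmt.Printf("%s: cluster=%v, %d index permission(s)\n", roleName, role.Cluster, len(role.Indices))
	}
	return nil
}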
-func NewXPackSecurityGetRoleMappingService(client *Client) *XPackSecurityGetRoleMappingService { - return &XPackSecurityGetRoleMappingService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityGetRoleMappingService) Pretty(pretty bool) *XPackSecurityGetRoleMappingService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityGetRoleMappingService) Human(human bool) *XPackSecurityGetRoleMappingService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityGetRoleMappingService) ErrorTrace(errorTrace bool) *XPackSecurityGetRoleMappingService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityGetRoleMappingService) FilterPath(filterPath ...string) *XPackSecurityGetRoleMappingService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityGetRoleMappingService) Header(name string, value string) *XPackSecurityGetRoleMappingService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityGetRoleMappingService) Headers(headers http.Header) *XPackSecurityGetRoleMappingService { - s.headers = headers - return s -} - -// Name is name of the role mapping to retrieve. -func (s *XPackSecurityGetRoleMappingService) Name(name string) *XPackSecurityGetRoleMappingService { - s.name = name - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityGetRoleMappingService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role_mapping/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityGetRoleMappingService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. 
-func (s *XPackSecurityGetRoleMappingService) Do(ctx context.Context) (*XPackSecurityGetRoleMappingResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := XPackSecurityGetRoleMappingResponse{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return &ret, nil -} - -// XPackSecurityGetRoleMappingResponse is the response of XPackSecurityGetRoleMappingService.Do. -type XPackSecurityGetRoleMappingResponse map[string]XPackSecurityRoleMapping - -// XPackSecurityRoleMapping is the role mapping object -type XPackSecurityRoleMapping struct { - Enabled bool `json:"enabled"` - Roles []string `json:"roles"` - Rules map[string]interface{} `json:"rules"` - Metadata interface{} `json:"metadata"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_get_user.go b/vendor/github.com/olivere/elastic/v7/xpack_security_get_user.go deleted file mode 100644 index 50a179b..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_get_user.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright 2012-2019 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityGetUserService retrieves a user by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-get-user.html. -type XPackSecurityGetUserService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - usernames []string -} - -// NewXPackSecurityGetUserService creates a new XPackSecurityGetUserService. -func NewXPackSecurityGetUserService(client *Client) *XPackSecurityGetUserService { - return &XPackSecurityGetUserService{ - client: client, - } -} - -// Pretty indicates that the JSON response be indented and human readable. -func (s *XPackSecurityGetUserService) Pretty(pretty bool) *XPackSecurityGetUserService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityGetUserService) Human(human bool) *XPackSecurityGetUserService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityGetUserService) ErrorTrace(errorTrace bool) *XPackSecurityGetUserService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityGetUserService) FilterPath(filterPath ...string) *XPackSecurityGetUserService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
-func (s *XPackSecurityGetUserService) Header(name string, value string) *XPackSecurityGetUserService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityGetUserService) Headers(headers http.Header) *XPackSecurityGetUserService { - s.headers = headers - return s -} - -// Usernames are the names of one or more users to retrieve. -func (s *XPackSecurityGetUserService) Usernames(usernames ...string) *XPackSecurityGetUserService { - for _, username := range usernames { - if v := strings.TrimSpace(username); v != "" { - s.usernames = append(s.usernames, v) - } - } - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityGetUserService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - if len(s.usernames) > 0 { - path, err = uritemplates.Expand("/_security/user/{username}", map[string]string{ - "username": strings.Join(s.usernames, ","), - }) - } else { - path = "/_security/user" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityGetUserService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *XPackSecurityGetUserService) Do(ctx context.Context) (*XPackSecurityGetUserResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := XPackSecurityGetUserResponse{} - if err := json.Unmarshal(res.Body, &ret); err != nil { - return nil, err - } - return &ret, nil -} - -// XPackSecurityGetUserResponse is the response of XPackSecurityGetUserService.Do. -type XPackSecurityGetUserResponse map[string]XPackSecurityUser - -// XPackSecurityUser is the user object. -// -// The Java source for this struct is defined here: -// https://github.com/elastic/elasticsearch/blob/7.3/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/User.java -type XPackSecurityUser struct { - Username string `json:"username"` - Roles []string `json:"roles"` - Fullname string `json:"full_name"` - Email string `json:"email"` - Metadata map[string]interface{} `json:"metadata"` - Enabled bool `json:"enabled"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_put_role.go b/vendor/github.com/olivere/elastic/v7/xpack_security_put_role.go deleted file mode 100644 index f3c869f..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_put_role.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
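Like the role APIs, the user lookup above returns a map keyed by username, and the deleted buildURL shows that leaving Usernames() empty queries /_security/user for all users; a hedged sketch:

package example

import (
	"context"
	"fmt"

	"github.com/olivere/elastic/v7"
)

// listUsers prints the named users; with no usernames, the service
// returns every user known to the security realm.
func listUsers(ctx context.Context, client *elastic.Client, usernames ...string) error {
	res, err := elastic.NewXPackSecurityGetUserService(client).Usernames(usernames...).Do(ctx)
	if err != nil {
		return err
	}
	for name, user := range *res {
		fmt.Printf("%s: enabled=%v roles=%v\n", name, user.Enabled, user.Roles)
	}
	return nil
}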
- -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityPutRoleService creates or updates a role by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-put-role.html. -type XPackSecurityPutRoleService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string - body interface{} -} - -// NewXPackSecurityPutRoleService creates a new XPackSecurityPutRoleService. -func NewXPackSecurityPutRoleService(client *Client) *XPackSecurityPutRoleService { - return &XPackSecurityPutRoleService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityPutRoleService) Pretty(pretty bool) *XPackSecurityPutRoleService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityPutRoleService) Human(human bool) *XPackSecurityPutRoleService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityPutRoleService) ErrorTrace(errorTrace bool) *XPackSecurityPutRoleService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityPutRoleService) FilterPath(filterPath ...string) *XPackSecurityPutRoleService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityPutRoleService) Header(name string, value string) *XPackSecurityPutRoleService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityPutRoleService) Headers(headers http.Header) *XPackSecurityPutRoleService { - s.headers = headers - return s -} - -// Name is name of the role to create. -func (s *XPackSecurityPutRoleService) Name(name string) *XPackSecurityPutRoleService { - s.name = name - return s -} - -// Body specifies the role. Use a string or a type that will get serialized as JSON. -func (s *XPackSecurityPutRoleService) Body(body interface{}) *XPackSecurityPutRoleService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityPutRoleService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid.
-func (s *XPackSecurityPutRoleService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if s.body == nil { - invalid = append(invalid, "Body") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityPutRoleService) Do(ctx context.Context) (*XPackSecurityPutRoleResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: s.body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityPutRoleResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityPutRoleResponse is the response of XPackSecurityPutRoleService.Do. -type XPackSecurityPutRoleResponse struct { - Role XPackSecurityPutRole -} - -type XPackSecurityPutRole struct { - Created bool `json:"created"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_put_role_mapping.go b/vendor/github.com/olivere/elastic/v7/xpack_security_put_role_mapping.go deleted file mode 100644 index 0850d5e..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_put_role_mapping.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityPutRoleMappingService creates or updates a role mapping by its name. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-put-role-mapping.html. -type XPackSecurityPutRoleMappingService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - name string - body interface{} -} - -// NewXPackSecurityPutRoleMappingService creates a new XPackSecurityPutRoleMappingService. -func NewXPackSecurityPutRoleMappingService(client *Client) *XPackSecurityPutRoleMappingService { - return &XPackSecurityPutRoleMappingService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityPutRoleMappingService) Pretty(pretty bool) *XPackSecurityPutRoleMappingService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityPutRoleMappingService) Human(human bool) *XPackSecurityPutRoleMappingService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors.
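Body() accepts either a raw string or any JSON-serializable value, so a role definition can be passed straight through; a sketch with an illustrative role name and body:

package example

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// putRole creates or updates a role from a raw JSON definition and
// reports whether it was newly created.
func putRole(ctx context.Context, client *elastic.Client) (bool, error) {
	// Illustrative definition: read-only access to logs-* indices.
	body := `{"cluster":["monitor"],"indices":[{"names":["logs-*"],"privileges":["read"]}]}`
	res, err := elastic.NewXPackSecurityPutRoleService(client).
		Name("logs_reader"). // illustrative role name
		Body(body).
		Do(ctx)
	if err != nil {
		return false, err
	}
	return res.Role.Created, nil
}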
-func (s *XPackSecurityPutRoleMappingService) ErrorTrace(errorTrace bool) *XPackSecurityPutRoleMappingService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityPutRoleMappingService) FilterPath(filterPath ...string) *XPackSecurityPutRoleMappingService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityPutRoleMappingService) Header(name string, value string) *XPackSecurityPutRoleMappingService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityPutRoleMappingService) Headers(headers http.Header) *XPackSecurityPutRoleMappingService { - s.headers = headers - return s -} - -// Name is name of the role mapping to create/update. -func (s *XPackSecurityPutRoleMappingService) Name(name string) *XPackSecurityPutRoleMappingService { - s.name = name - return s -} - -// Body specifies the role mapping. Use a string or a type that will get serialized as JSON. -func (s *XPackSecurityPutRoleMappingService) Body(body interface{}) *XPackSecurityPutRoleMappingService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityPutRoleMappingService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/role_mapping/{name}", map[string]string{ - "name": s.name, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityPutRoleMappingService) Validate() error { - var invalid []string - if s.name == "" { - invalid = append(invalid, "Name") - } - if s.body == nil { - invalid = append(invalid, "Body") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityPutRoleMappingService) Do(ctx context.Context) (*XPackSecurityPutRoleMappingResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: s.body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityPutRoleMappingResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityPutRoleMappingResponse is the response of XPackSecurityPutRoleMappingService.Do. 
-type XPackSecurityPutRoleMappingResponse struct { - Role_Mapping XPackSecurityPutRoleMapping -} - -type XPackSecurityPutRoleMapping struct { - Created bool `json:"created"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_security_put_user.go b/vendor/github.com/olivere/elastic/v7/xpack_security_put_user.go deleted file mode 100644 index f7ab77e..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_security_put_user.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2012-2019 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackSecurityPutUserService adds a user. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/security-api-put-user.html. -type XPackSecurityPutUserService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - username string - refresh string - - user *XPackSecurityPutUserRequest - body interface{} -} - -// NewXPackSecurityPutUserService creates a new XPackSecurityPutUserService. -func NewXPackSecurityPutUserService(client *Client) *XPackSecurityPutUserService { - return &XPackSecurityPutUserService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackSecurityPutUserService) Pretty(pretty bool) *XPackSecurityPutUserService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackSecurityPutUserService) Human(human bool) *XPackSecurityPutUserService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackSecurityPutUserService) ErrorTrace(errorTrace bool) *XPackSecurityPutUserService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackSecurityPutUserService) FilterPath(filterPath ...string) *XPackSecurityPutUserService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackSecurityPutUserService) Header(name string, value string) *XPackSecurityPutUserService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackSecurityPutUserService) Headers(headers http.Header) *XPackSecurityPutUserService { - s.headers = headers - return s -} - -// Username is the name of the user to add. -func (s *XPackSecurityPutUserService) Username(username string) *XPackSecurityPutUserService { - s.username = username - return s -} - -// User specifies the data of the new user. -// -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.4/security-api-put-user.html -// for details. -func (s *XPackSecurityPutUserService) User(user *XPackSecurityPutUserRequest) *XPackSecurityPutUserService { - s.user = user - return s -} - -// Refresh specifies if and how to wait for refreshing the shards after the request. 
-// Possible values are "true" (default), "false" and "wait_for", all of type string. -func (s *XPackSecurityPutUserService) Refresh(refresh string) *XPackSecurityPutUserService { - s.refresh = refresh - return s -} - -// Body specifies the user. Use a string or a type that will get serialized as JSON. -func (s *XPackSecurityPutUserService) Body(body interface{}) *XPackSecurityPutUserService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackSecurityPutUserService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_security/user/{username}", map[string]string{ - "username": s.username, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.refresh; v != "" { - params.Set("refresh", v) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackSecurityPutUserService) Validate() error { - var invalid []string - if s.username == "" { - invalid = append(invalid, "Username") - } - if s.user == nil && s.body == nil { - invalid = append(invalid, "User") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackSecurityPutUserService) Do(ctx context.Context) (*XPackSecurityPutUserResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - var body interface{} - if s.user != nil { - body = s.user - } else { - body = s.body - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackSecurityPutUserResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackSecurityPutUserRequest specifies the data required/allowed to add -// a new user. -type XPackSecurityPutUserRequest struct { - Enabled bool `json:"enabled"` - Email string `json:"email,omitempty"` - FullName string `json:"full_name,omitempty"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - Password string `json:"password,omitempty"` - PasswordHash string `json:"password_hash,omitempty"` - Roles []string `json:"roles"` -} - -// XPackSecurityPutUserResponse is the response of XPackSecurityPutUserService.Do. -type XPackSecurityPutUserResponse struct { - User XPackSecurityPutUser `json:"user"` -} - -// XPackSecurityPutUser is the response containing the creation information -type XPackSecurityPutUser struct { - Created bool `json:"created"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_ack_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_ack_watch.go deleted file mode 100644 index d1ae925..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_ack_watch.go +++ /dev/null @@ -1,198 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. 
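A similar sketch for the deleted XPackSecurityPutUserService, assuming an already-configured *elastic.Client; the username, password, and roles are hypothetical:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// putUser creates (or updates) a user with the typed request struct that
// this hunk also removes; Validate accepts either User or Body.
func putUser(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackSecurityPutUserService(client).
		Username("reporting").
		Refresh("wait_for"). // "true" (default), "false", or "wait_for"
		User(&elastic.XPackSecurityPutUserRequest{
			Enabled:  true,
			Password: "changeme",
			Roles:    []string{"monitoring_user"},
		}).
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("user created: %v", res.User.Created)
	return nil
}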
-// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherAckWatchService enables you to manually throttle execution of the watch’s actions. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-ack-watch.html. -type XPackWatcherAckWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - watchId string - actionId []string - masterTimeout string -} - -// NewXPackWatcherAckWatchService creates a new XPackWatcherAckWatchService. -func NewXPackWatcherAckWatchService(client *Client) *XPackWatcherAckWatchService { - return &XPackWatcherAckWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherAckWatchService) Pretty(pretty bool) *XPackWatcherAckWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherAckWatchService) Human(human bool) *XPackWatcherAckWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherAckWatchService) ErrorTrace(errorTrace bool) *XPackWatcherAckWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherAckWatchService) FilterPath(filterPath ...string) *XPackWatcherAckWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherAckWatchService) Header(name string, value string) *XPackWatcherAckWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherAckWatchService) Headers(headers http.Header) *XPackWatcherAckWatchService { - s.headers = headers - return s -} - -// WatchId is the unique ID of the watch. -func (s *XPackWatcherAckWatchService) WatchId(watchId string) *XPackWatcherAckWatchService { - s.watchId = watchId - return s -} - -// ActionId is a slice of action ids to be acked. -func (s *XPackWatcherAckWatchService) ActionId(actionId ...string) *XPackWatcherAckWatchService { - s.actionId = append(s.actionId, actionId...) - return s -} - -// MasterTimeout indicates an explicit operation timeout for -// connection to master node. -func (s *XPackWatcherAckWatchService) MasterTimeout(masterTimeout string) *XPackWatcherAckWatchService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. 
-func (s *XPackWatcherAckWatchService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - if len(s.actionId) > 0 { - path, err = uritemplates.Expand("/_watcher/watch/{watch_id}/_ack/{action_id}", map[string]string{ - "watch_id": s.watchId, - "action_id": strings.Join(s.actionId, ","), - }) - } else { - path, err = uritemplates.Expand("/_watcher/watch/{watch_id}/_ack", map[string]string{ - "watch_id": s.watchId, - }) - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherAckWatchService) Validate() error { - var invalid []string - if s.watchId == "" { - invalid = append(invalid, "WatchId") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherAckWatchService) Do(ctx context.Context) (*XPackWatcherAckWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherAckWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherAckWatchResponse is the response of XPackWatcherAckWatchService.Do. -type XPackWatcherAckWatchResponse struct { - Status *XPackWatcherAckWatchStatus `json:"status"` -} - -// XPackWatcherAckWatchStatus is the status of a XPackWatcherAckWatchResponse. -type XPackWatcherAckWatchStatus struct { - State map[string]interface{} `json:"state"` - LastChecked string `json:"last_checked"` - LastMetCondition string `json:"last_met_condition"` - Actions map[string]map[string]interface{} `json:"actions"` - ExecutionState string `json:"execution_state"` - Version int `json:"version"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_activate_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_activate_watch.go deleted file mode 100644 index 9a49605..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_activate_watch.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherActivateWatchService enables you to activate a currently inactive watch. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-activate-watch.html. 
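The ack-watch service deleted above follows the same builder pattern; a hedged sketch, with the watch and action ids assumed for illustration:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// ackWatch acknowledges one action of a watch; omitting ActionId acks the
// whole watch, since buildURL then expands /_watcher/watch/{watch_id}/_ack.
func ackWatch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackWatcherAckWatchService(client).
		WatchId("cluster_health_watch"). // required; Validate rejects ""
		ActionId("email_admin").
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Status != nil {
		log.Printf("acked actions: %v", res.Status.Actions)
	}
	return nil
}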
-type XPackWatcherActivateWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - watchId string - masterTimeout string -} - -// NewXPackWatcherActivateWatchService creates a new XPackWatcherActivateWatchService. -func NewXPackWatcherActivateWatchService(client *Client) *XPackWatcherActivateWatchService { - return &XPackWatcherActivateWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherActivateWatchService) Pretty(pretty bool) *XPackWatcherActivateWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherActivateWatchService) Human(human bool) *XPackWatcherActivateWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherActivateWatchService) ErrorTrace(errorTrace bool) *XPackWatcherActivateWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherActivateWatchService) FilterPath(filterPath ...string) *XPackWatcherActivateWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherActivateWatchService) Header(name string, value string) *XPackWatcherActivateWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherActivateWatchService) Headers(headers http.Header) *XPackWatcherActivateWatchService { - s.headers = headers - return s -} - -// WatchId is the ID of the watch to activate. -func (s *XPackWatcherActivateWatchService) WatchId(watchId string) *XPackWatcherActivateWatchService { - s.watchId = watchId - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *XPackWatcherActivateWatchService) MasterTimeout(masterTimeout string) *XPackWatcherActivateWatchService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherActivateWatchService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_watcher/watch/{watch_id}/_activate", map[string]string{ - "watch_id": s.watchId, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. 
-func (s *XPackWatcherActivateWatchService) Validate() error { - var invalid []string - if s.watchId == "" { - invalid = append(invalid, "WatchId") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherActivateWatchService) Do(ctx context.Context) (*XPackWatcherActivateWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherActivateWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherActivateWatchResponse is the response of XPackWatcherActivateWatchService.Do. -type XPackWatcherActivateWatchResponse struct { - Status *XPackWatchStatus `json:"status"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_deactivate_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_deactivate_watch.go deleted file mode 100644 index 42c5e55..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_deactivate_watch.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherDeactivateWatchService enables you to deactivate a currently active watch. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-deactivate-watch.html. -type XPackWatcherDeactivateWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - watchId string - masterTimeout string -} - -// NewXPackWatcherDeactivateWatchService creates a new XPackWatcherDeactivateWatchService. -func NewXPackWatcherDeactivateWatchService(client *Client) *XPackWatcherDeactivateWatchService { - return &XPackWatcherDeactivateWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherDeactivateWatchService) Pretty(pretty bool) *XPackWatcherDeactivateWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherDeactivateWatchService) Human(human bool) *XPackWatcherDeactivateWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherDeactivateWatchService) ErrorTrace(errorTrace bool) *XPackWatcherDeactivateWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. 
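Activation and deactivation are symmetric one-call services; a sketch under the same assumptions (configured client, hypothetical watch id):

package main

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// toggleWatch flips a watch between active and inactive using the two
// services removed in this hunk.
func toggleWatch(ctx context.Context, client *elastic.Client, active bool) error {
	if active {
		_, err := elastic.NewXPackWatcherActivateWatchService(client).
			WatchId("cluster_health_watch").
			Do(ctx)
		return err
	}
	_, err := elastic.NewXPackWatcherDeactivateWatchService(client).
		WatchId("cluster_health_watch").
		Do(ctx)
	return err
}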
-func (s *XPackWatcherDeactivateWatchService) FilterPath(filterPath ...string) *XPackWatcherDeactivateWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherDeactivateWatchService) Header(name string, value string) *XPackWatcherDeactivateWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherDeactivateWatchService) Headers(headers http.Header) *XPackWatcherDeactivateWatchService { - s.headers = headers - return s -} - -// WatchId is the ID of the watch to deactivate. -func (s *XPackWatcherDeactivateWatchService) WatchId(watchId string) *XPackWatcherDeactivateWatchService { - s.watchId = watchId - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *XPackWatcherDeactivateWatchService) MasterTimeout(masterTimeout string) *XPackWatcherDeactivateWatchService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherDeactivateWatchService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_watcher/watch/{watch_id}/_deactivate", map[string]string{ - "watch_id": s.watchId, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherDeactivateWatchService) Validate() error { - var invalid []string - if s.watchId == "" { - invalid = append(invalid, "WatchId") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherDeactivateWatchService) Do(ctx context.Context) (*XPackWatcherDeactivateWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherDeactivateWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherDeactivateWatchResponse is the response of XPackWatcherDeactivateWatchService.Do. -type XPackWatcherDeactivateWatchResponse struct { - Status *XPackWatchStatus `json:"status"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_delete_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_delete_watch.go deleted file mode 100644 index dc7e7f9..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_delete_watch.go +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. 
-// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherDeleteWatchService removes a watch. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-delete-watch.html. -type XPackWatcherDeleteWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - masterTimeout string -} - -// NewXPackWatcherDeleteWatchService creates a new XPackWatcherDeleteWatchService. -func NewXPackWatcherDeleteWatchService(client *Client) *XPackWatcherDeleteWatchService { - return &XPackWatcherDeleteWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherDeleteWatchService) Pretty(pretty bool) *XPackWatcherDeleteWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherDeleteWatchService) Human(human bool) *XPackWatcherDeleteWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherDeleteWatchService) ErrorTrace(errorTrace bool) *XPackWatcherDeleteWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherDeleteWatchService) FilterPath(filterPath ...string) *XPackWatcherDeleteWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherDeleteWatchService) Header(name string, value string) *XPackWatcherDeleteWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherDeleteWatchService) Headers(headers http.Header) *XPackWatcherDeleteWatchService { - s.headers = headers - return s -} - -// Id of the watch to delete. -func (s *XPackWatcherDeleteWatchService) Id(id string) *XPackWatcherDeleteWatchService { - s.id = id - return s -} - -// MasterTimeout specifies an explicit operation timeout for connection to master node. -func (s *XPackWatcherDeleteWatchService) MasterTimeout(masterTimeout string) *XPackWatcherDeleteWatchService { - s.masterTimeout = masterTimeout - return s -} - -// buildURL builds the URL for the operation. 
-func (s *XPackWatcherDeleteWatchService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_watcher/watch/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherDeleteWatchService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherDeleteWatchService) Do(ctx context.Context) (*XPackWatcherDeleteWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "DELETE", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherDeleteWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherDeleteWatchResponse is the response of XPackWatcherDeleteWatchService.Do. -type XPackWatcherDeleteWatchResponse struct { - Found bool `json:"found"` - Id string `json:"_id"` - Version int `json:"_version"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_execute_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_execute_watch.go deleted file mode 100644 index 9a31c51..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_execute_watch.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherExecuteWatchService forces the execution of a stored watch. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-execute-watch.html. -type XPackWatcherExecuteWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - debug *bool - bodyJson interface{} - bodyString string -} - -// NewXPackWatcherExecuteWatchService creates a new XPackWatcherExecuteWatchService. 
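Deleting a watch returns whether it existed; a sketch with an assumed id:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// deleteWatch removes a watch and logs the found/version bookkeeping that
// XPackWatcherDeleteWatchResponse carries.
func deleteWatch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackWatcherDeleteWatchService(client).
		Id("cluster_health_watch").
		MasterTimeout("30s"). // optional master-node timeout
		Do(ctx)
	if err != nil {
		return err
	}
	log.Printf("found=%v version=%d", res.Found, res.Version)
	return nil
}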
-func NewXPackWatcherExecuteWatchService(client *Client) *XPackWatcherExecuteWatchService { - return &XPackWatcherExecuteWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherExecuteWatchService) Pretty(pretty bool) *XPackWatcherExecuteWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherExecuteWatchService) Human(human bool) *XPackWatcherExecuteWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherExecuteWatchService) ErrorTrace(errorTrace bool) *XPackWatcherExecuteWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherExecuteWatchService) FilterPath(filterPath ...string) *XPackWatcherExecuteWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherExecuteWatchService) Header(name string, value string) *XPackWatcherExecuteWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherExecuteWatchService) Headers(headers http.Header) *XPackWatcherExecuteWatchService { - s.headers = headers - return s -} - -// Id of the watch to execute on. -func (s *XPackWatcherExecuteWatchService) Id(id string) *XPackWatcherExecuteWatchService { - s.id = id - return s -} - -// Debug indicates whether the watch should execute in debug mode. -func (s *XPackWatcherExecuteWatchService) Debug(debug bool) *XPackWatcherExecuteWatchService { - s.debug = &debug - return s -} - -// BodyJson is documented as: Execution control. -func (s *XPackWatcherExecuteWatchService) BodyJson(body interface{}) *XPackWatcherExecuteWatchService { - s.bodyJson = body - return s -} - -// BodyString is documented as: Execution control. -func (s *XPackWatcherExecuteWatchService) BodyString(body string) *XPackWatcherExecuteWatchService { - s.bodyString = body - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherExecuteWatchService) buildURL() (string, url.Values, error) { - // Build URL - var ( - path string - err error - ) - if s.id != "" { - path, err = uritemplates.Expand("/_watcher/watch/{id}/_execute", map[string]string{ - "id": s.id, - }) - } else { - path = "/_watcher/watch/_execute" - } - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.debug; v != nil { - params.Set("debug", fmt.Sprint(*v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherExecuteWatchService) Validate() error { - return nil -} - -// Do executes the operation. 
-func (s *XPackWatcherExecuteWatchService) Do(ctx context.Context) (*XPackWatcherExecuteWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Setup HTTP request body - var body interface{} - if s.bodyJson != nil { - body = s.bodyJson - } else { - body = s.bodyString - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherExecuteWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherExecuteWatchResponse is the response of XPackWatcherExecuteWatchService.Do. -type XPackWatcherExecuteWatchResponse struct { - Id string `json:"_id"` - WatchRecord *XPackWatchRecord `json:"watch_record"` -} - -type XPackWatchRecord struct { - WatchId string `json:"watch_id"` - Node string `json:"node"` - Messages []string `json:"messages"` - State string `json:"state"` - Status *XPackWatchRecordStatus `json:"status"` - Input map[string]map[string]interface{} `json:"input"` - Condition map[string]map[string]interface{} `json:"condition"` - Result map[string]interface{} `json:"Result"` -} - -type XPackWatchRecordStatus struct { - Version int `json:"version"` - State map[string]interface{} `json:"state"` - LastChecked string `json:"last_checked"` - LastMetCondition string `json:"last_met_condition"` - Actions map[string]map[string]interface{} `json:"actions"` - ExecutionState string `json:"execution_state"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_get_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_get_watch.go deleted file mode 100644 index b0ce092..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_get_watch.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - "time" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherGetWatchService retrieves a watch by its ID. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-get-watch.html. -type XPackWatcherGetWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string -} - -// NewXPackWatcherGetWatchService creates a new XPackWatcherGetWatchService. -func NewXPackWatcherGetWatchService(client *Client) *XPackWatcherGetWatchService { - return &XPackWatcherGetWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherGetWatchService) Pretty(pretty bool) *XPackWatcherGetWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". 
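Execute-watch optionally takes an execution-control body; a sketch in which the body contents ("ignore_condition") are an assumption about the Watcher REST API rather than something this diff shows:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// executeWatch forces a stored watch to run once, in debug mode.
func executeWatch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackWatcherExecuteWatchService(client).
		Id("cluster_health_watch").
		Debug(true).
		BodyJson(map[string]interface{}{"ignore_condition": true}).
		Do(ctx)
	if err != nil {
		return err
	}
	if res.WatchRecord != nil {
		log.Printf("watch record state: %s", res.WatchRecord.State)
	}
	return nil
}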
-func (s *XPackWatcherGetWatchService) Human(human bool) *XPackWatcherGetWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherGetWatchService) ErrorTrace(errorTrace bool) *XPackWatcherGetWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherGetWatchService) FilterPath(filterPath ...string) *XPackWatcherGetWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherGetWatchService) Header(name string, value string) *XPackWatcherGetWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherGetWatchService) Headers(headers http.Header) *XPackWatcherGetWatchService { - s.headers = headers - return s -} - -// Id is ID of the watch to retrieve. -func (s *XPackWatcherGetWatchService) Id(id string) *XPackWatcherGetWatchService { - s.id = id - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherGetWatchService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_watcher/watch/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherGetWatchService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherGetWatchService) Do(ctx context.Context) (*XPackWatcherGetWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherGetWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherGetWatchResponse is the response of XPackWatcherGetWatchService.Do. 
-type XPackWatcherGetWatchResponse struct { - Found bool `json:"found"` - Id string `json:"_id"` - Version int64 `json:"_version,omitempty"` - Status *XPackWatchStatus `json:"status,omitempty"` - Watch *XPackWatch `json:"watch,omitempty"` -} - -type XPackWatchStatus struct { - State *XPackWatchExecutionState `json:"state,omitempty"` - LastChecked *time.Time `json:"last_checked,omitempty"` - LastMetCondition *time.Time `json:"last_met_condition,omitempty"` - Actions map[string]*XPackWatchActionStatus `json:"actions,omitempty"` - ExecutionState string `json:"execution_state,omitempty"` - Headers map[string]string `json:"headers,omitempty"` - Version int64 `json:"version"` -} - -type XPackWatchExecutionState struct { - Active bool `json:"active"` - Timestamp time.Time `json:"timestamp"` -} - -type XPackWatchActionStatus struct { - AckStatus *XPackWatchActionAckStatus `json:"ack"` - LastExecution *XPackWatchActionExecutionState `json:"last_execution,omitempty"` - LastSuccessfulExecution *XPackWatchActionExecutionState `json:"last_successful_execution,omitempty"` - LastThrottle *XPackWatchActionThrottle `json:"last_throttle,omitempty"` -} - -type XPackWatchActionAckStatus struct { - Timestamp time.Time `json:"timestamp"` - AckStatusState string `json:"ack_status_state"` -} - -type XPackWatchActionExecutionState struct { - Timestamp time.Time `json:"timestamp"` - Successful bool `json:"successful"` - Reason string `json:"reason,omitempty"` -} - -type XPackWatchActionThrottle struct { - Timestamp time.Time `json:"timestamp"` - Reason string `json:"reason,omitempty"` -} - -type XPackWatch struct { - Trigger map[string]map[string]interface{} `json:"trigger"` - Input map[string]map[string]interface{} `json:"input"` - Condition map[string]map[string]interface{} `json:"condition"` - Transform map[string]interface{} `json:"transform,omitempty"` - ThrottlePeriod string `json:"throttle_period,omitempty"` - ThrottlePeriodInMillis int64 `json:"throttle_period_in_millis,omitempty"` - Actions map[string]*XPackWatchActionStatus `json:"actions"` - Metadata map[string]interface{} `json:"metadata,omitempty"` - Status *XPackWatchStatus `json:"status,omitempty"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_put_watch.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_put_watch.go deleted file mode 100644 index 0329825..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_put_watch.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/olivere/elastic/v7/uritemplates" -) - -// XPackWatcherPutWatchService either registers a new watch in Watcher -// or update an existing one. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-put-watch.html. 
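Fetching a watch back is a plain GET against the same endpoint; a sketch with the same assumed id, reading the typed status structs modeled above:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// getWatch retrieves a watch and inspects its status, if present.
func getWatch(ctx context.Context, client *elastic.Client) error {
	res, err := elastic.NewXPackWatcherGetWatchService(client).
		Id("cluster_health_watch").
		Do(ctx)
	if err != nil {
		return err
	}
	if res.Found && res.Status != nil {
		log.Printf("version=%d execution_state=%s", res.Status.Version, res.Status.ExecutionState)
	}
	return nil
}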
-type XPackWatcherPutWatchService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - id string - active *bool - masterTimeout string - ifSeqNo *int64 - ifPrimaryTerm *int64 - body interface{} -} - -// NewXPackWatcherPutWatchService creates a new XPackWatcherPutWatchService. -func NewXPackWatcherPutWatchService(client *Client) *XPackWatcherPutWatchService { - return &XPackWatcherPutWatchService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherPutWatchService) Pretty(pretty bool) *XPackWatcherPutWatchService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherPutWatchService) Human(human bool) *XPackWatcherPutWatchService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherPutWatchService) ErrorTrace(errorTrace bool) *XPackWatcherPutWatchService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherPutWatchService) FilterPath(filterPath ...string) *XPackWatcherPutWatchService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherPutWatchService) Header(name string, value string) *XPackWatcherPutWatchService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherPutWatchService) Headers(headers http.Header) *XPackWatcherPutWatchService { - s.headers = headers - return s -} - -// Id of the watch to upsert. -func (s *XPackWatcherPutWatchService) Id(id string) *XPackWatcherPutWatchService { - s.id = id - return s -} - -// Active specifies whether the watch is in/active by default. -func (s *XPackWatcherPutWatchService) Active(active bool) *XPackWatcherPutWatchService { - s.active = &active - return s -} - -// MasterTimeout is an explicit operation timeout for connection to master node. -func (s *XPackWatcherPutWatchService) MasterTimeout(masterTimeout string) *XPackWatcherPutWatchService { - s.masterTimeout = masterTimeout - return s -} - -// IfSeqNo indicates to update the watch only if the last operation that -// has changed the watch has the specified sequence number. -func (s *XPackWatcherPutWatchService) IfSeqNo(seqNo int64) *XPackWatcherPutWatchService { - s.ifSeqNo = &seqNo - return s -} - -// IfPrimaryTerm indicates to update the watch only if the last operation that -// has changed the watch has the specified primary term. -func (s *XPackWatcherPutWatchService) IfPrimaryTerm(primaryTerm int64) *XPackWatcherPutWatchService { - s.ifPrimaryTerm = &primaryTerm - return s -} - -// Body specifies the watch. Use a string or a type that will get serialized as JSON. -func (s *XPackWatcherPutWatchService) Body(body interface{}) *XPackWatcherPutWatchService { - s.body = body - return s -} - -// buildURL builds the URL for the operation. 
-func (s *XPackWatcherPutWatchService) buildURL() (string, url.Values, error) { - // Build URL - path, err := uritemplates.Expand("/_watcher/watch/{id}", map[string]string{ - "id": s.id, - }) - if err != nil { - return "", url.Values{}, err - } - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.active; v != nil { - params.Set("active", fmt.Sprint(*v)) - } - if s.masterTimeout != "" { - params.Set("master_timeout", s.masterTimeout) - } - if v := s.ifSeqNo; v != nil { - params.Set("if_seq_no", fmt.Sprintf("%d", *v)) - } - if v := s.ifPrimaryTerm; v != nil { - params.Set("if_primary_term", fmt.Sprintf("%d", *v)) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherPutWatchService) Validate() error { - var invalid []string - if s.id == "" { - invalid = append(invalid, "Id") - } - if s.body == nil { - invalid = append(invalid, "Body") - } - if len(invalid) > 0 { - return fmt.Errorf("missing required fields: %v", invalid) - } - return nil -} - -// Do executes the operation. -func (s *XPackWatcherPutWatchService) Do(ctx context.Context) (*XPackWatcherPutWatchResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "PUT", - Path: path, - Params: params, - Body: s.body, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherPutWatchResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherPutWatchResponse is the response of XPackWatcherPutWatchService.Do. -type XPackWatcherPutWatchResponse struct { -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_start.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_start.go deleted file mode 100644 index 3fb29a7..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_start.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// XPackWatcherStartService starts the watcher service if it is not already running. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-start.html. -type XPackWatcherStartService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers -} - -// NewXPackWatcherStartService creates a new XPackWatcherStartService. 
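Put-watch supports optimistic concurrency via if_seq_no / if_primary_term; a sketch where the watch body and sequence numbers are illustrative assumptions:

package main

import (
	"context"

	"github.com/olivere/elastic/v7"
)

// putWatch upserts a watch, registered inactive, guarded by an assumed
// sequence number and primary term.
func putWatch(ctx context.Context, client *elastic.Client) error {
	body := `{
	  "trigger":   {"schedule": {"interval": "10m"}},
	  "input":     {"simple": {}},
	  "condition": {"always": {}},
	  "actions":   {}
	}`
	_, err := elastic.NewXPackWatcherPutWatchService(client).
		Id("cluster_health_watch").
		Active(false).
		IfSeqNo(42).
		IfPrimaryTerm(1).
		Body(body).
		Do(ctx)
	return err
}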
-func NewXPackWatcherStartService(client *Client) *XPackWatcherStartService { - return &XPackWatcherStartService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherStartService) Pretty(pretty bool) *XPackWatcherStartService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherStartService) Human(human bool) *XPackWatcherStartService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherStartService) ErrorTrace(errorTrace bool) *XPackWatcherStartService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherStartService) FilterPath(filterPath ...string) *XPackWatcherStartService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherStartService) Header(name string, value string) *XPackWatcherStartService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherStartService) Headers(headers http.Header) *XPackWatcherStartService { - s.headers = headers - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherStartService) buildURL() (string, url.Values, error) { - // Build URL path - path := "/_watcher/_start" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherStartService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *XPackWatcherStartService) Do(ctx context.Context) (*XPackWatcherStartResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherStartResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherStartResponse is the response of XPackWatcherStartService.Do. -type XPackWatcherStartResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_stats.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_stats.go deleted file mode 100644 index 9c615c2..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_stats.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. 
- -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// XPackWatcherStatsService returns the current watcher metrics. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-stats.html. -type XPackWatcherStatsService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers - - metric string - emitStacktraces *bool -} - -// NewXPackWatcherStatsService creates a new XPackWatcherStatsService. -func NewXPackWatcherStatsService(client *Client) *XPackWatcherStatsService { - return &XPackWatcherStatsService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherStatsService) Pretty(pretty bool) *XPackWatcherStatsService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherStatsService) Human(human bool) *XPackWatcherStatsService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherStatsService) ErrorTrace(errorTrace bool) *XPackWatcherStatsService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherStatsService) FilterPath(filterPath ...string) *XPackWatcherStatsService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. -func (s *XPackWatcherStatsService) Header(name string, value string) *XPackWatcherStatsService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherStatsService) Headers(headers http.Header) *XPackWatcherStatsService { - s.headers = headers - return s -} - -// Metric controls what additional stat metrics should be include in the response. -func (s *XPackWatcherStatsService) Metric(metric string) *XPackWatcherStatsService { - s.metric = metric - return s -} - -// EmitStacktraces, if enabled, emits stack traces of currently running watches. -func (s *XPackWatcherStatsService) EmitStacktraces(emitStacktraces bool) *XPackWatcherStatsService { - s.emitStacktraces = &emitStacktraces - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherStatsService) buildURL() (string, url.Values, error) { - // Build URL - path := "/_watcher/stats" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - if v := s.emitStacktraces; v != nil { - params.Set("emit_stacktraces", fmt.Sprint(*v)) - } - if s.metric != "" { - params.Set("metric", s.metric) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherStatsService) Validate() error { - return nil -} - -// Do executes the operation. 
-func (s *XPackWatcherStatsService) Do(ctx context.Context) (*XPackWatcherStatsResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "GET", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherStatsResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherStatsResponse is the response of XPackWatcherStatsService.Do. -type XPackWatcherStatsResponse struct { - Stats []XPackWatcherStats `json:"stats"` -} - -// XPackWatcherStats represents the stats used in XPackWatcherStatsResponse. -type XPackWatcherStats struct { - WatcherState string `json:"watcher_state"` - WatchCount int `json:"watch_count"` - ExecutionThreadPool map[string]interface{} `json:"execution_thread_pool"` -} diff --git a/vendor/github.com/olivere/elastic/v7/xpack_watcher_stop.go b/vendor/github.com/olivere/elastic/v7/xpack_watcher_stop.go deleted file mode 100644 index d5fe9d0..0000000 --- a/vendor/github.com/olivere/elastic/v7/xpack_watcher_stop.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2012-2018 Oliver Eilhard. All rights reserved. -// Use of this source code is governed by a MIT-license. -// See http://olivere.mit-license.org/license.txt for details. - -package elastic - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/url" - "strings" -) - -// XPackWatcherStopService stops the watcher service if it is running. -// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/watcher-api-stop.html. -type XPackWatcherStopService struct { - client *Client - - pretty *bool // pretty format the returned JSON response - human *bool // return human readable values for statistics - errorTrace *bool // include the stack trace of returned errors - filterPath []string // list of filters used to reduce the response - headers http.Header // custom request-level HTTP headers -} - -// NewXPackWatcherStopService creates a new XPackWatcherStopService. -func NewXPackWatcherStopService(client *Client) *XPackWatcherStopService { - return &XPackWatcherStopService{ - client: client, - } -} - -// Pretty tells Elasticsearch whether to return a formatted JSON response. -func (s *XPackWatcherStopService) Pretty(pretty bool) *XPackWatcherStopService { - s.pretty = &pretty - return s -} - -// Human specifies whether human readable values should be returned in -// the JSON response, e.g. "7.5mb". -func (s *XPackWatcherStopService) Human(human bool) *XPackWatcherStopService { - s.human = &human - return s -} - -// ErrorTrace specifies whether to include the stack trace of returned errors. -func (s *XPackWatcherStopService) ErrorTrace(errorTrace bool) *XPackWatcherStopService { - s.errorTrace = &errorTrace - return s -} - -// FilterPath specifies a list of filters used to reduce the response. -func (s *XPackWatcherStopService) FilterPath(filterPath ...string) *XPackWatcherStopService { - s.filterPath = filterPath - return s -} - -// Header adds a header to the request. 
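The start/stats/stop trio rounds out the deleted Watcher surface; a combined sketch under the same assumptions:

package main

import (
	"context"
	"log"

	"github.com/olivere/elastic/v7"
)

// watcherLifecycle starts the watcher service, reads per-node stats, then
// stops it again.
func watcherLifecycle(ctx context.Context, client *elastic.Client) error {
	if _, err := elastic.NewXPackWatcherStartService(client).Do(ctx); err != nil {
		return err
	}
	stats, err := elastic.NewXPackWatcherStatsService(client).
		EmitStacktraces(false).
		Do(ctx)
	if err != nil {
		return err
	}
	for _, s := range stats.Stats {
		log.Printf("state=%s watches=%d", s.WatcherState, s.WatchCount)
	}
	_, err = elastic.NewXPackWatcherStopService(client).Do(ctx)
	return err
}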
-func (s *XPackWatcherStopService) Header(name string, value string) *XPackWatcherStopService { - if s.headers == nil { - s.headers = http.Header{} - } - s.headers.Add(name, value) - return s -} - -// Headers specifies the headers of the request. -func (s *XPackWatcherStopService) Headers(headers http.Header) *XPackWatcherStopService { - s.headers = headers - return s -} - -// buildURL builds the URL for the operation. -func (s *XPackWatcherStopService) buildURL() (string, url.Values, error) { - // Build URL path - path := "/_watcher/_stop" - - // Add query string parameters - params := url.Values{} - if v := s.pretty; v != nil { - params.Set("pretty", fmt.Sprint(*v)) - } - if v := s.human; v != nil { - params.Set("human", fmt.Sprint(*v)) - } - if v := s.errorTrace; v != nil { - params.Set("error_trace", fmt.Sprint(*v)) - } - if len(s.filterPath) > 0 { - params.Set("filter_path", strings.Join(s.filterPath, ",")) - } - return path, params, nil -} - -// Validate checks if the operation is valid. -func (s *XPackWatcherStopService) Validate() error { - return nil -} - -// Do executes the operation. -func (s *XPackWatcherStopService) Do(ctx context.Context) (*XPackWatcherStopResponse, error) { - // Check pre-conditions - if err := s.Validate(); err != nil { - return nil, err - } - - // Get URL for request - path, params, err := s.buildURL() - if err != nil { - return nil, err - } - - // Get HTTP response - res, err := s.client.PerformRequest(ctx, PerformRequestOptions{ - Method: "POST", - Path: path, - Params: params, - Headers: s.headers, - }) - if err != nil { - return nil, err - } - - // Return operation response - ret := new(XPackWatcherStopResponse) - if err := json.Unmarshal(res.Body, ret); err != nil { - return nil, err - } - return ret, nil -} - -// XPackWatcherStopResponse is the response of XPackWatcherStopService.Do. -type XPackWatcherStopResponse struct { - Acknowledged bool `json:"acknowledged"` -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de0..0000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e..0000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cd..0000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb..0000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. 
For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932ead..0000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2..0000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. 
For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. 
-type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. 
-func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d..0000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a834..0000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. 
-// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/client_golang/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
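The `github.com/pkg/errors` sources deleted in the hunks above document a wrap/cause contract that downstream code may still rely on. A minimal, self-contained sketch of that contract — `Wrap` to annotate with a message and a stack, `Cause` to recover the original error, and `%+v` to print the recorded stack — assuming only the behavior described in the removed `errors.go`; the path and messages are illustrative, not taken from this repository:
```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

func loadConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		// Wrap records the supplied message and the call stack at this point.
		return errors.Wrap(err, "load config failed")
	}
	return nil
}

func main() {
	err := loadConfig("/no/such/file") // illustrative path
	fmt.Printf("%v\n", err)  // "load config failed: open /no/such/file: ..."
	fmt.Printf("%+v\n", err) // same message, plus the recorded stack trace

	// Cause walks the causer chain back to the original *os.PathError.
	if _, ok := errors.Cause(err).(*os.PathError); ok {
		fmt.Println("root cause is a path error")
	}
}
```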
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE deleted file mode 100644 index dd878a3..0000000 --- a/vendor/github.com/prometheus/client_golang/NOTICE +++ /dev/null @@ -1,23 +0,0 @@ -Prometheus instrumentation library for Go applications -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). - - -The following components are included in this product: - -perks - a fork of https://github.com/bmizerany/perks -https://github.com/beorn7/perks -Copyright 2013-2015 Blake Mizerany, Björn Rabenstein -See https://github.com/beorn7/perks/blob/master/README.md for license details. - -Go support for Protocol Buffers - Google's data interchange format -http://github.com/golang/protobuf/ -Copyright 2010 The Go Authors -See source code for license details. - -Support for streaming Protocol Buffer messages for the Go language (golang). -https://github.com/matttproud/golang_protobuf_extensions -Copyright 2013 Matt T. Proud -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore deleted file mode 100644 index 3460f03..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md deleted file mode 100644 index 44986bf..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/README.md +++ /dev/null @@ -1 +0,0 @@ -See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info.go deleted file mode 100644 index 288f0e8..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build go1.12 - -package prometheus - -import "runtime/debug" - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. 
-func readBuildInfo() (path, version, sum string) { - path, version, sum = "unknown", "unknown", "unknown" - if bi, ok := debug.ReadBuildInfo(); ok { - path = bi.Main.Path - version = bi.Main.Version - sum = bi.Main.Sum - } - return -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go deleted file mode 100644 index 6609e28..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !go1.12 - -package prometheus - -// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before -// 1.12. Remove this whole file once the minimum supported Go version is 1.12. -func readBuildInfo() (path, version, sum string) { - return "unknown", "unknown", "unknown" -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go deleted file mode 100644 index 1e83965..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Collector is the interface implemented by anything that can be used by -// Prometheus to collect metrics. A Collector has to be registered for -// collection. See Registerer.Register. -// -// The stock metrics provided by this package (Gauge, Counter, Summary, -// Histogram, Untyped) are also Collectors (which only ever collect one metric, -// namely itself). An implementer of Collector may, however, collect multiple -// metrics in a coordinated fashion and/or create metrics on the fly. Examples -// for collectors already implemented in this library are the metric vectors -// (i.e. collection of multiple instances of the same Metric but with different -// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. -type Collector interface { - // Describe sends the super-set of all possible descriptors of metrics - // collected by this Collector to the provided channel and returns once - // the last descriptor has been sent. The sent descriptors fulfill the - // consistency and uniqueness requirements described in the Desc - // documentation. 
- // - // It is valid if one and the same Collector sends duplicate - // descriptors. Those duplicates are simply ignored. However, two - // different Collectors must not send duplicate descriptors. - // - // Sending no descriptor at all marks the Collector as “unchecked”, - // i.e. no checks will be performed at registration time, and the - // Collector may yield any Metric it sees fit in its Collect method. - // - // This method idempotently sends the same descriptors throughout the - // lifetime of the Collector. It may be called concurrently and - // therefore must be implemented in a concurrency safe way. - // - // If a Collector encounters an error while executing this method, it - // must send an invalid descriptor (created with NewInvalidDesc) to - // signal the error to the registry. - Describe(chan<- *Desc) - // Collect is called by the Prometheus registry when collecting - // metrics. The implementation sends each collected metric via the - // provided channel and returns once the last metric has been sent. The - // descriptor of each sent metric is one of those returned by Describe - // (unless the Collector is unchecked, see above). Returned metrics that - // share the same descriptor must differ in their variable label - // values. - // - // This method may be called concurrently and must therefore be - // implemented in a concurrency safe way. Blocking occurs at the expense - // of total performance of rendering all registered metrics. Ideally, - // Collector implementations support concurrent readers. - Collect(chan<- Metric) -} - -// DescribeByCollect is a helper to implement the Describe method of a custom -// Collector. It collects the metrics from the provided Collector and sends -// their descriptors to the provided channel. -// -// If a Collector collects the same metrics throughout its lifetime, its -// Describe method can simply be implemented as: -// -// func (c customCollector) Describe(ch chan<- *Desc) { -// DescribeByCollect(c, ch) -// } -// -// However, this will not work if the metrics collected change dynamically over -// the lifetime of the Collector in a way that their combined set of descriptors -// changes as well. The shortcut implementation will then violate the contract -// of the Describe method. If a Collector sometimes collects no metrics at all -// (for example vectors like CounterVec, GaugeVec, etc., which only collect -// metrics after a metric with a fully specified label set has been accessed), -// it might even get registered as an unchecked Collector (cf. the Register -// method of the Registerer interface). Hence, only use this shortcut -// implementation of Describe if you are certain to fulfill the contract. -// -// The Collector example demonstrates a use of DescribeByCollect. -func DescribeByCollect(c Collector, descs chan<- *Desc) { - metrics := make(chan Metric) - go func() { - c.Collect(metrics) - close(metrics) - }() - for m := range metrics { - descs <- m.Desc() - } -} - -// selfCollector implements Collector for a single Metric so that the Metric -// collects itself. Add it as an anonymous field to a struct that implements -// Metric, and call init with the Metric itself as an argument. -type selfCollector struct { - self Metric -} - -// init provides the selfCollector with a reference to the metric it is supposed -// to collect. It is usually called within the factory function to create a -// metric. See example. -func (c *selfCollector) init(self Metric) { - c.self = self -} - -// Describe implements Collector. 
-func (c *selfCollector) Describe(ch chan<- *Desc) { - ch <- c.self.Desc() -} - -// Collect implements Collector. -func (c *selfCollector) Collect(ch chan<- Metric) { - ch <- c.self -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go deleted file mode 100644 index df72fcf..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Counter is a Metric that represents a single numerical value that only ever -// goes up. That implies that it cannot be used to count items whose number can -// also go down, e.g. the number of currently running goroutines. Those -// "counters" are represented by Gauges. -// -// A Counter is typically used to count requests served, tasks completed, errors -// occurred, etc. -// -// To create Counter instances, use NewCounter. -type Counter interface { - Metric - Collector - - // Inc increments the counter by 1. Use Add to increment it by arbitrary - // non-negative values. - Inc() - // Add adds the given value to the counter. It panics if the value is < - // 0. - Add(float64) -} - -// ExemplarAdder is implemented by Counters that offer the option of adding a -// value to the Counter together with an exemplar. Its AddWithExemplar method -// works like the Add method of the Counter interface but also replaces the -// currently saved exemplar (if any) with a new one, created from the provided -// value, the current time as timestamp, and the provided labels. Empty Labels -// will lead to a valid (label-less) exemplar. But if Labels is nil, the current -// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any -// of the provided labels are invalid, or if the provided labels contain more -// than 64 runes in total. -type ExemplarAdder interface { - AddWithExemplar(value float64, exemplar Labels) -} - -// CounterOpts is an alias for Opts. See there for doc comments. -type CounterOpts Opts - -// NewCounter creates a new Counter based on the provided CounterOpts. -// -// The returned implementation also implements ExemplarAdder. It is safe to -// perform the corresponding type assertion. -// -// The returned implementation tracks the counter value in two separate -// variables, a float64 and a uint64. The latter is used to track calls of the -// Inc method and calls of the Add method with a value that can be represented -// as a uint64. This allows atomic increments of the counter with optimal -// performance. (It is common to have an Inc call in very hot execution paths.) -// Both internal tracking values are added up in the Write method. This has to -// be taken into account when it comes to precision and overflow behavior. 
-func NewCounter(opts CounterOpts) Counter { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now} - result.init(result) // Init self-collection. - return result -} - -type counter struct { - // valBits contains the bits of the represented float64 value, while - // valInt stores values that are exact integers. Both have to go first - // in the struct to guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - valInt uint64 - - selfCollector - desc *Desc - - labelPairs []*dto.LabelPair - exemplar atomic.Value // Containing nil or a *dto.Exemplar. - - now func() time.Time // To mock out time.Now() for testing. -} - -func (c *counter) Desc() *Desc { - return c.desc -} - -func (c *counter) Add(v float64) { - if v < 0 { - panic(errors.New("counter cannot decrease in value")) - } - - ival := uint64(v) - if float64(ival) == v { - atomic.AddUint64(&c.valInt, ival) - return - } - - for { - oldBits := atomic.LoadUint64(&c.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { - return - } - } -} - -func (c *counter) AddWithExemplar(v float64, e Labels) { - c.Add(v) - c.updateExemplar(v, e) -} - -func (c *counter) Inc() { - atomic.AddUint64(&c.valInt, 1) -} - -func (c *counter) Write(out *dto.Metric) error { - fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) - ival := atomic.LoadUint64(&c.valInt) - val := fval + float64(ival) - - var exemplar *dto.Exemplar - if e := c.exemplar.Load(); e != nil { - exemplar = e.(*dto.Exemplar) - } - - return populateMetric(CounterValue, val, c.labelPairs, exemplar, out) -} - -func (c *counter) updateExemplar(v float64, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, c.now(), l) - if err != nil { - panic(err) - } - c.exemplar.Store(e) -} - -// CounterVec is a Collector that bundles a set of Counters that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. number of HTTP requests, partitioned by response code and -// method). Create instances with NewCounterVec. -type CounterVec struct { - *metricVec -} - -// NewCounterVec creates a new CounterVec based on the provided CounterOpts and -// partitioned by the given label names. -func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &CounterVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs), now: time.Now} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Counter for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Counter is created. -// -// It is possible to call this method without using the returned Counter to only -// create the new Counter but leave it at its starting value 0. See also the -// SummaryVec example. 
-// -// Keeping the Counter for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Counter from the CounterVec. In that case, -// the Counter will still exist, but it will not be exported anymore, even if a -// Counter with the same label values is created later. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// GetMetricWith returns the Counter for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Counter is created. Implications of -// creating a Counter without using it and keeping the Counter for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Counter), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *CounterVec) WithLabelValues(lvs ...string) Counter { - c, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return c -} - -// With works as GetMetricWith, but panics where GetMetricWithLabels would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *CounterVec) With(labels Labels) Counter { - c, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return c -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the CounterVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. 
Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &CounterVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// CounterFunc is a Counter whose value is determined at collect time by calling a -// provided function. -// -// To create CounterFunc instances, use NewCounterFunc. -type CounterFunc interface { - Metric - Collector -} - -// NewCounterFunc creates a new CounterFunc based on the provided -// CounterOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a CounterFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. The function should also honor -// the contract for a Counter (values only go up, not down), but compliance will -// not be checked. -func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), CounterValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go deleted file mode 100644 index e3232d7..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "fmt" - "sort" - "strings" - - "github.com/cespare/xxhash/v2" - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// Desc is the descriptor used by every Prometheus Metric. It is essentially -// the immutable meta-data of a Metric. The normal Metric implementations -// included in this package manage their Desc under the hood. Users only have to -// deal with Desc if they use advanced features like the ExpvarCollector or -// custom Collectors and Metrics. -// -// Descriptors registered with the same registry have to fulfill certain -// consistency and uniqueness criteria if they share the same fully-qualified -// name: They must have the same help string and the same label names (aka label -// dimensions) in each, constLabels and variableLabels, but they must differ in -// the values of the constLabels. 
-// -// Descriptors that share the same fully-qualified names and the same label -// values of their constLabels are considered equal. -// -// Use NewDesc to create new Desc instances. -type Desc struct { - // fqName has been built from Namespace, Subsystem, and Name. - fqName string - // help provides some helpful information about this metric. - help string - // constLabelPairs contains precalculated DTO label pairs based on - // the constant labels. - constLabelPairs []*dto.LabelPair - // VariableLabels contains names of labels for which the metric - // maintains variable values. - variableLabels []string - // id is a hash of the values of the ConstLabels and fqName. This - // must be unique among all registered descriptors and can therefore be - // used as an identifier of the descriptor. - id uint64 - // dimHash is a hash of the label names (preset and variable) and the - // Help string. Each Desc with the same fqName must have the same - // dimHash. - dimHash uint64 - // err is an error that occurred during construction. It is reported on - // registration time. - err error -} - -// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc -// and will be reported on registration time. variableLabels and constLabels can -// be nil if no such labels should be set. fqName must not be empty. -// -// variableLabels only contain the label names. Their label values are variable -// and therefore not part of the Desc. (They are managed within the Metric.) -// -// For constLabels, the label values are constant. Therefore, they are fully -// specified in the Desc. See the Collector example for a usage pattern. -func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { - d := &Desc{ - fqName: fqName, - help: help, - variableLabels: variableLabels, - } - if !model.IsValidMetricName(model.LabelValue(fqName)) { - d.err = fmt.Errorf("%q is not a valid metric name", fqName) - return d - } - // labelValues contains the label values of const labels (in order of - // their sorted label names) plus the fqName (at position 0). - labelValues := make([]string, 1, len(constLabels)+1) - labelValues[0] = fqName - labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) - labelNameSet := map[string]struct{}{} - // First add only the const label names and sort them... - for labelName := range constLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, labelName) - labelNameSet[labelName] = struct{}{} - } - sort.Strings(labelNames) - // ... so that we can now add const label values in the order of their names. - for _, labelName := range labelNames { - labelValues = append(labelValues, constLabels[labelName]) - } - // Validate the const label values. They can't have a wrong cardinality, so - // use in len(labelValues) as expectedNumberOfValues. - if err := validateLabelValues(labelValues, len(labelValues)); err != nil { - d.err = err - return d - } - // Now add the variable label names, but prefix them with something that - // cannot be in a regular label name. That prevents matching the label - // dimension with a different mix between preset and variable labels. 
- for _, labelName := range variableLabels { - if !checkLabelName(labelName) { - d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) - return d - } - labelNames = append(labelNames, "$"+labelName) - labelNameSet[labelName] = struct{}{} - } - if len(labelNames) != len(labelNameSet) { - d.err = errors.New("duplicate label names") - return d - } - - xxh := xxhash.New() - for _, val := range labelValues { - xxh.WriteString(val) - xxh.Write(separatorByteSlice) - } - d.id = xxh.Sum64() - // Sort labelNames so that order doesn't matter for the hash. - sort.Strings(labelNames) - // Now hash together (in this order) the help string and the sorted - // label names. - xxh.Reset() - xxh.WriteString(help) - xxh.Write(separatorByteSlice) - for _, labelName := range labelNames { - xxh.WriteString(labelName) - xxh.Write(separatorByteSlice) - } - d.dimHash = xxh.Sum64() - - d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) - for n, v := range constLabels { - d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(v), - }) - } - sort.Sort(labelPairSorter(d.constLabelPairs)) - return d -} - -// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the -// provided error set. If a collector returning such a descriptor is registered, -// registration will fail with the provided error. NewInvalidDesc can be used by -// a Collector to signal inability to describe itself. -func NewInvalidDesc(err error) *Desc { - return &Desc{ - err: err, - } -} - -func (d *Desc) String() string { - lpStrings := make([]string, 0, len(d.constLabelPairs)) - for _, lp := range d.constLabelPairs { - lpStrings = append( - lpStrings, - fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), - ) - } - return fmt.Sprintf( - "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", - d.fqName, - d.help, - strings.Join(lpStrings, ","), - d.variableLabels, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go deleted file mode 100644 index 9845012..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus is the core instrumentation package. It provides metrics -// primitives to instrument code for monitoring. It also offers a registry for -// metrics. Sub-packages allow to expose the registered metrics via HTTP -// (package promhttp) or push them to a Pushgateway (package push). There is -// also a sub-package promauto, which provides metrics constructors with -// automatic registration. -// -// All exported functions and methods are safe to be used concurrently unless -// specified otherwise. 
-// -// A Basic Example -// -// As a starting point, a very basic usage example: -// -// package main -// -// import ( -// "log" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var ( -// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ -// Name: "cpu_temperature_celsius", -// Help: "Current temperature of the CPU.", -// }) -// hdFailures = prometheus.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hd_errors_total", -// Help: "Number of hard-disk errors.", -// }, -// []string{"device"}, -// ) -// ) -// -// func init() { -// // Metrics have to be registered to be exposed: -// prometheus.MustRegister(cpuTemp) -// prometheus.MustRegister(hdFailures) -// } -// -// func main() { -// cpuTemp.Set(65.3) -// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() -// -// // The Handler function provides a default handler to expose metrics -// // via an HTTP server. "/metrics" is the usual endpoint for that. -// http.Handle("/metrics", promhttp.Handler()) -// log.Fatal(http.ListenAndServe(":8080", nil)) -// } -// -// -// This is a complete program that exports two metrics, a Gauge and a Counter, -// the latter with a label attached to turn it into a (one-dimensional) vector. -// -// Metrics -// -// The number of exported identifiers in this package might appear a bit -// overwhelming. However, in addition to the basic plumbing shown in the example -// above, you only need to understand the different metric types and their -// vector versions for basic usage. Furthermore, if you are not concerned with -// fine-grained control of when and how to register metrics with the registry, -// have a look at the promauto package, which will effectively allow you to -// ignore registration altogether in simple cases. -// -// Above, you have already touched the Counter and the Gauge. There are two more -// advanced metric types: the Summary and Histogram. A more thorough description -// of those four metric types can be found in the Prometheus docs: -// https://prometheus.io/docs/concepts/metric_types/ -// -// In addition to the fundamental metric types Gauge, Counter, Summary, and -// Histogram, a very important part of the Prometheus data model is the -// partitioning of samples along dimensions called labels, which results in -// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, -// and HistogramVec. -// -// While only the fundamental metric types implement the Metric interface, both -// the metrics and their vector versions implement the Collector interface. A -// Collector manages the collection of a number of Metrics, but for convenience, -// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and -// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec, -// and HistogramVec are not. -// -// To create instances of Metrics and their vector versions, you need a suitable -// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts. -// -// Custom Collectors and constant Metrics -// -// While you could create your own implementations of Metric, most likely you -// will only ever implement the Collector interface on your own. At a first -// glance, a custom Collector seems handy to bundle Metrics for common -// registration (with the prime example of the different metric vectors above, -// which bundle all the metrics of the same name but with different labels). 
-// -// There is a more involved use case, too: If you already have metrics -// available, created outside of the Prometheus context, you don't need the -// interface of the various Metric types. You essentially want to mirror the -// existing numbers into Prometheus Metrics during collection. An own -// implementation of the Collector interface is perfect for that. You can create -// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and -// NewConstSummary (and their respective Must… versions). NewConstMetric is used -// for all metric types with just a float64 as their value: Counter, Gauge, and -// a special “type” called Untyped. Use the latter if you are not sure if the -// mirrored metric is a Counter or a Gauge. Creation of the Metric instance -// happens in the Collect method. The Describe method has to return separate -// Desc instances, representative of the “throw-away” metrics to be created -// later. NewDesc comes in handy to create those Desc instances. Alternatively, -// you could return no Desc at all, which will mark the Collector “unchecked”. -// No checks are performed at registration time, but metric consistency will -// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape -// errors. Thus, with unchecked Collectors, the responsibility to not collect -// metrics that lead to inconsistencies in the total scrape result lies with the -// implementer of the Collector. While this is not a desirable state, it is -// sometimes necessary. The typical use case is a situation where the exact -// metrics to be returned by a Collector cannot be predicted at registration -// time, but the implementer has sufficient knowledge of the whole system to -// guarantee metric consistency. -// -// The Collector example illustrates the use case. You can also look at the -// source code of the processCollector (mirroring process metrics), the -// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar -// metrics) as examples that are used in this package itself. -// -// If you just need to call a function to get a single float value to collect as -// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting -// shortcuts. -// -// Advanced Uses of the Registry -// -// While MustRegister is the by far most common way of registering a Collector, -// sometimes you might want to handle the errors the registration might cause. -// As suggested by the name, MustRegister panics if an error occurs. With the -// Register function, the error is returned and can be handled. -// -// An error is returned if the registered Collector is incompatible or -// inconsistent with already registered metrics. The registry aims for -// consistency of the collected metrics according to the Prometheus data model. -// Inconsistencies are ideally detected at registration time, not at collect -// time. The former will usually be detected at start-up time of a program, -// while the latter will only happen at scrape time, possibly not even on the -// first scrape if the inconsistency only becomes relevant later. That is the -// main reason why a Collector and a Metric have to describe themselves to the -// registry. -// -// So far, everything we did operated on the so-called default registry, as it -// can be found in the global DefaultRegisterer variable. With NewRegistry, you -// can create a custom registry, or you can even implement the Registerer or -// Gatherer interfaces yourself. 
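Picking up the constant-metric pattern described above, a sketch of a custom Collector that mirrors a pre-existing number at collect time (the metric name and the depth source are hypothetical):

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector mirrors a value that already exists elsewhere in the
// program; the Metric instance is created on the fly in Collect.
type queueCollector struct {
	desc  *prometheus.Desc
	depth func() float64 // hypothetical stand-in for the existing value source
}

// Describe returns the throw-away metric's Desc, keeping the Collector checked.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect creates a constant metric per scrape via MustNewConstMetric.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.depth())
}

func main() {
	prometheus.MustRegister(&queueCollector{
		desc: prometheus.NewDesc(
			"work_queue_depth", "Current depth of the work queue.",
			nil, nil,
		),
		depth: func() float64 { return 7 }, // hypothetical source
	})
}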
The methods Register and Unregister work in the -// same way on a custom registry as the global functions Register and Unregister -// on the default registry. -// -// There are a number of uses for custom registries: You can use registries with -// special properties, see NewPedanticRegistry. You can avoid global state, as -// it is imposed by the DefaultRegisterer. You can use multiple registries at -// the same time to expose different metrics in different ways. You can use -// separate registries for testing purposes. -// -// Also note that the DefaultRegisterer comes registered with a Collector for Go -// runtime metrics (via NewGoCollector) and a Collector for process metrics (via -// NewProcessCollector). With a custom registry, you are in control and decide -// yourself about the Collectors to register. -// -// HTTP Exposition -// -// The Registry implements the Gatherer interface. The caller of the Gather -// method can then expose the gathered metrics in some way. Usually, the metrics -// are served via HTTP on the /metrics endpoint. That's happening in the example -// above. The tools to expose metrics via HTTP are in the promhttp sub-package. -// -// Pushing to the Pushgateway -// -// Functions for pushing to the Pushgateway can be found in the push sub-package. -// -// Graphite Bridge -// -// Functions and examples to push metrics from a Gatherer to Graphite can be -// found in the graphite sub-package. -// -// Other Means of Exposition -// -// More ways of exposing metrics can easily be added by following the approaches -// of the existing implementations. -package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go deleted file mode 100644 index 18a99d5..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "encoding/json" - "expvar" -) - -type expvarCollector struct { - exports map[string]*Desc -} - -// NewExpvarCollector returns a newly allocated expvar Collector that still has -// to be registered with a Prometheus registry. -// -// An expvar Collector collects metrics from the expvar interface. It provides a -// quick way to expose numeric values that are already exported via expvar as -// Prometheus metrics. Note that the data models of expvar and Prometheus are -// fundamentally different, and that the expvar Collector is inherently slower -// than native Prometheus metrics. Thus, the expvar Collector is probably great -// for experiments and prototyping, but you should seriously consider a more -// direct implementation of Prometheus metrics for monitoring production -// systems. -// -// The exports map has the following meaning: -// -// The keys in the map correspond to expvar keys, i.e. 
for every expvar key you -// want to export as Prometheus metric, you need an entry in the exports -// map. The descriptor mapped to each key describes how to export the expvar -// value. It defines the name and the help string of the Prometheus metric -// proxying the expvar value. The type will always be Untyped. -// -// For descriptors without variable labels, the expvar value must be a number or -// a bool. The number is then directly exported as the Prometheus sample -// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values -// that are not numbers or bools are silently ignored. -// -// If the descriptor has one variable label, the expvar value must be an expvar -// map. The keys in the expvar map become the various values of the one -// Prometheus label. The values in the expvar map must be numbers or bools again -// as above. -// -// For descriptors with more than one variable label, the expvar must be a -// nested expvar map, i.e. where the values of the topmost map are maps again -// etc. until a depth is reached that corresponds to the number of labels. The -// leaves of that structure must be numbers or bools as above to serve as the -// sample values. -// -// Anything that does not fit into the scheme above is silently ignored. -func NewExpvarCollector(exports map[string]*Desc) Collector { - return &expvarCollector{ - exports: exports, - } -} - -// Describe implements Collector. -func (e *expvarCollector) Describe(ch chan<- *Desc) { - for _, desc := range e.exports { - ch <- desc - } -} - -// Collect implements Collector. -func (e *expvarCollector) Collect(ch chan<- Metric) { - for name, desc := range e.exports { - var m Metric - expVar := expvar.Get(name) - if expVar == nil { - continue - } - var v interface{} - labels := make([]string, len(desc.variableLabels)) - if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { - ch <- NewInvalidMetric(desc, err) - continue - } - var processValue func(v interface{}, i int) - processValue = func(v interface{}, i int) { - if i >= len(labels) { - copiedLabels := append(make([]string, 0, len(labels)), labels...) - switch v := v.(type) { - case float64: - m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) - case bool: - if v { - m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) - } else { - m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) - } - default: - return - } - ch <- m - return - } - vm, ok := v.(map[string]interface{}) - if !ok { - return - } - for lv, val := range vm { - labels[i] = lv - processValue(val, i+1) - } - } - processValue(v, 0) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go deleted file mode 100644 index 3d383a7..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
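A sketch of the exports map contract from the NewExpvarCollector comment above; the expvar keys "http_hits" (a plain number) and "hits_by_path" (an expvar map) are assumed to be published elsewhere in the program:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	prometheus.MustRegister(prometheus.NewExpvarCollector(map[string]*prometheus.Desc{
		"http_hits": prometheus.NewDesc(
			"expvar_http_hits", "Total hits, mirrored from expvar.",
			nil, nil, // no variable labels: the expvar value must be a number or bool
		),
		"hits_by_path": prometheus.NewDesc(
			"expvar_hits_by_path", "Hits per path, mirrored from an expvar map.",
			[]string{"path"}, nil, // one variable label: the expvar value must be a map
		),
	}))
}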
- -package prometheus - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h -} - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go deleted file mode 100644 index d67573f..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "math" - "sync/atomic" - "time" - - dto "github.com/prometheus/client_model/go" -) - -// Gauge is a Metric that represents a single numerical value that can -// arbitrarily go up and down. -// -// A Gauge is typically used for measured values like temperatures or current -// memory usage, but also "counts" that can go up and down, like the number of -// running goroutines. -// -// To create Gauge instances, use NewGauge. -type Gauge interface { - Metric - Collector - - // Set sets the Gauge to an arbitrary value. - Set(float64) - // Inc increments the Gauge by 1. Use Add to increment it by arbitrary - // values. - Inc() - // Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary - // values. - Dec() - // Add adds the given value to the Gauge. (The value can be negative, - // resulting in a decrease of the Gauge.) - Add(float64) - // Sub subtracts the given value from the Gauge. (The value can be - // negative, resulting in an increase of the Gauge.) - Sub(float64) - - // SetToCurrentTime sets the Gauge to the current Unix time in seconds. - SetToCurrentTime() -} - -// GaugeOpts is an alias for Opts. See there for doc comments. -type GaugeOpts Opts - -// NewGauge creates a new Gauge based on the provided GaugeOpts. -// -// The returned implementation is optimized for a fast Set method. If you have a -// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick -// the former. For example, the Inc method of the returned Gauge is slower than -// the Inc method of a Counter returned by NewCounter. This matches the typical -// scenarios for Gauges and Counters, where the former tends to be Set-heavy and -// the latter Inc-heavy. -func NewGauge(opts GaugeOpts) Gauge { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ) - result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} - result.init(result) // Init self-collection. 
- return result -} - -type gauge struct { - // valBits contains the bits of the represented float64 value. It has - // to go first in the struct to guarantee alignment for atomic - // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG - valBits uint64 - - selfCollector - - desc *Desc - labelPairs []*dto.LabelPair -} - -func (g *gauge) Desc() *Desc { - return g.desc -} - -func (g *gauge) Set(val float64) { - atomic.StoreUint64(&g.valBits, math.Float64bits(val)) -} - -func (g *gauge) SetToCurrentTime() { - g.Set(float64(time.Now().UnixNano()) / 1e9) -} - -func (g *gauge) Inc() { - g.Add(1) -} - -func (g *gauge) Dec() { - g.Add(-1) -} - -func (g *gauge) Add(val float64) { - for { - oldBits := atomic.LoadUint64(&g.valBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + val) - if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { - return - } - } -} - -func (g *gauge) Sub(val float64) { - g.Add(val * -1) -} - -func (g *gauge) Write(out *dto.Metric) error { - val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) - return populateMetric(GaugeValue, val, g.labelPairs, nil, out) -} - -// GaugeVec is a Collector that bundles a set of Gauges that all share the same -// Desc, but have different values for their variable labels. This is used if -// you want to count the same thing partitioned by various dimensions -// (e.g. number of operations queued, partitioned by user and operation -// type). Create instances with NewGaugeVec. -type GaugeVec struct { - *metricVec -} - -// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and -// partitioned by the given label names. -func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &GaugeVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - if len(lvs) != len(desc.variableLabels) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) - } - result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} - result.init(result) // Init self-collection. - return result - }), - } -} - -// GetMetricWithLabelValues returns the Gauge for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Gauge is created. -// -// It is possible to call this method without using the returned Gauge to only -// create the new Gauge but leave it at its starting value 0. See also the -// SummaryVec example. -// -// Keeping the Gauge for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Gauge from the GaugeVec. In that case, the -// Gauge will still exist, but it will not be exported anymore, even if a -// Gauge with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. 
For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// GetMetricWith returns the Gauge for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Gauge is created. Implications of -// creating a Gauge without using it and keeping the Gauge for later use are -// the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Gauge), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Add(42) -func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { - g, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return g -} - -// With works as GetMetricWith, but panics where GetMetricWith would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) -func (v *GaugeVec) With(labels Labels) Gauge { - g, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return g -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the GaugeVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &GaugeVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error. -func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -// GaugeFunc is a Gauge whose value is determined at collect time by calling a -// provided function. -// -// To create GaugeFunc instances, use NewGaugeFunc. 
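A short sketch of NewGaugeFunc, introduced just above; the metric name is illustrative and the wrapped function must be concurrency-safe:

package main

import (
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// The function is invoked at collect time, possibly concurrently;
	// runtime.NumGoroutine is safe to call from multiple goroutines.
	prometheus.MustRegister(prometheus.NewGaugeFunc(
		prometheus.GaugeOpts{
			Name: "app_goroutines",
			Help: "Current number of goroutines.",
		},
		func() float64 { return float64(runtime.NumGoroutine()) },
	))
}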
-type GaugeFunc interface { - Metric - Collector -} - -// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The -// value reported is determined by calling the given function from within the -// Write method. Take into account that metric collection may happen -// concurrently. Therefore, it must be safe to call the provided function -// concurrently. -// -// NewGaugeFunc is a good way to create an “info” style metric with a constant -// value of 1. Example: -// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56 -func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), GaugeValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go deleted file mode 100644 index ea05cf4..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "runtime" - "runtime/debug" - "sync" - "time" -) - -type goCollector struct { - goroutinesDesc *Desc - threadsDesc *Desc - gcDesc *Desc - goInfoDesc *Desc - - // ms... are memstats related. - msLast *runtime.MemStats // Previously collected memstats. - msLastTimestamp time.Time - msMtx sync.Mutex // Protects msLast and msLastTimestamp. - msMetrics memStatsMetrics - msRead func(*runtime.MemStats) // For mocking in tests. - msMaxWait time.Duration // Wait time for fresh memstats. - msMaxAge time.Duration // Maximum allowed age of old memstats. -} - -// NewGoCollector returns a collector that exports metrics about the current Go -// process. This includes memory stats. To collect those, runtime.ReadMemStats -// is called. This requires to “stop the world”, which usually only happens for -// garbage collection (GC). Take the following implications into account when -// deciding whether to use the Go collector: -// -// 1. The performance impact of stopping the world is the more relevant the more -// frequently metrics are collected. However, with Go1.9 or later the -// stop-the-world time per metrics collection is very short (~25µs) so that the -// performance impact will only matter in rare cases. However, with older Go -// versions, the stop-the-world duration depends on the heap size and can be -// quite significant (~1.7 ms/GiB as per -// https://go-review.googlesource.com/c/go/+/34937). -// -// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the -// metrics collection happens to coincide with GC, it will only complete after -// GC has finished. Usually, GC is fast enough to not cause problems. However, -// with a very large heap, GC might take multiple seconds, which is enough to -// cause scrape timeouts in common setups. 
To avoid this problem, the Go -// collector will use the memstats from a previous collection if -// runtime.ReadMemStats takes more than 1s. However, if there are no previously -// collected memstats, or their collection is more than 5m ago, the collection -// will block until runtime.ReadMemStats succeeds. (The problem might be solved -// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go -// issue.) -func NewGoCollector() Collector { - return &goCollector{ - goroutinesDesc: NewDesc( - "go_goroutines", - "Number of goroutines that currently exist.", - nil, nil), - threadsDesc: NewDesc( - "go_threads", - "Number of OS threads created.", - nil, nil), - gcDesc: NewDesc( - "go_gc_duration_seconds", - "A summary of the pause duration of garbage collection cycles.", - nil, nil), - goInfoDesc: NewDesc( - "go_info", - "Information about the Go environment.", - nil, Labels{"version": runtime.Version()}), - msLast: &runtime.MemStats{}, - msRead: runtime.ReadMemStats, - msMaxWait: time.Second, - msMaxAge: 5 * time.Minute, - msMetrics: memStatsMetrics{ - { - desc: NewDesc( - memstatNamespace("alloc_bytes"), - "Number of bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("alloc_bytes_total"), - "Total number of bytes allocated, even if freed.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("sys_bytes"), - "Number of bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("lookups_total"), - "Total number of pointer lookups.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("mallocs_total"), - "Total number of mallocs.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("frees_total"), - "Total number of frees.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, - valType: CounterValue, - }, { - desc: NewDesc( - memstatNamespace("heap_alloc_bytes"), - "Number of heap bytes allocated and still in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_sys_bytes"), - "Number of heap bytes obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_idle_bytes"), - "Number of heap bytes waiting to be used.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_inuse_bytes"), - "Number of heap bytes that are in use.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("heap_released_bytes"), - "Number of heap bytes released to OS.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, - valType: GaugeValue, - }, { - desc: NewDesc( - 
memstatNamespace("heap_objects"), - "Number of allocated objects.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_inuse_bytes"), - "Number of bytes in use by the stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("stack_sys_bytes"), - "Number of bytes obtained from system for stack allocator.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_inuse_bytes"), - "Number of bytes in use by mspan structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mspan_sys_bytes"), - "Number of bytes used for mspan structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_inuse_bytes"), - "Number of bytes in use by mcache structures.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("mcache_sys_bytes"), - "Number of bytes used for mcache structures obtained from system.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("buck_hash_sys_bytes"), - "Number of bytes used by the profiling bucket hash table.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_sys_bytes"), - "Number of bytes used for garbage collection system metadata.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("other_sys_bytes"), - "Number of bytes used for other system allocations.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("next_gc_bytes"), - "Number of heap bytes when next garbage collection will take place.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("last_gc_time_seconds"), - "Number of seconds since 1970 of last garbage collection.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, - valType: GaugeValue, - }, { - desc: NewDesc( - memstatNamespace("gc_cpu_fraction"), - "The fraction of this program's available CPU time used by the GC since the program started.", - nil, nil, - ), - eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, - valType: GaugeValue, - }, - }, - } -} - -func memstatNamespace(s string) string { - return "go_memstats_" + s -} - -// Describe returns all descriptions of the collector. -func (c *goCollector) Describe(ch chan<- *Desc) { - ch <- c.goroutinesDesc - ch <- c.threadsDesc - ch <- c.gcDesc - ch <- c.goInfoDesc - for _, i := range c.msMetrics { - ch <- i.desc - } -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *goCollector) Collect(ch chan<- Metric) { - var ( - ms = &runtime.MemStats{} - done = make(chan struct{}) - ) - // Start reading memstats first as it might take a while. - go func() { - c.msRead(ms) - c.msMtx.Lock() - c.msLast = ms - c.msLastTimestamp = time.Now() - c.msMtx.Unlock() - close(done) - }() - - ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) - n, _ := runtime.ThreadCreateProfile(nil) - ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) - - var stats debug.GCStats - stats.PauseQuantiles = make([]time.Duration, 5) - debug.ReadGCStats(&stats) - - quantiles := make(map[float64]float64) - for idx, pq := range stats.PauseQuantiles[1:] { - quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() - } - quantiles[0.0] = stats.PauseQuantiles[0].Seconds() - ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) - - ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) - - timer := time.NewTimer(c.msMaxWait) - select { - case <-done: // Our own ReadMemStats succeeded in time. Use it. - timer.Stop() // Important for high collection frequencies to not pile up timers. - c.msCollect(ch, ms) - return - case <-timer.C: // Time out, use last memstats if possible. Continue below. - } - c.msMtx.Lock() - if time.Since(c.msLastTimestamp) < c.msMaxAge { - // Last memstats are recent enough. Collect from them under the lock. - c.msCollect(ch, c.msLast) - c.msMtx.Unlock() - return - } - // If we are here, the last memstats are too old or don't exist. We have - // to wait until our own ReadMemStats finally completes. For that to - // happen, we have to release the lock. - c.msMtx.Unlock() - <-done - c.msCollect(ch, ms) -} - -func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { - for _, i := range c.msMetrics { - ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) - } -} - -// memStatsMetrics provide description, value, and value type for memstat metrics. -type memStatsMetrics []struct { - desc *Desc - eval func(*runtime.MemStats) float64 - valType ValueType -} - -// NewBuildInfoCollector returns a collector collecting a single metric -// "go_build_info" with the constant value 1 and three labels "path", "version", -// and "checksum". Their label values contain the main module path, version, and -// checksum, respectively. The labels will only have meaningful values if the -// binary is built with Go module support and from source code retrieved from -// the source repository (rather than the local file system). This is usually -// accomplished by building from outside of GOPATH, specifying the full address -// of the main package, e.g. "GO111MODULE=on go run -// github.com/prometheus/client_golang/examples/random". If built without Go -// module support, all label values will be "unknown". If built with Go module -// support but using the source code from the local file system, the "path" will -// be set appropriately, but "checksum" will be empty and "version" will be -// "(devel)". -// -// This collector uses only the build information for the main module. See -// https://github.com/povilasv/prommod for an example of a collector for the -// module dependencies. 
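Wiring the Go runtime and build-info collectors described above onto a custom registry might look like this sketch (port and endpoint are illustrative):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A fresh registry avoids the DefaultRegisterer's global state, so the
	// Go and build-info collectors have to be registered explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	reg.MustRegister(prometheus.NewBuildInfoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}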
-func NewBuildInfoCollector() Collector { - path, version, sum := readBuildInfo() - c := &selfCollector{MustNewConstMetric( - NewDesc( - "go_build_info", - "Build information about the main Go module.", - nil, Labels{"path": path, "version": version, "checksum": sum}, - ), - GaugeValue, 1)} - c.init(c.self) - return c -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go deleted file mode 100644 index 4271f43..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ /dev/null @@ -1,636 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// A Histogram counts individual observations from an event or sample stream in -// configurable buckets. Similar to a summary, it also provides a sum of -// observations and an observation count. -// -// On the Prometheus server, quantiles can be calculated from a Histogram using -// the histogram_quantile function in the query language. -// -// Note that Histograms, in contrast to Summaries, can be aggregated with the -// Prometheus query language (see the documentation for detailed -// procedures). However, Histograms require the user to pre-define suitable -// buckets, and they are in general less accurate. The Observe method of a -// Histogram has a very low performance overhead in comparison with the Observe -// method of a Summary. -// -// To create Histogram instances, use NewHistogram. -type Histogram interface { - Metric - Collector - - // Observe adds a single observation to the histogram. - Observe(float64) -} - -// bucketLabel is used for the label that defines the upper bound of a -// bucket of a histogram ("le" -> "less or equal"). -const bucketLabel = "le" - -// DefBuckets are the default Histogram buckets. The default buckets are -// tailored to broadly measure the response time (in seconds) of a network -// service. Most likely, however, you will be required to define buckets -// customized to your use case. -var ( - DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - - errBucketLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in histograms", bucketLabel, - ) -) - -// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest -// bucket has an upper bound of 'start'. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is zero or negative. 
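For example, LinearBuckets as documented above can back a histogram with ten 50ms-wide buckets (a sketch; the metric name and bounds are illustrative):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Buckets 0.05, 0.10, ..., 0.50 seconds; the +Inf bucket is implicit.
	durations := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "task_duration_seconds",
		Help:    "Task durations in seconds.",
		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
	})
	prometheus.MustRegister(durations)
	durations.Observe(0.23)
}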
-func LinearBuckets(start, width float64, count int) []float64 { - if count < 1 { - panic("LinearBuckets needs a positive count") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start += width - } - return buckets -} - -// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an -// upper bound of 'start' and each following bucket's upper bound is 'factor' -// times the previous bucket's upper bound. The final +Inf bucket is not counted -// and not included in the returned slice. The returned slice is meant to be -// used for the Buckets field of HistogramOpts. -// -// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, -// or if 'factor' is less than or equal to 1. -func ExponentialBuckets(start, factor float64, count int) []float64 { - if count < 1 { - panic("ExponentialBuckets needs a positive count") - } - if start <= 0 { - panic("ExponentialBuckets needs a positive start value") - } - if factor <= 1 { - panic("ExponentialBuckets needs a factor greater than 1") - } - buckets := make([]float64, count) - for i := range buckets { - buckets[i] = start - start *= factor - } - return buckets -} - -// HistogramOpts bundles the options for creating a Histogram metric. It is -// mandatory to set Name to a non-empty string. All other fields are optional -// and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type HistogramOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Histogram (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Histogram must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Histogram. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels - ConstLabels Labels - - // Buckets defines the buckets into which observations are counted. Each - // element in the slice is the upper inclusive bound of a bucket. The - // values must be sorted in strictly increasing order. There is no need - // to add a highest bucket with +Inf bound, it will be added - // implicitly. The default value is DefBuckets. - Buckets []float64 -} - -// NewHistogram creates a new Histogram based on the provided HistogramOpts. It -// panics if the buckets in HistogramOpts are not in strictly increasing order. -// -// The returned implementation also implements ExemplarObserver. It is safe to -// perform the corresponding type assertion. Exemplars are tracked separately -// for each bucket. 
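The exemplar support mentioned above is reached via a type assertion on the returned Histogram; a sketch (the trace_id label value is hypothetical):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	latency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_latency_seconds",
		Help: "Request latency in seconds.",
	})
	prometheus.MustRegister(latency)

	// Per the comment above, the returned implementation also satisfies
	// ExemplarObserver, so this assertion is expected to succeed.
	if eo, ok := latency.(prometheus.ExemplarObserver); ok {
		eo.ObserveWithExemplar(0.31, prometheus.Labels{"trace_id": "abc123"})
	}
}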
-func NewHistogram(opts HistogramOpts) Histogram { - return newHistogram( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == bucketLabel { - panic(errBucketLabelNotAllowed) - } - } - - if len(opts.Buckets) == 0 { - opts.Buckets = DefBuckets - } - - h := &histogram{ - desc: desc, - upperBounds: opts.Buckets, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*histogramCounts{{}, {}}, - now: time.Now, - } - for i, upperBound := range h.upperBounds { - if i < len(h.upperBounds)-1 { - if upperBound >= h.upperBounds[i+1] { - panic(fmt.Errorf( - "histogram buckets must be in increasing order: %f >= %f", - upperBound, h.upperBounds[i+1], - )) - } - } else { - if math.IsInf(upperBound, +1) { - // The +Inf bucket is implicit. Remove it here. - h.upperBounds = h.upperBounds[:i] - } - } - } - // Finally we know the final length of h.upperBounds and can make buckets - // for both counts as well as exemplars: - h.counts[0].buckets = make([]uint64, len(h.upperBounds)) - h.counts[1].buckets = make([]uint64, len(h.upperBounds)) - h.exemplars = make([]atomic.Value, len(h.upperBounds)+1) - - h.init(h) // Init self-collection. - return h -} - -type histogramCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 - buckets []uint64 -} - -type histogram struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // histogramCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the histogram) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - // - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*histogramCounts - - upperBounds []float64 - labelPairs []*dto.LabelPair - exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar. 
- - now func() time.Time // To mock out time.Now() for testing. -} - -func (h *histogram) Desc() *Desc { - return h.desc -} - -func (h *histogram) Observe(v float64) { - h.observe(v, h.findBucket(v)) -} - -func (h *histogram) ObserveWithExemplar(v float64, e Labels) { - i := h.findBucket(v) - h.observe(v, i) - h.updateExemplar(v, i, e) -} - -func (h *histogram) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - h.writeMtx.Lock() - defer h.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := h.counts[n>>63] - coldCounts := h.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - his := &dto.Histogram{ - Bucket: make([]*dto.Bucket, len(h.upperBounds)), - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - var cumCount uint64 - for i, upperBound := range h.upperBounds { - cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) - his.Bucket[i] = &dto.Bucket{ - CumulativeCount: proto.Uint64(cumCount), - UpperBound: proto.Float64(upperBound), - } - if e := h.exemplars[i].Load(); e != nil { - his.Bucket[i].Exemplar = e.(*dto.Exemplar) - } - } - // If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly. - if e := h.exemplars[len(h.upperBounds)].Load(); e != nil { - b := &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(math.Inf(1)), - Exemplar: e.(*dto.Exemplar), - } - his.Bucket = append(his.Bucket, b) - } - - out.Histogram = his - out.Label = h.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. - atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - for i := range h.upperBounds { - atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) - atomic.StoreUint64(&coldCounts.buckets[i], 0) - } - return nil -} - -// findBucket returns the index of the bucket for the provided value, or -// len(h.upperBounds) for the +Inf bucket. -func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. 
- // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op - return sort.SearchFloat64s(h.upperBounds, v) -} - -// observe is the implementation for Observe without the findBucket part. -func (h *histogram) observe(v float64, bucket int) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&h.countAndHotIdx, 1) - hotCounts := h.counts[n>>63] - - if bucket < len(h.upperBounds) { - atomic.AddUint64(&hotCounts.buckets[bucket], 1) - } - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -// updateExemplar replaces the exemplar for the provided bucket. With empty -// labels, it's a no-op. It panics if any of the labels is invalid. -func (h *histogram) updateExemplar(v float64, bucket int, l Labels) { - if l == nil { - return - } - e, err := newExemplar(v, h.now(), l) - if err != nil { - panic(err) - } - h.exemplars[bucket].Store(e) -} - -// HistogramVec is a Collector that bundles a set of Histograms that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewHistogramVec. -type HistogramVec struct { - *metricVec -} - -// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and -// partitioned by the given label names. -func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &HistogramVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newHistogram(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Histogram for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Histogram is created. -// -// It is possible to call this method without using the returned Histogram to only -// create the new Histogram but leave it at its starting value, a Histogram without -// any observations. -// -// Keeping the Histogram for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Histogram from the HistogramVec. In that case, the -// Histogram will still exist, but it will not be exported anymore, even if a -// Histogram with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. 
Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// GetMetricWith returns the Histogram for the given Labels map (the label names -// must match those of the VariableLabels in Desc). If that label map is -// accessed for the first time, a new Histogram is created. Implications of -// creating a Histogram without using it and keeping the Histogram for later use -// are the same as for GetMetricWithLabelValues. -// -// An error is returned if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc (minus any curried labels). -// -// This method is used for the same purpose as -// GetMetricWithLabelValues(...string). See there for pros and cons of the two -// methods. -func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { - metric, err := v.metricVec.getMetricWith(labels) - if metric != nil { - return metric.(Observer), err - } - return nil, err -} - -// WithLabelValues works as GetMetricWithLabelValues, but panics where -// GetMetricWithLabelValues would have returned an error. Not returning an -// error allows shortcuts like -// myVec.WithLabelValues("404", "GET").Observe(42.21) -func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { - h, err := v.GetMetricWithLabelValues(lvs...) - if err != nil { - panic(err) - } - return h -} - -// With works as GetMetricWith but panics where GetMetricWith would have -// returned an error. Not returning an error allows shortcuts like -// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) -func (v *HistogramVec) With(labels Labels) Observer { - h, err := v.GetMetricWith(labels) - if err != nil { - panic(err) - } - return h -} - -// CurryWith returns a vector curried with the provided labels, i.e. the -// returned vector has those labels pre-set for all labeled operations performed -// on it. The cardinality of the curried vector is reduced accordingly. The -// order of the remaining labels stays the same (just with the curried labels -// taken out of the sequence – which is relevant for the -// (GetMetric)WithLabelValues methods). It is possible to curry a curried -// vector, but only with labels not yet used for currying before. -// -// The metrics contained in the HistogramVec are shared between the curried and -// uncurried vectors. They are just accessed differently. Curried and uncurried -// vectors behave identically in terms of collection. Only one must be -// registered with a given registry (usually the uncurried version). The Reset -// method deletes all metrics, even if called on a curried vector. -func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { - vec, err := v.curryWith(labels) - if vec != nil { - return &HistogramVec{vec}, err - } - return nil, err -} - -// MustCurryWith works as CurryWith but panics where CurryWith would have -// returned an error.
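Since the accessor and currying methods documented above are easiest to grasp side by side, here is a hedged usage sketch; the metric name, label names, and values are made up for illustration.

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqDur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "example_request_duration_seconds", // illustrative name
			Help:    "Request latency partitioned by code and method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)

	// Positional access: the argument order must match the label-name slice.
	reqDur.WithLabelValues("404", "GET").Observe(0.042)

	// Named access: more verbose, but immune to argument-order mistakes.
	reqDur.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(0.042)

	// Currying pins "method"; the curried vector then only needs "code".
	getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("200").Observe(0.021)
}

Both vectors share the same underlying metrics, so only one of them (usually the uncurried one) should be registered, as the CurryWith comment notes.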
-func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { - vec, err := v.CurryWith(labels) - if err != nil { - panic(err) - } - return vec -} - -type constHistogram struct { - desc *Desc - count uint64 - sum float64 - buckets map[float64]uint64 - labelPairs []*dto.LabelPair -} - -func (h *constHistogram) Desc() *Desc { - return h.desc -} - -func (h *constHistogram) Write(out *dto.Metric) error { - his := &dto.Histogram{} - buckets := make([]*dto.Bucket, 0, len(h.buckets)) - - his.SampleCount = proto.Uint64(h.count) - his.SampleSum = proto.Float64(h.sum) - - for upperBound, count := range h.buckets { - buckets = append(buckets, &dto.Bucket{ - CumulativeCount: proto.Uint64(count), - UpperBound: proto.Float64(upperBound), - }) - } - - if len(buckets) > 0 { - sort.Sort(buckSort(buckets)) - } - his.Bucket = buckets - - out.Histogram = his - out.Label = h.labelPairs - - return nil -} - -// NewConstHistogram returns a metric representing a Prometheus histogram with -// fixed values for the count, sum, and bucket counts. As those parameters -// cannot be changed, the returned value does not implement the Histogram -// interface (but only the Metric interface). Users of this package will not -// have much use for it in regular operations. However, when implementing custom -// Collectors, it is useful as a throw-away metric that is generated on the fly -// to send it to Prometheus in the Collect method. -// -// buckets is a map of upper bounds to cumulative counts, excluding the +Inf -// bucket. -// -// NewConstHistogram returns an error if the length of labelValues is not -// consistent with the variable labels in Desc or if Desc is invalid. -func NewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constHistogram{ - desc: desc, - count: count, - sum: sum, - buckets: buckets, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstHistogram is a version of NewConstHistogram that panics where -// NewConstHistogram would have returned an error. -func MustNewConstHistogram( - desc *Desc, - count uint64, - sum float64, - buckets map[float64]uint64, - labelValues ...string, -) Metric { - m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) - if err != nil { - panic(err) - } - return m -} - -type buckSort []*dto.Bucket - -func (s buckSort) Len() int { - return len(s) -} - -func (s buckSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s buckSort) Less(i, j int) bool { - return s[i].GetUpperBound() < s[j].GetUpperBound() -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go deleted file mode 100644 index 351c26e..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "sort" - - dto "github.com/prometheus/client_model/go" -) - -// metricSorter is a sortable slice of *dto.Metric. -type metricSorter []*dto.Metric - -func (s metricSorter) Len() int { - return len(s) -} - -func (s metricSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s metricSorter) Less(i, j int) bool { - if len(s[i].Label) != len(s[j].Label) { - // This should not happen. The metrics are - // inconsistent. However, we have to deal with it, as - // people might use custom collectors or metric family injection - // to create inconsistent metrics. So let's simply compare the - // number of labels in this case. That will still yield - // reproducible sorting. - return len(s[i].Label) < len(s[j].Label) - } - for n, lp := range s[i].Label { - vi := lp.GetValue() - vj := s[j].Label[n].GetValue() - if vi != vj { - return vi < vj - } - } - - // We should never arrive here. Multiple metrics with the same - // label set in the same scrape will lead to undefined ingestion - // behavior. However, as above, we have to provide stable sorting - // here, even for inconsistent metrics. So sort equal metrics - // by their timestamp, with missing timestamps (implying "now") - // coming last. - if s[i].TimestampMs == nil { - return false - } - if s[j].TimestampMs == nil { - return true - } - return s[i].GetTimestampMs() < s[j].GetTimestampMs() -} - -// NormalizeMetricFamilies returns a MetricFamily slice with empty -// MetricFamilies pruned and the remaining MetricFamilies sorted by name within -// the slice, with the contained Metrics sorted within each MetricFamily. -func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { - for _, mf := range metricFamiliesByName { - sort.Sort(metricSorter(mf.Metric)) - } - names := make([]string, 0, len(metricFamiliesByName)) - for name, mf := range metricFamiliesByName { - if len(mf.Metric) > 0 { - names = append(names, name) - } - } - sort.Strings(names) - result := make([]*dto.MetricFamily, 0, len(names)) - for _, name := range names { - result = append(result, metricFamiliesByName[name]) - } - return result -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go deleted file mode 100644 index 2744443..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
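Because the three-stage comparison in metricSorter.Less above is easy to misread, the same ordering rule follows in a self-contained form; fakeMetric and its fields are invented stand-ins for *dto.Metric, not part of the package.

package main

import (
	"fmt"
	"sort"
)

// fakeMetric stands in for *dto.Metric: ordered label values plus an
// optional timestamp (nil meaning "now").
type fakeMetric struct {
	labelValues []string
	timestampMs *int64
}

// less mirrors metricSorter.Less: label count, then label values in
// order, then timestamp, with missing timestamps sorting last.
func less(a, b fakeMetric) bool {
	if len(a.labelValues) != len(b.labelValues) {
		return len(a.labelValues) < len(b.labelValues)
	}
	for i, v := range a.labelValues {
		if v != b.labelValues[i] {
			return v < b.labelValues[i]
		}
	}
	if a.timestampMs == nil {
		return false
	}
	if b.timestampMs == nil {
		return true
	}
	return *a.timestampMs < *b.timestampMs
}

func main() {
	ts := int64(1000)
	ms := []fakeMetric{
		{labelValues: []string{"GET", "500"}},
		{labelValues: []string{"GET", "200"}},
		{labelValues: []string{"GET", "200"}, timestampMs: &ts},
	}
	sort.Slice(ms, func(i, j int) bool { return less(ms[i], ms[j]) })
	for _, m := range ms {
		fmt.Println(m.labelValues, m.timestampMs != nil)
	}
	// Prints ("GET","200") with a timestamp first, then without, then ("GET","500").
}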
- -package prometheus - -import ( - "errors" - "fmt" - "strings" - "unicode/utf8" - - "github.com/prometheus/common/model" -) - -// Labels represents a collection of label name -> value mappings. This type is -// commonly used with the With(Labels) and GetMetricWith(Labels) methods of -// metric vector Collectors, e.g.: -// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) -// -// The other use-case is the specification of constant label pairs in Opts or to -// create a Desc. -type Labels map[string]string - -// reservedLabelPrefix is a prefix which is not legal in user-supplied -// label names. -const reservedLabelPrefix = "__" - -var errInconsistentCardinality = errors.New("inconsistent label cardinality") - -func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { - return fmt.Errorf( - "%s: %q has %d variable labels named %q but %d values %q were provided", - errInconsistentCardinality, fqName, - len(labels), labels, - len(labelValues), labelValues, - ) -} - -func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { - if len(labels) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(labels), labels, - ) - } - - for name, val := range labels { - if !utf8.ValidString(val) { - return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) - } - } - - return nil -} - -func validateLabelValues(vals []string, expectedNumberOfValues int) error { - if len(vals) != expectedNumberOfValues { - return fmt.Errorf( - "%s: expected %d label values but got %d in %#v", - errInconsistentCardinality, expectedNumberOfValues, - len(vals), vals, - ) - } - - for _, val := range vals { - if !utf8.ValidString(val) { - return fmt.Errorf("label value %q is not valid UTF-8", val) - } - } - - return nil -} - -func checkLabelName(l string) bool { - return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go deleted file mode 100644 index 0df1eff..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ /dev/null @@ -1,175 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "strings" - "time" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash. - -// A Metric models a single sample value with its meta data being exported to -// Prometheus. Implementations of Metric in this package are Gauge, Counter, -// Histogram, Summary, and Untyped. -type Metric interface { - // Desc returns the descriptor for the Metric. 
This method idempotently - // returns the same descriptor throughout the lifetime of the - // Metric. The returned descriptor is immutable by contract. A Metric - // unable to describe itself must return an invalid descriptor (created - // with NewInvalidDesc). - Desc() *Desc - // Write encodes the Metric into a "Metric" Protocol Buffer data - // transmission object. - // - // Metric implementations must observe concurrency safety as reads of - // this metric may occur at any time, and any blocking occurs at the - // expense of total performance of rendering all registered - // metrics. Ideally, Metric implementations should support concurrent - // readers. - // - // While populating dto.Metric, it is the responsibility of the - // implementation to ensure validity of the Metric protobuf (like valid - // UTF-8 strings or syntactically valid metric and label names). It is - // recommended to sort labels lexicographically. Callers of Write should - // still make sure of sorting if they depend on it. - Write(*dto.Metric) error - // TODO(beorn7): The original rationale of passing in a pre-allocated - // dto.Metric protobuf to save allocations has disappeared. The - // signature of this method should be changed to "Write() (*dto.Metric, - // error)". -} - -// Opts bundles the options for creating most Metric types. Each metric -// implementation XXX has its own XXXOpts type, but in most cases, it is just -// an alias of this type (which might change when the requirement arises). -// -// It is mandatory to set Name to a non-empty string. All other fields are -// optional and can safely be left at their zero value, although it is strongly -// encouraged to set a Help string. -type Opts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Metric (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the metric must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this metric. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels -} - -// BuildFQName joins the given three name components by "_". Empty name -// components are ignored. If the name parameter itself is empty, an empty -// string is returned, no matter what. Metric implementations included in this -// library use this function internally to generate the fully-qualified metric -// name from the name component in their Opts. Users of the library will only -// need this function if they implement their own Metric or instantiate a Desc -// (with NewDesc) directly.
-func BuildFQName(namespace, subsystem, name string) string { - if name == "" { - return "" - } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") - } - return name -} - -// labelPairSorter implements sort.Interface. It is used to sort a slice of -// dto.LabelPair pointers. -type labelPairSorter []*dto.LabelPair - -func (s labelPairSorter) Len() int { - return len(s) -} - -func (s labelPairSorter) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s labelPairSorter) Less(i, j int) bool { - return s[i].GetName() < s[j].GetName() -} - -type invalidMetric struct { - desc *Desc - err error -} - -// NewInvalidMetric returns a metric whose Write method always returns the -// provided error. It is useful if a Collector finds itself unable to collect -// a metric and wishes to report an error to the registry. -func NewInvalidMetric(desc *Desc, err error) Metric { - return &invalidMetric{desc, err} -} - -func (m *invalidMetric) Desc() *Desc { return m.desc } - -func (m *invalidMetric) Write(*dto.Metric) error { return m.err } - -type timestampedMetric struct { - Metric - t time.Time -} - -func (m timestampedMetric) Write(pb *dto.Metric) error { - e := m.Metric.Write(pb) - pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) - return e -} - -// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a -// way that it has an explicit timestamp set to the provided Time. This is only -// useful in rare cases as the timestamp of a Prometheus metric should usually -// be set by the Prometheus server during scraping. Exceptions include mirroring -// metrics with given timestamps from other metric -// sources. -// -// NewMetricWithTimestamp works best with MustNewConstMetric, -// MustNewConstHistogram, and MustNewConstSummary, see example. -// -// Currently, the exposition formats used by Prometheus are limited to -// millisecond resolution. Thus, the provided time will be rounded down to the -// next full millisecond value. -func NewMetricWithTimestamp(t time.Time, m Metric) Metric { - return timestampedMetric{Metric: m, t: t} -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go deleted file mode 100644 index 4412801..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// Observer is the interface that wraps the Observe method, which is used by -// Histogram and Summary to add observations. -type Observer interface { - Observe(float64) -} - -// The ObserverFunc type is an adapter to allow the use of ordinary -// functions as Observers. 
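The joining rules that BuildFQName's switch above implements are worth seeing as concrete inputs and outputs; this tiny program only exercises the exported function.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// All three components present: joined with "_".
	fmt.Println(prometheus.BuildFQName("myapp", "http", "requests_total")) // myapp_http_requests_total
	// Empty namespace or subsystem components are skipped...
	fmt.Println(prometheus.BuildFQName("", "http", "requests_total")) // http_requests_total
	fmt.Println(prometheus.BuildFQName("myapp", "", "requests_total")) // myapp_requests_total
	// ...but an empty name always yields the empty string.
	fmt.Printf("%q\n", prometheus.BuildFQName("myapp", "http", "")) // ""
}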
If f is a function with the appropriate -// signature, ObserverFunc(f) is an Observer that calls f. -// -// This adapter is usually used in connection with the Timer type, and there are -// two general use cases: -// -// The most common one is to use a Gauge as the Observer for a Timer. -// See the "Gauge" Timer example. -// -// The more advanced use case is to create a function that dynamically decides -// which Observer to use for observing the duration. See the "Complex" Timer -// example. -type ObserverFunc func(float64) - -// Observe calls f(value). It implements Observer. -func (f ObserverFunc) Observe(value float64) { - f(value) -} - -// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. -type ObserverVec interface { - GetMetricWith(Labels) (Observer, error) - GetMetricWithLabelValues(lvs ...string) (Observer, error) - With(Labels) Observer - WithLabelValues(...string) Observer - CurryWith(Labels) (ObserverVec, error) - MustCurryWith(Labels) ObserverVec - - Collector -} - -// ExemplarObserver is implemented by Observers that offer the option of -// observing a value together with an exemplar. Its ObserveWithExemplar method -// works like the Observe method of an Observer but also replaces the currently -// saved exemplar (if any) with a new one, created from the provided value, the -// current time as timestamp, and the provided Labels. Empty Labels will lead to -// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is -// left in place. ObserveWithExemplar panics if any of the provided labels are -// invalid or if the provided labels contain more than 64 runes in total. -type ExemplarObserver interface { - ObserveWithExemplar(value float64, exemplar Labels) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go deleted file mode 100644 index 9b80979..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "errors" - "os" -) - -type processCollector struct { - collectFn func(chan<- Metric) - pidFn func() (int, error) - reportErrors bool - cpuTotal *Desc - openFDs, maxFDs *Desc - vsize, maxVsize *Desc - rss *Desc - startTime *Desc -} - -// ProcessCollectorOpts defines the behavior of a process metrics collector -// created with NewProcessCollector. -type ProcessCollectorOpts struct { - // PidFn returns the PID of the process the collector collects metrics - // for. It is called upon each collection. By default, the PID of the - // current process is used, as determined on construction time by - // calling os.Getpid(). - PidFn func() (int, error) - // If non-empty, each of the collected metrics is prefixed by the - // provided string and an underscore ("_"). 
- Namespace string - // If true, any error encountered during collection is reported as an - // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored - // and the collected metrics will be incomplete. (Possibly, no metrics - // will be collected at all.) While that's usually not desired, it is - // appropriate for the common "mix-in" of process metrics, where process - // metrics are nice to have, but failing to collect them should not - // disrupt the collection of the remaining metrics. - ReportErrors bool -} - -// NewProcessCollector returns a collector which exports the current state of -// process metrics including CPU, memory and file descriptor usage as well as -// the process start time. The detailed behavior is defined by the provided -// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a -// collector for the current process with an empty namespace string and no error -// reporting. -// -// The collector only works on operating systems with a Linux-style proc -// filesystem and on Microsoft Windows. On other operating systems, it will not -// collect any metrics. -func NewProcessCollector(opts ProcessCollectorOpts) Collector { - ns := "" - if len(opts.Namespace) > 0 { - ns = opts.Namespace + "_" - } - - c := &processCollector{ - reportErrors: opts.ReportErrors, - cpuTotal: NewDesc( - ns+"process_cpu_seconds_total", - "Total user and system CPU time spent in seconds.", - nil, nil, - ), - openFDs: NewDesc( - ns+"process_open_fds", - "Number of open file descriptors.", - nil, nil, - ), - maxFDs: NewDesc( - ns+"process_max_fds", - "Maximum number of open file descriptors.", - nil, nil, - ), - vsize: NewDesc( - ns+"process_virtual_memory_bytes", - "Virtual memory size in bytes.", - nil, nil, - ), - maxVsize: NewDesc( - ns+"process_virtual_memory_max_bytes", - "Maximum amount of virtual memory available in bytes.", - nil, nil, - ), - rss: NewDesc( - ns+"process_resident_memory_bytes", - "Resident memory size in bytes.", - nil, nil, - ), - startTime: NewDesc( - ns+"process_start_time_seconds", - "Start time of the process since unix epoch in seconds.", - nil, nil, - ), - } - - if opts.PidFn == nil { - pid := os.Getpid() - c.pidFn = func() (int, error) { return pid, nil } - } else { - c.pidFn = opts.PidFn - } - - // Set up process metric collection if supported by the runtime. - if canCollectProcess() { - c.collectFn = c.processCollect - } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } - } - - return c -} - -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime -} - -// Collect returns the current state of all metrics of the collector. 
-func (c *processCollector) Collect(ch chan<- Metric) { - c.collectFn(ch) -} - -func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { - if !c.reportErrors { - return - } - if desc == nil { - desc = NewInvalidDesc(err) - } - ch <- NewInvalidMetric(desc, err) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go deleted file mode 100644 index 3117461..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package prometheus - -import ( - "github.com/prometheus/procfs" -) - -func canCollectProcess() bool { - _, err := procfs.NewDefaultFS() - return err == nil -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - pid, err := c.pidFn() - if err != nil { - c.reportError(ch, nil, err) - return - } - - p, err := procfs.NewProc(pid) - if err != nil { - c.reportError(ch, nil, err) - return - } - - if stat, err := p.Stat(); err == nil { - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) - if startTime, err := stat.StartTime(); err == nil { - ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) - } else { - c.reportError(ch, c.startTime, err) - } - } else { - c.reportError(ch, nil, err) - } - - if fds, err := p.FileDescriptorsLen(); err == nil { - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) - } else { - c.reportError(ch, c.openFDs, err) - } - - if limits, err := p.Limits(); err == nil { - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) - ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) - } else { - c.reportError(ch, nil, err) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go deleted file mode 100644 index e0b935d..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
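For orientation, a minimal sketch of how the process collector deleted above is typically wired up; the registry, options, and port are arbitrary choices, not mandated by the package.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// A zero PidFn means "current process"; ReportErrors surfaces
	// collection failures as invalid metrics instead of dropping them.
	reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
		ReportErrors: true,
	}))
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9100", nil))
}

On a platform without a proc filesystem or the Windows APIs, the canCollectProcess check above fails and, with ReportErrors set, the scrape reports the error rather than silently omitting the process metrics.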
- -package prometheus - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -func canCollectProcess() bool { - return true -} - -var ( - modpsapi = syscall.NewLazyDLL("psapi.dll") - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") - procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") -) - -type processMemoryCounters struct { - // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex - _ uint32 - PageFaultCount uint32 - PeakWorkingSetSize uint64 - WorkingSetSize uint64 - QuotaPeakPagedPoolUsage uint64 - QuotaPagedPoolUsage uint64 - QuotaPeakNonPagedPoolUsage uint64 - QuotaNonPagedPoolUsage uint64 - PagefileUsage uint64 - PeakPagefileUsage uint64 - PrivateUsage uint64 -} - -func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { - mem := processMemoryCounters{} - r1, _, err := procGetProcessMemoryInfo.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&mem)), - uintptr(unsafe.Sizeof(mem)), - ) - if r1 != 1 { - return mem, err - } else { - return mem, nil - } -} - -func getProcessHandleCount(handle windows.Handle) (uint32, error) { - var count uint32 - r1, _, err := procGetProcessHandleCount.Call( - uintptr(handle), - uintptr(unsafe.Pointer(&count)), - ) - if r1 != 1 { - return 0, err - } else { - return count, nil - } -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } - - var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) - ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) - - mem, err := getProcessMemoryInfo(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) - ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) - - handles, err := getProcessHandleCount(h) - if err != nil { - c.reportError(ch, nil, err) - return - } - ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) - ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. -} - -func fileTimeToSeconds(ft windows.Filetime) float64 { - return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go b/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go deleted file mode 100644 index 3c10c85..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promauto provides alternative constructors for the fundamental -// Prometheus metric types and their …Vec and …Func variants. The difference from -// their counterparts in the prometheus package is that the promauto -// constructors return Collectors that are already registered with a -// registry. There are two sets of constructors. The constructors in the first -// set are top-level functions, while the constructors in the other set are -// methods of the Factory type. The top-level functions return Collectors -// registered with the global registry (prometheus.DefaultRegisterer), while the -// methods return Collectors registered with the registry the Factory was -// constructed with. All constructors panic if the registration fails. -// -// The following example is a complete program to create a histogram of normally -// distributed random numbers from the math/rand package: -// -// package main -// -// import ( -// "math/rand" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// -// func Random() { -// for { -// histogram.Observe(rand.NormFloat64()) -// } -// } -// -// func main() { -// go Random() -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// Prometheus's version of a minimal hello-world program: -// -// package main -// -// import ( -// "fmt" -// "net/http" -// -// "github.com/prometheus/client_golang/prometheus" -// "github.com/prometheus/client_golang/prometheus/promauto" -// "github.com/prometheus/client_golang/prometheus/promhttp" -// ) -// -// func main() { -// http.Handle("/", promhttp.InstrumentHandlerCounter( -// promauto.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "hello_requests_total", -// Help: "Total number of hello-world requests by HTTP code.", -// }, -// []string{"code"}, -// ), -// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { -// fmt.Fprint(w, "Hello, world!") -// }), -// )) -// http.Handle("/metrics", promhttp.Handler()) -// http.ListenAndServe(":1971", nil) -// } -// -// A Factory is created with the With(prometheus.Registerer) function, which -// enables two usage patterns.
With(prometheus.Registerer) can be called once per -// line: -// -// var ( -// reg = prometheus.NewRegistry() -// randomNumbers = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = promauto.With(reg).NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// Or it can be used to create a Factory once to be used multiple times: -// -// var ( -// reg = prometheus.NewRegistry() -// factory = promauto.With(reg) -// randomNumbers = factory.NewHistogram(prometheus.HistogramOpts{ -// Name: "random_numbers", -// Help: "A histogram of normally distributed random numbers.", -// Buckets: prometheus.LinearBuckets(-3, .1, 61), -// }) -// requestCount = factory.NewCounterVec( -// prometheus.CounterOpts{ -// Name: "http_requests_total", -// Help: "Total number of HTTP requests by status code and method.", -// }, -// []string{"code", "method"}, -// ) -// ) -// -// This appears very handy. So why are these constructors locked away in a -// separate package? -// -// The main problem is that registration may fail, e.g. if a metric inconsistent -// with the one about to be registered is already registered. Therefore, the -// Register method in the prometheus.Registerer interface returns an error, and -// the same is the case for the top-level prometheus.Register function that -// registers with the global registry. The prometheus package also provides -// MustRegister versions for both. They panic if the registration fails, and -// they clearly call this out by using the Must… idiom. Panicking is a bit -// problematic here because it doesn't just happen on input provided by the -// caller that is invalid on its own. Things are a bit more subtle here: Metric -// creation and registration tend to be spread widely over the codebase. It can -// easily happen that an incompatible metric is added to an unrelated part of -// the code, and suddenly code that used to work perfectly fine starts to panic -// (provided that the registration of the newly added metric happens before the -// registration of the previously existing metric). This may come as an even -// bigger surprise with the global registry, where simply importing another -// package can trigger a panic (if the newly imported package registers metrics -// in its init function). At least, in the prometheus package, creation of -// metrics and other collectors is separate from registration. You first create -// the metric, and then you decide explicitly if you want to register it with a -// local or the global registry, and if you want to handle the error or risk a -// panic. With the constructors in the promauto package, registration is -// automatic, and if it fails, it will always panic. Furthermore, the -// constructors will often be called in the var section of a file, which means -// that panicking will happen as a side effect of merely importing a package. -// -// A separate package allows conservative users to entirely ignore it. And -// whoever wants to use it, will do so explicitly, with an opportunity to read -// this warning. -// -// Enjoy promauto responsibly!
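To make the trade-off the package comment argues concrete, here is a short sketch contrasting explicit registration, where the error is handled, with a promauto Factory, where failure panics; the metric names are made up.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Explicit style: create, then register, handling the error.
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_done_total", // illustrative name
		Help: "Counter registered by hand.",
	})
	if err := reg.Register(c); err != nil {
		log.Fatalf("registration failed: %v", err)
	}

	// promauto style: registration is implicit and panics on failure.
	g := promauto.With(reg).NewGauge(prometheus.GaugeOpts{
		Name: "queue_depth", // illustrative name
		Help: "Gauge registered via a Factory.",
	})

	c.Inc()
	g.Set(3)
}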
-package promauto - -import "github.com/prometheus/client_golang/prometheus" - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics. -func NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - return With(prometheus.DefaultRegisterer).NewCounter(opts) -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec -// panics. -func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - return With(prometheus.DefaultRegisterer).NewCounterVec(opts, labelNames) -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc -// panics. -func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - return With(prometheus.DefaultRegisterer).NewCounterFunc(opts, function) -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the -// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics. -func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - return With(prometheus.DefaultRegisterer).NewGauge(opts) -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics. -func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - return With(prometheus.DefaultRegisterer).NewGaugeVec(opts, labelNames) -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics. -func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - return With(prometheus.DefaultRegisterer).NewGaugeFunc(opts, function) -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics. -func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - return With(prometheus.DefaultRegisterer).NewSummary(opts) -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec -// panics. -func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - return With(prometheus.DefaultRegisterer).NewSummaryVec(opts, labelNames) -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics. 
-func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - return With(prometheus.DefaultRegisterer).NewHistogram(opts) -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the -// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec -// panics. -func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - return With(prometheus.DefaultRegisterer).NewHistogramVec(opts, labelNames) -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the -// prometheus.DefaultRegisterer. If the registration fails, NewUntypedFunc -// panics. -func NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - return With(prometheus.DefaultRegisterer).NewUntypedFunc(opts, function) -} - -// Factory provides factory methods to create Collectors that are automatically -// registered with a Registerer. Create a Factory with the With function, -// providing a Registerer to auto-register created Collectors with. The zero -// value of a Factory creates Collectors that are not registered with any -// Registerer. All methods of the Factory panic if the registration fails. -type Factory struct { - r prometheus.Registerer -} - -// With creates a Factory using the provided Registerer for registration of the -// created Collectors. -func With(r prometheus.Registerer) Factory { return Factory{r} } - -// NewCounter works like the function of the same name in the prometheus package -// but it automatically registers the Counter with the Factory's Registerer. -func (f Factory) NewCounter(opts prometheus.CounterOpts) prometheus.Counter { - c := prometheus.NewCounter(opts) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterVec works like the function of the same name in the prometheus -// package but it automatically registers the CounterVec with the Factory's -// Registerer. -func (f Factory) NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec { - c := prometheus.NewCounterVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewCounterFunc works like the function of the same name in the prometheus -// package but it automatically registers the CounterFunc with the Factory's -// Registerer. -func (f Factory) NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc { - c := prometheus.NewCounterFunc(opts, function) - if f.r != nil { - f.r.MustRegister(c) - } - return c -} - -// NewGauge works like the function of the same name in the prometheus package -// but it automatically registers the Gauge with the Factory's Registerer. -func (f Factory) NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge { - g := prometheus.NewGauge(opts) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeVec works like the function of the same name in the prometheus -// package but it automatically registers the GaugeVec with the Factory's -// Registerer. 
-func (f Factory) NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec { - g := prometheus.NewGaugeVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewGaugeFunc works like the function of the same name in the prometheus -// package but it automatically registers the GaugeFunc with the Factory's -// Registerer. -func (f Factory) NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc { - g := prometheus.NewGaugeFunc(opts, function) - if f.r != nil { - f.r.MustRegister(g) - } - return g -} - -// NewSummary works like the function of the same name in the prometheus package -// but it automatically registers the Summary with the Factory's Registerer. -func (f Factory) NewSummary(opts prometheus.SummaryOpts) prometheus.Summary { - s := prometheus.NewSummary(opts) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewSummaryVec works like the function of the same name in the prometheus -// package but it automatically registers the SummaryVec with the Factory's -// Registerer. -func (f Factory) NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec { - s := prometheus.NewSummaryVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(s) - } - return s -} - -// NewHistogram works like the function of the same name in the prometheus -// package but it automatically registers the Histogram with the Factory's -// Registerer. -func (f Factory) NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram { - h := prometheus.NewHistogram(opts) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewHistogramVec works like the function of the same name in the prometheus -// package but it automatically registers the HistogramVec with the Factory's -// Registerer. -func (f Factory) NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec { - h := prometheus.NewHistogramVec(opts, labelNames) - if f.r != nil { - f.r.MustRegister(h) - } - return h -} - -// NewUntypedFunc works like the function of the same name in the prometheus -// package but it automatically registers the UntypedFunc with the Factory's -// Registerer. -func (f Factory) NewUntypedFunc(opts prometheus.UntypedOpts, function func() float64) prometheus.UntypedFunc { - u := prometheus.NewUntypedFunc(opts, function) - if f.r != nil { - f.r.MustRegister(u) - } - return u -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go deleted file mode 100644 index 5070e72..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go +++ /dev/null @@ -1,370 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
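One behavior of the Factory methods above that is easy to miss: per the Factory doc comment, the zero value registers nowhere, so collectors it creates work but are never exported. A tiny sketch with invented names:

package main

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	// Zero-value Factory: its Registerer is nil, so MustRegister is never called.
	var f promauto.Factory
	c := f.NewCounter(prometheus.CounterOpts{
		Name: "detached_ops_total", // illustrative name
		Help: "Counter not attached to any Registerer.",
	})
	c.Inc() // usable, but no registry will ever collect it
}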
- -package promhttp - -import ( - "bufio" - "io" - "net" - "net/http" -) - -const ( - closeNotifier = 1 << iota - flusher - hijacker - readerFrom - pusher -) - -type delegator interface { - http.ResponseWriter - - Status() int - Written() int64 -} - -type responseWriterDelegator struct { - http.ResponseWriter - - status int - written int64 - wroteHeader bool - observeWriteHeader func(int) -} - -func (r *responseWriterDelegator) Status() int { - return r.status -} - -func (r *responseWriterDelegator) Written() int64 { - return r.written -} - -func (r *responseWriterDelegator) WriteHeader(code int) { - if r.observeWriteHeader != nil && !r.wroteHeader { - // Only call observeWriteHeader for the 1st time. It's a bug if - // WriteHeader is called more than once, but we want to protect - // against it here. Note that we still delegate the WriteHeader - // to the original ResponseWriter to not mask the bug from it. - r.observeWriteHeader(code) - } - r.status = code - r.wroteHeader = true - r.ResponseWriter.WriteHeader(code) -} - -func (r *responseWriterDelegator) Write(b []byte) (int, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !r.wroteHeader { - r.WriteHeader(http.StatusOK) - } - n, err := r.ResponseWriter.Write(b) - r.written += int64(n) - return n, err -} - -type closeNotifierDelegator struct{ *responseWriterDelegator } -type flusherDelegator struct{ *responseWriterDelegator } -type hijackerDelegator struct{ *responseWriterDelegator } -type readerFromDelegator struct{ *responseWriterDelegator } -type pusherDelegator struct{ *responseWriterDelegator } - -func (d closeNotifierDelegator) CloseNotify() <-chan bool { - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. - return d.ResponseWriter.(http.CloseNotifier).CloseNotify() -} -func (d flusherDelegator) Flush() { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - d.ResponseWriter.(http.Flusher).Flush() -} -func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return d.ResponseWriter.(http.Hijacker).Hijack() -} -func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { - // If applicable, call WriteHeader here so that observeWriteHeader is - // handled appropriately. - if !d.wroteHeader { - d.WriteHeader(http.StatusOK) - } - n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) - d.written += n - return n, err -} -func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { - return d.ResponseWriter.(http.Pusher).Push(target, opts) -} - -var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) - -func init() { - // TODO(beorn7): Code generation would help here. 
- pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 - return d - } - pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 - return closeNotifierDelegator{d} - } - pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 - return flusherDelegator{d} - } - pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 - return struct { - *responseWriterDelegator - http.Flusher - http.CloseNotifier - }{d, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 - return hijackerDelegator{d} - } - pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 - return struct { - *responseWriterDelegator - http.Hijacker - http.CloseNotifier - }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - }{d, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 - return struct { - *responseWriterDelegator - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 - return readerFromDelegator{d} - } - pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.CloseNotifier - }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - }{d, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - }{d, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 - return struct { - *responseWriterDelegator - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 - return pusherDelegator{d} - } - pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
- return struct { - *responseWriterDelegator - http.Pusher - http.CloseNotifier - }{d, pusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - }{d, pusherDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 - return struct { - *responseWriterDelegator - http.Pusher - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - }{d, pusherDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 - return struct { - *responseWriterDelegator - http.Pusher - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - }{d, pusherDelegator{d}, readerFromDelegator{d}} - } - pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - 
http.Hijacker - http.Flusher - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} - } - pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 - return struct { - *responseWriterDelegator - http.Pusher - io.ReaderFrom - http.Hijacker - http.Flusher - http.CloseNotifier - }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} - } -} - -func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { - d := &responseWriterDelegator{ - ResponseWriter: w, - observeWriteHeader: observeWriteHeaderFunc, - } - - id := 0 - //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to - //remove support from client_golang yet. - if _, ok := w.(http.CloseNotifier); ok { - id += closeNotifier - } - if _, ok := w.(http.Flusher); ok { - id += flusher - } - if _, ok := w.(http.Hijacker); ok { - id += hijacker - } - if _, ok := w.(io.ReaderFrom); ok { - id += readerFrom - } - if _, ok := w.(http.Pusher); ok { - id += pusher - } - - return pickDelegator[id](d) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go deleted file mode 100644 index 5e1c454..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ /dev/null @@ -1,379 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package promhttp provides tooling around HTTP servers and clients. -// -// First, the package allows the creation of http.Handler instances to expose -// Prometheus metrics via HTTP. promhttp.Handler acts on the -// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a -// custom registry or anything that implements the Gatherer interface. It also -// allows the creation of handlers that act differently on errors or allow to -// log errors. -// -// Second, the package provides tooling to instrument instances of http.Handler -// via middleware. Middleware wrappers follow the naming scheme -// InstrumentHandlerX, where X describes the intended use of the middleware. -// See each function's doc comment for specific details. -// -// Finally, the package allows for an http.RoundTripper to be instrumented via -// middleware. Middleware wrappers follow the naming scheme -// InstrumentRoundTripperX, where X describes the intended use of the -// middleware. See each function's doc comment for specific details. 
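For reference, the basic exposition use case that this removed package documentation describes looks like the following; a minimal sketch, assuming the standard import path, default HandlerOpts, and an illustrative port:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Handler() serves prometheus.DefaultGatherer with default HandlerOpts
	// and is pre-instrumented via InstrumentMetricHandler.
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil)) // port is an assumption
}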
-package promhttp - -import ( - "compress/gzip" - "fmt" - "io" - "net/http" - "strings" - "sync" - "time" - - "github.com/prometheus/common/expfmt" - - "github.com/prometheus/client_golang/prometheus" -) - -const ( - contentTypeHeader = "Content-Type" - contentEncodingHeader = "Content-Encoding" - acceptEncodingHeader = "Accept-Encoding" -) - -var gzipPool = sync.Pool{ - New: func() interface{} { - return gzip.NewWriter(nil) - }, -} - -// Handler returns an http.Handler for the prometheus.DefaultGatherer, using -// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has -// no error logging, and it applies compression if requested by the client. -// -// The returned http.Handler is already instrumented using the -// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you -// create multiple http.Handlers by separate calls of the Handler function, the -// metrics used for instrumentation will be shared between them, providing -// global scrape counts. -// -// This function is meant to cover the bulk of basic use cases. If you are doing -// anything that requires more customization (including using a non-default -// Gatherer, different instrumentation, and non-default HandlerOpts), use the -// HandlerFor function. See there for details. -func Handler() http.Handler { - return InstrumentMetricHandler( - prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}), - ) -} - -// HandlerFor returns an uninstrumented http.Handler for the provided -// Gatherer. The behavior of the Handler is defined by the provided -// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom -// Gatherers, with non-default HandlerOpts, and/or with custom (or no) -// instrumentation. Use the InstrumentMetricHandler function to apply the same -// kind of instrumentation as it is used by the Handler function. -func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { - var ( - inFlightSem chan struct{} - errCnt = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_errors_total", - Help: "Total number of internal errors encountered by the promhttp metric handler.", - }, - []string{"cause"}, - ) - ) - - if opts.MaxRequestsInFlight > 0 { - inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) - } - if opts.Registry != nil { - // Initialize all possibilites that can occur below. - errCnt.WithLabelValues("gathering") - errCnt.WithLabelValues("encoding") - if err := opts.Registry.Register(errCnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - errCnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - } - - h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) { - if inFlightSem != nil { - select { - case inFlightSem <- struct{}{}: // All good, carry on. - defer func() { <-inFlightSem }() - default: - http.Error(rsp, fmt.Sprintf( - "Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight, - ), http.StatusServiceUnavailable) - return - } - } - mfs, err := reg.Gather() - if err != nil { - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error gathering metrics:", err) - } - errCnt.WithLabelValues("gathering").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case ContinueOnError: - if len(mfs) == 0 { - // Still report the error if no metrics have been gathered. 
- httpError(rsp, err) - return - } - case HTTPErrorOnError: - httpError(rsp, err) - return - } - } - - var contentType expfmt.Format - if opts.EnableOpenMetrics { - contentType = expfmt.NegotiateIncludingOpenMetrics(req.Header) - } else { - contentType = expfmt.Negotiate(req.Header) - } - header := rsp.Header() - header.Set(contentTypeHeader, string(contentType)) - - w := io.Writer(rsp) - if !opts.DisableCompression && gzipAccepted(req.Header) { - header.Set(contentEncodingHeader, "gzip") - gz := gzipPool.Get().(*gzip.Writer) - defer gzipPool.Put(gz) - - gz.Reset(w) - defer gz.Close() - - w = gz - } - - enc := expfmt.NewEncoder(w, contentType) - - // handleError handles the error according to opts.ErrorHandling - // and returns true if we have to abort after the handling. - handleError := func(err error) bool { - if err == nil { - return false - } - if opts.ErrorLog != nil { - opts.ErrorLog.Println("error encoding and sending metric family:", err) - } - errCnt.WithLabelValues("encoding").Inc() - switch opts.ErrorHandling { - case PanicOnError: - panic(err) - case HTTPErrorOnError: - // We cannot really send an HTTP error at this - // point because we most likely have written - // something to rsp already. But at least we can - // stop sending. - return true - } - // Do nothing in all other cases, including ContinueOnError. - return false - } - - for _, mf := range mfs { - if handleError(enc.Encode(mf)) { - return - } - } - if closer, ok := enc.(expfmt.Closer); ok { - // This in particular takes care of the final "# EOF\n" line for OpenMetrics. - if handleError(closer.Close()) { - return - } - } - }) - - if opts.Timeout <= 0 { - return h - } - return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( - "Exceeded configured timeout of %v.\n", - opts.Timeout, - )) -} - -// InstrumentMetricHandler is usually used with an http.Handler returned by the -// HandlerFor function. It instruments the provided http.Handler with two -// metrics: A counter vector "promhttp_metric_handler_requests_total" to count -// scrapes partitioned by HTTP status code, and a gauge -// "promhttp_metric_handler_requests_in_flight" to track the number of -// simultaneous scrapes. This function idempotently registers collectors for -// both metrics with the provided Registerer. It panics if the registration -// fails. The provided metrics are useful to see how many scrapes hit the -// monitored target (which could be from different Prometheus servers or other -// scrapers), and how often they overlap (which would result in more than one -// scrape in flight at the same time). Note that the scrapes-in-flight gauge -// will contain the scrape by which it is exposed, while the scrape counter will -// only get incremented after the scrape is complete (as only then the status -// code is known). For tracking scrape durations, use the -// "scrape_duration_seconds" gauge created by the Prometheus server upon each -// scrape. -func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { - cnt := prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "promhttp_metric_handler_requests_total", - Help: "Total number of scrapes by HTTP status code.", - }, - []string{"code"}, - ) - // Initialize the most likely HTTP status codes. 
- cnt.WithLabelValues("200") - cnt.WithLabelValues("500") - cnt.WithLabelValues("503") - if err := reg.Register(cnt); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - cnt = are.ExistingCollector.(*prometheus.CounterVec) - } else { - panic(err) - } - } - - gge := prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "promhttp_metric_handler_requests_in_flight", - Help: "Current number of scrapes being served.", - }) - if err := reg.Register(gge); err != nil { - if are, ok := err.(prometheus.AlreadyRegisteredError); ok { - gge = are.ExistingCollector.(prometheus.Gauge) - } else { - panic(err) - } - } - - return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) -} - -// HandlerErrorHandling defines how a Handler serving metrics will handle -// errors. -type HandlerErrorHandling int - -// These constants cause handlers serving metrics to behave as described if -// errors are encountered. -const ( - // Serve an HTTP status code 500 upon the first error - // encountered. Report the error message in the body. Note that HTTP - // errors cannot be served anymore once the beginning of a regular - // payload has been sent. Thus, in the (unlikely) case that encoding the - // payload into the negotiated wire format fails, serving the response - // will simply be aborted. Set an ErrorLog in HandlerOpts to detect - // those errors. - HTTPErrorOnError HandlerErrorHandling = iota - // Ignore errors and try to serve as many metrics as possible. However, - // if no metrics can be served, serve an HTTP status code 500 and the - // last error message in the body. Only use this in deliberate "best - // effort" metrics collection scenarios. In this case, it is highly - // recommended to provide other means of detecting errors: By setting an - // ErrorLog in HandlerOpts, the errors are logged. By providing a - // Registry in HandlerOpts, the exposed metrics include an error counter - // "promhttp_metric_handler_errors_total", which can be used for - // alerts. - ContinueOnError - // Panic upon the first error encountered (useful for "crash only" apps). - PanicOnError -) - -// Logger is the minimal interface HandlerOpts needs for logging. Note that -// log.Logger from the standard library implements this interface, and it is -// easy to implement by custom loggers, if they don't do so already anyway. -type Logger interface { - Println(v ...interface{}) -} - -// HandlerOpts specifies options how to serve metrics via an http.Handler. The -// zero value of HandlerOpts is a reasonable default. -type HandlerOpts struct { - // ErrorLog specifies an optional logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. - ErrorLog Logger - // ErrorHandling defines how errors are handled. Note that errors are - // logged regardless of the configured ErrorHandling provided ErrorLog - // is not nil. - ErrorHandling HandlerErrorHandling - // If Registry is not nil, it is used to register a metric - // "promhttp_metric_handler_errors_total", partitioned by "cause". A - // failed registration causes a panic. Note that this error counter is - // different from the instrumentation you get from the various - // InstrumentHandler... helpers. It counts errors that don't necessarily - // result in a non-2xx HTTP status code. There are two typical cases: - // (1) Encoding errors that only happen after streaming of the HTTP body - // has already started (and the status code 200 has been sent). This - // should only happen with custom collectors. 
(2) Collection errors with - // no effect on the HTTP status code because ErrorHandling is set to - // ContinueOnError. - Registry prometheus.Registerer - // If DisableCompression is true, the handler will never compress the - // response, even if requested by the client. - DisableCompression bool - // The number of concurrent HTTP requests is limited to - // MaxRequestsInFlight. Additional requests are responded to with 503 - // Service Unavailable and a suitable message in the body. If - // MaxRequestsInFlight is 0 or negative, no limit is applied. - MaxRequestsInFlight int - // If handling a request takes longer than Timeout, it is responded to - // with 503 ServiceUnavailable and a suitable Message. No timeout is - // applied if Timeout is 0 or negative. Note that with the current - // implementation, reaching the timeout simply ends the HTTP requests as - // described above (and even that only if sending of the body hasn't - // started yet), while the bulk work of gathering all the metrics keeps - // running in the background (with the eventual result to be thrown - // away). Until the implementation is improved, it is recommended to - // implement a separate timeout in potentially slow Collectors. - Timeout time.Duration - // If true, the experimental OpenMetrics encoding is added to the - // possible options during content negotiation. Note that Prometheus - // 2.5.0+ will negotiate OpenMetrics as first priority. OpenMetrics is - // the only way to transmit exemplars. However, the move to OpenMetrics - // is not completely transparent. Most notably, the values of "quantile" - // labels of Summaries and "le" labels of Histograms are formatted with - // a trailing ".0" if they would otherwise look like integer numbers - // (which changes the identity of the resulting series on the Prometheus - // server). - EnableOpenMetrics bool -} - -// gzipAccepted returns whether the client will accept gzip-encoded content. -func gzipAccepted(header http.Header) bool { - a := header.Get(acceptEncodingHeader) - parts := strings.Split(a, ",") - for _, part := range parts { - part = strings.TrimSpace(part) - if part == "gzip" || strings.HasPrefix(part, "gzip;") { - return true - } - } - return false -} - -// httpError removes any content-encoding header and then calls http.Error with -// the provided error and http.StatusInternalServerError. Error contents is -// supposed to be uncompressed plain text. Same as with a plain http.Error, this -// must not be called if the header or any payload has already been sent. -func httpError(rsp http.ResponseWriter, err error) { - rsp.Header().Del(contentEncodingHeader) - http.Error( - rsp, - "An error has occurred while serving metrics:\n\n"+err.Error(), - http.StatusInternalServerError, - ) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go deleted file mode 100644 index 83c49b6..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ /dev/null @@ -1,219 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
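Taken together, the HandlerOpts documented above are typically wired up through HandlerFor; a minimal sketch with illustrative values, where the registry, logger, request limit, and timeout are assumptions rather than recommendations:

package main

import (
	"log"
	"net/http"
	"os"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorLog:            log.New(os.Stderr, "promhttp: ", log.LstdFlags), // *log.Logger satisfies the Logger interface
		ErrorHandling:       promhttp.ContinueOnError,                        // best-effort gathering
		Registry:            reg,                                             // exposes promhttp_metric_handler_errors_total
		MaxRequestsInFlight: 3,                                               // further concurrent scrapes get a 503
		Timeout:             10 * time.Second,                                // slow scrapes get a 503
	})
	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":8080", nil))
}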
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "crypto/tls" - "net/http" - "net/http/httptrace" - "time" - - "github.com/prometheus/client_golang/prometheus" -) - -// The RoundTripperFunc type is an adapter to allow the use of ordinary -// functions as RoundTrippers. If f is a function with the appropriate -// signature, RountTripperFunc(f) is a RoundTripper that calls f. -type RoundTripperFunc func(req *http.Request) (*http.Response, error) - -// RoundTrip implements the RoundTripper interface. -func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { - return rt(r) -} - -// InstrumentRoundTripperInFlight is a middleware that wraps the provided -// http.RoundTripper. It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.RoundTripper. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - gauge.Inc() - defer gauge.Dec() - return next.RoundTrip(r) - }) -} - -// InstrumentRoundTripperCounter is a middleware that wraps the provided -// http.RoundTripper to observe the request result with the provided CounterVec. -// The CounterVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. Partitioning of the CounterVec happens by HTTP status code -// and/or HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped RoundTripper panics or returns a non-nil error, the Counter -// is not incremented. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(counter) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - resp, err := next.RoundTrip(r) - if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() - } - return resp, err - }) -} - -// InstrumentRoundTripperDuration is a middleware that wraps the provided -// http.RoundTripper to observe the request duration with the provided -// ObserverVec. The ObserverVec must have zero, one, or two non-const -// non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. The Observe method of the Observer -// in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped RoundTripper panics or returns a non-nil error, no values are -// reported. 
-// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { - code, method := checkLabels(obs) - - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - resp, err := next.RoundTrip(r) - if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) - } - return resp, err - }) -} - -// InstrumentTrace is used to offer flexibility in instrumenting the available -// httptrace.ClientTrace hook functions. Each function is passed a float64 -// representing the time in seconds since the start of the http request. A user -// may choose to use separately buckets Histograms, or implement custom -// instance labels on a per function basis. -type InstrumentTrace struct { - GotConn func(float64) - PutIdleConn func(float64) - GotFirstResponseByte func(float64) - Got100Continue func(float64) - DNSStart func(float64) - DNSDone func(float64) - ConnectStart func(float64) - ConnectDone func(float64) - TLSHandshakeStart func(float64) - TLSHandshakeDone func(float64) - WroteHeaders func(float64) - Wait100Continue func(float64) - WroteRequest func(float64) -} - -// InstrumentRoundTripperTrace is a middleware that wraps the provided -// RoundTripper and reports times to hook functions provided in the -// InstrumentTrace struct. Hook functions that are not present in the provided -// InstrumentTrace struct are ignored. Times reported to the hook functions are -// time since the start of the request. Only with Go1.9+, those times are -// guaranteed to never be negative. (Earlier Go versions are not using a -// monotonic clock.) Note that partitioning of Histograms is expensive and -// should be used judiciously. -// -// For hook functions that receive an error as an argument, no observations are -// made in the event of a non-nil error value. -// -// See the example for ExampleInstrumentRoundTripperDuration for example usage. 
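These client middlewares compose by nesting RoundTrippers; a minimal sketch of the chaining described above, with assumed metric names and an assumed target URL:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests", // name is an assumption
		Help: "Current number of in-flight client requests.",
	})
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Client requests by code and method.",
		},
		[]string{"code", "method"}, // the only label names these middlewares accept
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "client_request_duration_seconds",
			Help:    "Client request latencies in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(inFlight, counter, duration)

	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport))),
	}
	if resp, err := client.Get("http://example.com"); err == nil { // URL is an assumption
		resp.Body.Close()
	}
}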
-func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { - return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { - start := time.Now() - - trace := &httptrace.ClientTrace{ - GotConn: func(_ httptrace.GotConnInfo) { - if it.GotConn != nil { - it.GotConn(time.Since(start).Seconds()) - } - }, - PutIdleConn: func(err error) { - if err != nil { - return - } - if it.PutIdleConn != nil { - it.PutIdleConn(time.Since(start).Seconds()) - } - }, - DNSStart: func(_ httptrace.DNSStartInfo) { - if it.DNSStart != nil { - it.DNSStart(time.Since(start).Seconds()) - } - }, - DNSDone: func(_ httptrace.DNSDoneInfo) { - if it.DNSDone != nil { - it.DNSDone(time.Since(start).Seconds()) - } - }, - ConnectStart: func(_, _ string) { - if it.ConnectStart != nil { - it.ConnectStart(time.Since(start).Seconds()) - } - }, - ConnectDone: func(_, _ string, err error) { - if err != nil { - return - } - if it.ConnectDone != nil { - it.ConnectDone(time.Since(start).Seconds()) - } - }, - GotFirstResponseByte: func() { - if it.GotFirstResponseByte != nil { - it.GotFirstResponseByte(time.Since(start).Seconds()) - } - }, - Got100Continue: func() { - if it.Got100Continue != nil { - it.Got100Continue(time.Since(start).Seconds()) - } - }, - TLSHandshakeStart: func() { - if it.TLSHandshakeStart != nil { - it.TLSHandshakeStart(time.Since(start).Seconds()) - } - }, - TLSHandshakeDone: func(_ tls.ConnectionState, err error) { - if err != nil { - return - } - if it.TLSHandshakeDone != nil { - it.TLSHandshakeDone(time.Since(start).Seconds()) - } - }, - WroteHeaders: func() { - if it.WroteHeaders != nil { - it.WroteHeaders(time.Since(start).Seconds()) - } - }, - Wait100Continue: func() { - if it.Wait100Continue != nil { - it.Wait100Continue(time.Since(start).Seconds()) - } - }, - WroteRequest: func(_ httptrace.WroteRequestInfo) { - if it.WroteRequest != nil { - it.WroteRequest(time.Since(start).Seconds()) - } - }, - } - r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) - - return next.RoundTrip(r) - }) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go deleted file mode 100644 index 9db2438..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ /dev/null @@ -1,447 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -import ( - "errors" - "net/http" - "strconv" - "strings" - "time" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus" -) - -// magicString is used for the hacky label test in checkLabels. Remove once fixed. -const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" - -// InstrumentHandlerInFlight is a middleware that wraps the provided -// http.Handler. 
It sets the provided prometheus.Gauge to the number of -// requests currently handled by the wrapped http.Handler. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - g.Inc() - defer g.Dec() - next.ServeHTTP(w, r) - }) -} - -// InstrumentHandlerDuration is a middleware that wraps the provided -// http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - - obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) - }) -} - -// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have zero, one, or two non-const non-curried labels. For those, the only -// allowed label names are "code" and "method". The function panics -// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or -// HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, the Counter is not incremented. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(counter) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status())).Inc() - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0)).Inc() - }) -} - -// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided -// http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. 
The ObserverVec must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. -// -// If the wrapped Handler panics before calling WriteHeader, no value is -// reported. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - now := time.Now() - d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) - }) - next.ServeHTTP(d, r) - }) -} - -// InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { - code, method := checkLabels(obs) - - if code { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) - }) - } - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) - }) -} - -// InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the response size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. 
Note that partitioning of Histograms is expensive and should be used -// judiciously. -// -// If the wrapped Handler does not set a status code, a status code of 200 is assumed. -// -// If the wrapped Handler panics, no values are reported. -// -// See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { - code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - d := newDelegator(w, nil) - next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) - }) -} - -func checkLabels(c prometheus.Collector) (code bool, method bool) { - // TODO(beorn7): Remove this hacky way to check for instance labels - // once Descriptors can have their dimensionality queried. - var ( - desc *prometheus.Desc - m prometheus.Metric - pm dto.Metric - lvs []string - ) - - // Get the Desc from the Collector. - descc := make(chan *prometheus.Desc, 1) - c.Describe(descc) - - select { - case desc = <-descc: - default: - panic("no description provided by collector") - } - select { - case <-descc: - panic("more than one description provided by collector") - default: - } - - close(descc) - - // Create a ConstMetric with the Desc. Since we don't know how many - // variable labels there are, try for as long as it needs. - for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { - m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) - } - - // Write out the metric into a proto message and look at the labels. - // If the value is not the magicString, it is a constLabel, which doesn't interest us. - // If the label is curried, it doesn't interest us. - // In all other cases, only "code" or "method" is allowed. - if err := m.Write(&pm); err != nil { - panic("error checking metric for labels") - } - for _, label := range pm.Label { - name, value := label.GetName(), label.GetValue() - if value != magicString || isLabelCurried(c, name) { - continue - } - switch name { - case "code": - code = true - case "method": - method = true - default: - panic("metric partitioned with non-supported labels") - } - } - return -} - -func isLabelCurried(c prometheus.Collector, label string) bool { - // This is even hackier than the label test above. - // We essentially try to curry again and see if it works. - // But for that, we need to type-convert to the two - // types we use here, ObserverVec or *CounterVec. - switch v := c.(type) { - case *prometheus.CounterVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - case prometheus.ObserverVec: - if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil { - return false - } - default: - panic("unsupported metric vec type") - } - return true -} - -// emptyLabels is a one-time allocation for non-partitioned metrics to avoid -// unnecessary allocations on each request. 
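The server-side InstrumentHandler* middlewares documented above nest the same way around an application handler; a minimal sketch with assumed metric names and an assumed route:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests", // name is an assumption
		Help: "Current number of in-flight HTTP requests.",
	})
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests by code and method.",
		},
		[]string{"code", "method"},
	)
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "HTTP request latencies in seconds.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	prometheus.MustRegister(inFlight, requests, duration)

	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	// The innermost middleware runs closest to the handler; this ordering
	// is a style choice, not a requirement.
	http.Handle("/", promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerCounter(requests,
			promhttp.InstrumentHandlerDuration(duration, api))))
	log.Fatal(http.ListenAndServe(":8080", nil))
}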
-var emptyLabels = prometheus.Labels{} - -func labels(code, method bool, reqMethod string, status int) prometheus.Labels { - if !(code || method) { - return emptyLabels - } - labels := prometheus.Labels{} - - if code { - labels["code"] = sanitizeCode(status) - } - if method { - labels["method"] = sanitizeMethod(reqMethod) - } - - return labels -} - -func computeApproximateRequestSize(r *http.Request) int { - s := 0 - if r.URL != nil { - s += len(r.URL.String()) - } - - s += len(r.Method) - s += len(r.Proto) - for name, values := range r.Header { - s += len(name) - for _, value := range values { - s += len(value) - } - } - s += len(r.Host) - - // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. - - if r.ContentLength != -1 { - s += int(r.ContentLength) - } - return s -} - -func sanitizeMethod(m string) string { - switch m { - case "GET", "get": - return "get" - case "PUT", "put": - return "put" - case "HEAD", "head": - return "head" - case "POST", "post": - return "post" - case "DELETE", "delete": - return "delete" - case "CONNECT", "connect": - return "connect" - case "OPTIONS", "options": - return "options" - case "NOTIFY", "notify": - return "notify" - default: - return strings.ToLower(m) - } -} - -// If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, santizeCode will return 200, for consistency with behavior in -// the stdlib. -func sanitizeCode(s int) string { - switch s { - case 100: - return "100" - case 101: - return "101" - - case 200, 0: - return "200" - case 201: - return "201" - case 202: - return "202" - case 203: - return "203" - case 204: - return "204" - case 205: - return "205" - case 206: - return "206" - - case 300: - return "300" - case 301: - return "301" - case 302: - return "302" - case 304: - return "304" - case 305: - return "305" - case 307: - return "307" - - case 400: - return "400" - case 401: - return "401" - case 402: - return "402" - case 403: - return "403" - case 404: - return "404" - case 405: - return "405" - case 406: - return "406" - case 407: - return "407" - case 408: - return "408" - case 409: - return "409" - case 410: - return "410" - case 411: - return "411" - case 412: - return "412" - case 413: - return "413" - case 414: - return "414" - case 415: - return "415" - case 416: - return "416" - case 417: - return "417" - case 418: - return "418" - - case 500: - return "500" - case 501: - return "501" - case 502: - return "502" - case 503: - return "503" - case 504: - return "504" - case 505: - return "505" - - case 428: - return "428" - case 429: - return "429" - case 431: - return "431" - case 511: - return "511" - - default: - return strconv.Itoa(s) - } -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go deleted file mode 100644 index c05d6ee..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ /dev/null @@ -1,947 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "strings" - "sync" - "unicode/utf8" - - "github.com/cespare/xxhash/v2" - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/expfmt" - - dto "github.com/prometheus/client_model/go" - - "github.com/prometheus/client_golang/prometheus/internal" -) - -const ( - // Capacity for the channel to collect metrics and descriptors. - capMetricChan = 1000 - capDescChan = 10 -) - -// DefaultRegisterer and DefaultGatherer are the implementations of the -// Registerer and Gatherer interface a number of convenience functions in this -// package act on. Initially, both variables point to the same Registry, which -// has a process collector (currently on Linux only, see NewProcessCollector) -// and a Go collector (see NewGoCollector, in particular the note about -// stop-the-world implication with Go versions older than 1.9) already -// registered. This approach to keep default instances as global state mirrors -// the approach of other packages in the Go standard library. Note that there -// are caveats. Change the variables with caution and only if you understand the -// consequences. Users who want to avoid global state altogether should not use -// the convenience functions and act on custom instances instead. -var ( - defaultRegistry = NewRegistry() - DefaultRegisterer Registerer = defaultRegistry - DefaultGatherer Gatherer = defaultRegistry -) - -func init() { - MustRegister(NewProcessCollector(ProcessCollectorOpts{})) - MustRegister(NewGoCollector()) -} - -// NewRegistry creates a new vanilla Registry without any Collectors -// pre-registered. -func NewRegistry() *Registry { - return &Registry{ - collectorsByID: map[uint64]Collector{}, - descIDs: map[uint64]struct{}{}, - dimHashesByName: map[string]uint64{}, - } -} - -// NewPedanticRegistry returns a registry that checks during collection if each -// collected Metric is consistent with its reported Desc, and if the Desc has -// actually been registered with the registry. Unchecked Collectors (those whose -// Describe method does not yield any descriptors) are excluded from the check. -// -// Usually, a Registry will be happy as long as the union of all collected -// Metrics is consistent and valid even if some metrics are not consistent with -// their own Desc or a Desc provided by their registered Collector. Well-behaved -// Collectors and Metrics will only provide consistent Descs. This Registry is -// useful to test the implementation of Collectors and Metrics. -func NewPedanticRegistry() *Registry { - r := NewRegistry() - r.pedanticChecksEnabled = true - return r -} - -// Registerer is the interface for the part of a registry in charge of -// registering and unregistering. Users of custom registries should use -// Registerer as type for registration purposes (rather than the Registry type -// directly). In that way, they are free to use custom Registerer implementation -// (e.g. for testing purposes). -type Registerer interface { - // Register registers a new Collector to be included in metrics - // collection. It returns an error if the descriptors provided by the - // Collector are invalid or if they — in combination with descriptors of - // already registered Collectors — do not fulfill the consistency and - // uniqueness criteria described in the documentation of metric.Desc. 
- // - // If the provided Collector is equal to a Collector already registered - // (which includes the case of re-registering the same Collector), the - // returned error is an instance of AlreadyRegisteredError, which - // contains the previously registered Collector. - // - // A Collector whose Describe method does not yield any Desc is treated - // as unchecked. Registration will always succeed. No check for - // re-registering (see previous paragraph) is performed. Thus, the - // caller is responsible for not double-registering the same unchecked - // Collector, and for providing a Collector that will not cause - // inconsistent metrics on collection. (This would lead to scrape - // errors.) - Register(Collector) error - // MustRegister works like Register but registers any number of - // Collectors and panics upon the first registration that causes an - // error. - MustRegister(...Collector) - // Unregister unregisters the Collector that equals the Collector passed - // in as an argument. (Two Collectors are considered equal if their - // Describe method yields the same set of descriptors.) The function - // returns whether a Collector was unregistered. Note that an unchecked - // Collector cannot be unregistered (as its Describe method does not - // yield any descriptor). - // - // Note that even after unregistering, it will not be possible to - // register a new Collector that is inconsistent with the unregistered - // Collector, e.g. a Collector collecting metrics with the same name but - // a different help string. The rationale here is that the same registry - // instance must only collect consistent metrics throughout its - // lifetime. - Unregister(Collector) bool -} - -// Gatherer is the interface for the part of a registry in charge of gathering -// the collected metrics into a number of MetricFamilies. The Gatherer interface -// comes with the same general implication as described for the Registerer -// interface. -type Gatherer interface { - // Gather calls the Collect method of the registered Collectors and then - // gathers the collected metrics into a lexicographically sorted slice - // of uniquely named MetricFamily protobufs. Gather ensures that the - // returned slice is valid and self-consistent so that it can be used - // for valid exposition. As an exception to the strict consistency - // requirements described for metric.Desc, Gather will tolerate - // different sets of label names for metrics of the same metric family. - // - // Even if an error occurs, Gather attempts to gather as many metrics as - // possible. Hence, if a non-nil error is returned, the returned - // MetricFamily slice could be nil (in case of a fatal error that - // prevented any meaningful metric collection) or contain a number of - // MetricFamily protobufs, some of which might be incomplete, and some - // might be missing altogether. The returned error (which might be a - // MultiError) explains the details. Note that this is mostly useful for - // debugging purposes. If the gathered protobufs are to be used for - // exposition in actual monitoring, it is almost always better to not - // expose an incomplete result and instead disregard the returned - // MetricFamily protobufs in case the returned error is non-nil. - Gather() ([]*dto.MetricFamily, error) -} - -// Register registers the provided Collector with the DefaultRegisterer. -// -// Register is a shortcut for DefaultRegisterer.Register(c). See there for more -// details. 
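A minimal sketch of the registration contract described above, using a custom Registry and recovering the existing collector from an AlreadyRegisteredError (defined further below); the counter name is an assumption:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_processed_total", // name is an assumption
		Help: "Total number of processed jobs.",
	})
	if err := reg.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// Switch over to the collector that was registered first.
			c = are.ExistingCollector.(prometheus.Counter)
		} else {
			panic(err) // invalid or inconsistent descriptors
		}
	}
	c.Inc()
}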
-func Register(c Collector) error { - return DefaultRegisterer.Register(c) -} - -// MustRegister registers the provided Collectors with the DefaultRegisterer and -// panics if any error occurs. -// -// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See -// there for more details. -func MustRegister(cs ...Collector) { - DefaultRegisterer.MustRegister(cs...) -} - -// Unregister removes the registration of the provided Collector from the -// DefaultRegisterer. -// -// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for -// more details. -func Unregister(c Collector) bool { - return DefaultRegisterer.Unregister(c) -} - -// GathererFunc turns a function into a Gatherer. -type GathererFunc func() ([]*dto.MetricFamily, error) - -// Gather implements Gatherer. -func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { - return gf() -} - -// AlreadyRegisteredError is returned by the Register method if the Collector to -// be registered has already been registered before, or a different Collector -// that collects the same metrics has been registered before. Registration fails -// in that case, but you can detect from the kind of error what has -// happened. The error contains fields for the existing Collector and the -// (rejected) new Collector that equals the existing one. This can be used to -// find out if an equal Collector has been registered before and switch over to -// using the old one, as demonstrated in the example. -type AlreadyRegisteredError struct { - ExistingCollector, NewCollector Collector -} - -func (err AlreadyRegisteredError) Error() string { - return "duplicate metrics collector registration attempted" -} - -// MultiError is a slice of errors implementing the error interface. It is used -// by a Gatherer to report multiple errors during MetricFamily gathering. -type MultiError []error - -func (errs MultiError) Error() string { - if len(errs) == 0 { - return "" - } - buf := &bytes.Buffer{} - fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) - for _, err := range errs { - fmt.Fprintf(buf, "\n* %s", err) - } - return buf.String() -} - -// Append appends the provided error if it is not nil. -func (errs *MultiError) Append(err error) { - if err != nil { - *errs = append(*errs, err) - } -} - -// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only -// contained error as error if len(errs is 1). In all other cases, it returns -// the MultiError directly. This is helpful for returning a MultiError in a way -// that only uses the MultiError if needed. -func (errs MultiError) MaybeUnwrap() error { - switch len(errs) { - case 0: - return nil - case 1: - return errs[0] - default: - return errs - } -} - -// Registry registers Prometheus collectors, collects their metrics, and gathers -// them into MetricFamilies for exposition. It implements both Registerer and -// Gatherer. The zero value is not usable. Create instances with NewRegistry or -// NewPedanticRegistry. -type Registry struct { - mtx sync.RWMutex - collectorsByID map[uint64]Collector // ID is a hash of the descIDs. - descIDs map[uint64]struct{} - dimHashesByName map[string]uint64 - uncheckedCollectors []Collector - pedanticChecksEnabled bool -} - -// Register implements Registerer. -func (r *Registry) Register(c Collector) error { - var ( - descChan = make(chan *Desc, capDescChan) - newDescIDs = map[uint64]struct{}{} - newDimHashesByName = map[string]uint64{} - collectorID uint64 // All desc IDs XOR'd together. 
- duplicateDescErr error - ) - go func() { - c.Describe(descChan) - close(descChan) - }() - r.mtx.Lock() - defer func() { - // Drain channel in case of premature return to not leak a goroutine. - for range descChan { - } - r.mtx.Unlock() - }() - // Conduct various tests... - for desc := range descChan { - - // Is the descriptor valid at all? - if desc.err != nil { - return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) - } - - // Is the descID unique? - // (In other words: Is the fqName + constLabel combination unique?) - if _, exists := r.descIDs[desc.id]; exists { - duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) - } - // If it is not a duplicate desc in this collector, XOR it to - // the collectorID. (We allow duplicate descs within the same - // collector, but their existence must be a no-op.) - if _, exists := newDescIDs[desc.id]; !exists { - newDescIDs[desc.id] = struct{}{} - collectorID ^= desc.id - } - - // Are all the label names and the help string consistent with - // previous descriptors of the same name? - // First check existing descriptors... - if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) - } - } else { - // ...then check the new descriptors already seen. - if dimHash, exists := newDimHashesByName[desc.fqName]; exists { - if dimHash != desc.dimHash { - return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) - } - } else { - newDimHashesByName[desc.fqName] = desc.dimHash - } - } - } - // A Collector yielding no Desc at all is considered unchecked. - if len(newDescIDs) == 0 { - r.uncheckedCollectors = append(r.uncheckedCollectors, c) - return nil - } - if existing, exists := r.collectorsByID[collectorID]; exists { - switch e := existing.(type) { - case *wrappingCollector: - return AlreadyRegisteredError{ - ExistingCollector: e.unwrapRecursively(), - NewCollector: c, - } - default: - return AlreadyRegisteredError{ - ExistingCollector: e, - NewCollector: c, - } - } - } - // If the collectorID is new, but at least one of the descs existed - // before, we are in trouble. - if duplicateDescErr != nil { - return duplicateDescErr - } - - // Only after all tests have passed, actually register. - r.collectorsByID[collectorID] = c - for hash := range newDescIDs { - r.descIDs[hash] = struct{}{} - } - for name, dimHash := range newDimHashesByName { - r.dimHashesByName[name] = dimHash - } - return nil -} - -// Unregister implements Registerer. -func (r *Registry) Unregister(c Collector) bool { - var ( - descChan = make(chan *Desc, capDescChan) - descIDs = map[uint64]struct{}{} - collectorID uint64 // All desc IDs XOR'd together. 
- ) - go func() { - c.Describe(descChan) - close(descChan) - }() - for desc := range descChan { - if _, exists := descIDs[desc.id]; !exists { - collectorID ^= desc.id - descIDs[desc.id] = struct{}{} - } - } - - r.mtx.RLock() - if _, exists := r.collectorsByID[collectorID]; !exists { - r.mtx.RUnlock() - return false - } - r.mtx.RUnlock() - - r.mtx.Lock() - defer r.mtx.Unlock() - - delete(r.collectorsByID, collectorID) - for id := range descIDs { - delete(r.descIDs, id) - } - // dimHashesByName is left untouched as those must be consistent - // throughout the lifetime of a program. - return true -} - -// MustRegister implements Registerer. -func (r *Registry) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -// Gather implements Gatherer. -func (r *Registry) Gather() ([]*dto.MetricFamily, error) { - var ( - checkedMetricChan = make(chan Metric, capMetricChan) - uncheckedMetricChan = make(chan Metric, capMetricChan) - metricHashes = map[uint64]struct{}{} - wg sync.WaitGroup - errs MultiError // The collected errors to return in the end. - registeredDescIDs map[uint64]struct{} // Only used for pedantic checks - ) - - r.mtx.RLock() - goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors) - metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) - checkedCollectors := make(chan Collector, len(r.collectorsByID)) - uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors)) - for _, collector := range r.collectorsByID { - checkedCollectors <- collector - } - for _, collector := range r.uncheckedCollectors { - uncheckedCollectors <- collector - } - // In case pedantic checks are enabled, we have to copy the map before - // giving up the RLock. - if r.pedanticChecksEnabled { - registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) - for id := range r.descIDs { - registeredDescIDs[id] = struct{}{} - } - } - r.mtx.RUnlock() - - wg.Add(goroutineBudget) - - collectWorker := func() { - for { - select { - case collector := <-checkedCollectors: - collector.Collect(checkedMetricChan) - case collector := <-uncheckedCollectors: - collector.Collect(uncheckedMetricChan) - default: - return - } - wg.Done() - } - } - - // Start the first worker now to make sure at least one is running. - go collectWorker() - goroutineBudget-- - - // Close checkedMetricChan and uncheckedMetricChan once all collectors - // are collected. - go func() { - wg.Wait() - close(checkedMetricChan) - close(uncheckedMetricChan) - }() - - // Drain checkedMetricChan and uncheckedMetricChan in case of premature return. - defer func() { - if checkedMetricChan != nil { - for range checkedMetricChan { - } - } - if uncheckedMetricChan != nil { - for range uncheckedMetricChan { - } - } - }() - - // Copy the channel references so we can nil them out later to remove - // them from the select statements below. - cmc := checkedMetricChan - umc := uncheckedMetricChan - - for { - select { - case metric, ok := <-cmc: - if !ok { - cmc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - registeredDescIDs, - )) - case metric, ok := <-umc: - if !ok { - umc = nil - break - } - errs.Append(processMetric( - metric, metricFamiliesByName, - metricHashes, - nil, - )) - default: - if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 { - // All collectors are already being worked on or - // we have already as many goroutines started as - // there are collectors. 
Do the same as above,
- // just without the default.
- select {
- case metric, ok := <-cmc:
- if !ok {
- cmc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- registeredDescIDs,
- ))
- case metric, ok := <-umc:
- if !ok {
- umc = nil
- break
- }
- errs.Append(processMetric(
- metric, metricFamiliesByName,
- metricHashes,
- nil,
- ))
- }
- break
- }
- // Start more workers.
- go collectWorker()
- goroutineBudget--
- runtime.Gosched()
- }
- // Once both checkedMetricChan and uncheckedMetricChan are closed
- // and drained, the contraption above will nil out cmc and umc,
- // and then we can leave the collect loop here.
- if cmc == nil && umc == nil {
- break
- }
- }
- return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
-}
-
-// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
-// Prometheus text format, and writes it to a temporary file. Upon success, the
-// temporary file is renamed to the provided filename.
-//
-// This is intended for use with the textfile collector of the node exporter.
-// Note that the node exporter expects the filename to be suffixed with ".prom".
-func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
- if err != nil {
- return err
- }
- defer os.Remove(tmp.Name())
-
- mfs, err := g.Gather()
- if err != nil {
- return err
- }
- for _, mf := range mfs {
- if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
- return err
- }
- }
- if err := tmp.Close(); err != nil {
- return err
- }
-
- if err := os.Chmod(tmp.Name(), 0644); err != nil {
- return err
- }
- return os.Rename(tmp.Name(), filename)
-}
-
-// processMetric is an internal helper method only used by the Gather method.
-func processMetric(
- metric Metric,
- metricFamiliesByName map[string]*dto.MetricFamily,
- metricHashes map[uint64]struct{},
- registeredDescIDs map[uint64]struct{},
-) error {
- desc := metric.Desc()
- // Wrapped metrics collected by an unchecked Collector can have an
- // invalid Desc.
- if desc.err != nil {
- return desc.err
- }
- dtoMetric := &dto.Metric{}
- if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
- }
- metricFamily, ok := metricFamiliesByName[desc.fqName]
- if ok { // Existing name.
- if metricFamily.GetHelp() != desc.help {
- return fmt.Errorf(
- "collected metric %s %s has help %q but should have %q",
- desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
- )
- }
- // TODO(beorn7): Simplify switch once Desc has type.
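// Editor's example (a sketch, not part of the original file): WriteToTextfile,
// defined above, pairs with the node exporter's textfile collector. The
// output path below is illustrative; only the ".prom" suffix matters.
func dumpMetricsForTextfileCollector() error {
	return prometheus.WriteToTextfile(
		"/var/lib/node_exporter/textfile/myapp.prom",
		prometheus.DefaultGatherer,
	)
}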
- switch metricFamily.GetType() { - case dto.MetricType_COUNTER: - if dtoMetric.Counter == nil { - return fmt.Errorf( - "collected metric %s %s should be a Counter", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_GAUGE: - if dtoMetric.Gauge == nil { - return fmt.Errorf( - "collected metric %s %s should be a Gauge", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_SUMMARY: - if dtoMetric.Summary == nil { - return fmt.Errorf( - "collected metric %s %s should be a Summary", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_UNTYPED: - if dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %s %s should be Untyped", - desc.fqName, dtoMetric, - ) - } - case dto.MetricType_HISTOGRAM: - if dtoMetric.Histogram == nil { - return fmt.Errorf( - "collected metric %s %s should be a Histogram", - desc.fqName, dtoMetric, - ) - } - default: - panic("encountered MetricFamily with invalid type") - } - } else { // New name. - metricFamily = &dto.MetricFamily{} - metricFamily.Name = proto.String(desc.fqName) - metricFamily.Help = proto.String(desc.help) - // TODO(beorn7): Simplify switch once Desc has type. - switch { - case dtoMetric.Gauge != nil: - metricFamily.Type = dto.MetricType_GAUGE.Enum() - case dtoMetric.Counter != nil: - metricFamily.Type = dto.MetricType_COUNTER.Enum() - case dtoMetric.Summary != nil: - metricFamily.Type = dto.MetricType_SUMMARY.Enum() - case dtoMetric.Untyped != nil: - metricFamily.Type = dto.MetricType_UNTYPED.Enum() - case dtoMetric.Histogram != nil: - metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() - default: - return fmt.Errorf("empty metric collected: %s", dtoMetric) - } - if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { - return err - } - metricFamiliesByName[desc.fqName] = metricFamily - } - if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { - return err - } - if registeredDescIDs != nil { - // Is the desc registered at all? - if _, exist := registeredDescIDs[desc.id]; !exist { - return fmt.Errorf( - "collected metric %s %s with unregistered descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { - return err - } - } - metricFamily.Metric = append(metricFamily.Metric, dtoMetric) - return nil -} - -// Gatherers is a slice of Gatherer instances that implements the Gatherer -// interface itself. Its Gather method calls Gather on all Gatherers in the -// slice in order and returns the merged results. Errors returned from the -// Gather calls are all returned in a flattened MultiError. Duplicate and -// inconsistent Metrics are skipped (first occurrence in slice order wins) and -// reported in the returned error. -// -// Gatherers can be used to merge the Gather results from multiple -// Registries. It also provides a way to directly inject existing MetricFamily -// protobufs into the gathering by creating a custom Gatherer with a Gather -// method that simply returns the existing MetricFamily protobufs. Note that no -// registration is involved (in contrast to Collector registration), so -// obviously registration-time checks cannot happen. Any inconsistencies between -// the gathered MetricFamilies are reported as errors by the Gather method, and -// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies -// (e.g. syntactically invalid metric or label names) will go undetected. -type Gatherers []Gatherer - -// Gather implements Gatherer. 
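// Editor's example (a sketch, not part of the original file): merging the
// output of two registries through Gatherers, as the doc comment above
// describes. Variable names are illustrative; dto is
// github.com/prometheus/client_model/go.
func gatherMerged(appReg, jobReg *prometheus.Registry) ([]*dto.MetricFamily, error) {
	return prometheus.Gatherers{appReg, jobReg}.Gather()
}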
-func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { - var ( - metricFamiliesByName = map[string]*dto.MetricFamily{} - metricHashes = map[uint64]struct{}{} - errs MultiError // The collected errors to return in the end. - ) - - for i, g := range gs { - mfs, err := g.Gather() - if err != nil { - if multiErr, ok := err.(MultiError); ok { - for _, err := range multiErr { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } else { - errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) - } - } - for _, mf := range mfs { - existingMF, exists := metricFamiliesByName[mf.GetName()] - if exists { - if existingMF.GetHelp() != mf.GetHelp() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has help %q but should have %q", - mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), - )) - continue - } - if existingMF.GetType() != mf.GetType() { - errs = append(errs, fmt.Errorf( - "gathered metric family %s has type %s but should have %s", - mf.GetName(), mf.GetType(), existingMF.GetType(), - )) - continue - } - } else { - existingMF = &dto.MetricFamily{} - existingMF.Name = mf.Name - existingMF.Help = mf.Help - existingMF.Type = mf.Type - if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { - errs = append(errs, err) - continue - } - metricFamiliesByName[mf.GetName()] = existingMF - } - for _, m := range mf.Metric { - if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { - errs = append(errs, err) - continue - } - existingMF.Metric = append(existingMF.Metric, m) - } - } - } - return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() -} - -// checkSuffixCollisions checks for collisions with the “magic” suffixes the -// Prometheus text format and the internal metric representation of the -// Prometheus server add while flattening Summaries and Histograms. 
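// Editor's example (a sketch, not part of the original file): the kind of
// collision the function below rejects. The text format flattens the
// histogram "latency_seconds" into latency_seconds_count, latency_seconds_sum
// and latency_seconds_bucket, so this counter collides with it at gather
// time. Metric names are illustrative.
var (
	collidingCounter = prometheus.NewCounter(prometheus.CounterOpts{Name: "latency_seconds_count"})
	collidingHist    = prometheus.NewHistogram(prometheus.HistogramOpts{Name: "latency_seconds"})
)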
-func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { - var ( - newName = mf.GetName() - newType = mf.GetType() - newNameWithoutSuffix = "" - ) - switch { - case strings.HasSuffix(newName, "_count"): - newNameWithoutSuffix = newName[:len(newName)-6] - case strings.HasSuffix(newName, "_sum"): - newNameWithoutSuffix = newName[:len(newName)-4] - case strings.HasSuffix(newName, "_bucket"): - newNameWithoutSuffix = newName[:len(newName)-7] - } - if newNameWithoutSuffix != "" { - if existingMF, ok := mfs[newNameWithoutSuffix]; ok { - switch existingMF.GetType() { - case dto.MetricType_SUMMARY: - if !strings.HasSuffix(newName, "_bucket") { - return fmt.Errorf( - "collected metric named %q collides with previously collected summary named %q", - newName, newNameWithoutSuffix, - ) - } - case dto.MetricType_HISTOGRAM: - return fmt.Errorf( - "collected metric named %q collides with previously collected histogram named %q", - newName, newNameWithoutSuffix, - ) - } - } - } - if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_count"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_count", - ) - } - if _, ok := mfs[newName+"_sum"]; ok { - return fmt.Errorf( - "collected histogram or summary named %q collides with previously collected metric named %q", - newName, newName+"_sum", - ) - } - } - if newType == dto.MetricType_HISTOGRAM { - if _, ok := mfs[newName+"_bucket"]; ok { - return fmt.Errorf( - "collected histogram named %q collides with previously collected metric named %q", - newName, newName+"_bucket", - ) - } - } - return nil -} - -// checkMetricConsistency checks if the provided Metric is consistent with the -// provided MetricFamily. It also hashes the Metric labels and the MetricFamily -// name. If the resulting hash is already in the provided metricHashes, an error -// is returned. If not, it is added to metricHashes. -func checkMetricConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - metricHashes map[uint64]struct{}, -) error { - name := metricFamily.GetName() - - // Type consistency with metric family. 
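// Editor's note (illustrative, not part of the original file): the hash
// further down makes the uniqueness check concrete. If two Collect calls
// both emit a metric named "jobs_total" with the label pair queue="default",
// name, label names, and label values hash to the same uint64, and the
// second one fails with "was collected before with the same name and label
// values".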
- if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || - metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || - metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || - metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || - metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { - return fmt.Errorf( - "collected metric %q { %s} is not a %s", - name, dtoMetric, metricFamily.GetType(), - ) - } - - previousLabelName := "" - for _, labelPair := range dtoMetric.GetLabel() { - labelName := labelPair.GetName() - if labelName == previousLabelName { - return fmt.Errorf( - "collected metric %q { %s} has two or more labels with the same name: %s", - name, dtoMetric, labelName, - ) - } - if !checkLabelName(labelName) { - return fmt.Errorf( - "collected metric %q { %s} has a label with an invalid name: %s", - name, dtoMetric, labelName, - ) - } - if dtoMetric.Summary != nil && labelName == quantileLabel { - return fmt.Errorf( - "collected metric %q { %s} must not have an explicit %q label", - name, dtoMetric, quantileLabel, - ) - } - if !utf8.ValidString(labelPair.GetValue()) { - return fmt.Errorf( - "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", - name, dtoMetric, labelName, labelPair.GetValue()) - } - previousLabelName = labelName - } - - // Is the metric unique (i.e. no other metric with the same name and the same labels)? - h := xxhash.New() - h.WriteString(name) - h.Write(separatorByteSlice) - // Make sure label pairs are sorted. We depend on it for the consistency - // check. - if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { - // We cannot sort dtoMetric.Label in place as it is immutable by contract. - copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) - copy(copiedLabels, dtoMetric.Label) - sort.Sort(labelPairSorter(copiedLabels)) - dtoMetric.Label = copiedLabels - } - for _, lp := range dtoMetric.Label { - h.WriteString(lp.GetName()) - h.Write(separatorByteSlice) - h.WriteString(lp.GetValue()) - h.Write(separatorByteSlice) - } - hSum := h.Sum64() - if _, exists := metricHashes[hSum]; exists { - return fmt.Errorf( - "collected metric %q { %s} was collected before with the same name and label values", - name, dtoMetric, - ) - } - metricHashes[hSum] = struct{}{} - return nil -} - -func checkDescConsistency( - metricFamily *dto.MetricFamily, - dtoMetric *dto.Metric, - desc *Desc, -) error { - // Desc help consistency with metric family help. - if metricFamily.GetHelp() != desc.help { - return fmt.Errorf( - "collected metric %s %s has help %q but should have %q", - metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, - ) - } - - // Is the desc consistent with the content of the metric? 
- lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) - copy(lpsFromDesc, desc.constLabelPairs) - for _, l := range desc.variableLabels { - lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ - Name: proto.String(l), - }) - } - if len(lpsFromDesc) != len(dtoMetric.Label) { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - sort.Sort(labelPairSorter(lpsFromDesc)) - for i, lpFromDesc := range lpsFromDesc { - lpFromMetric := dtoMetric.Label[i] - if lpFromDesc.GetName() != lpFromMetric.GetName() || - lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { - return fmt.Errorf( - "labels in collected metric %s %s are inconsistent with descriptor %s", - metricFamily.GetName(), dtoMetric, desc, - ) - } - } - return nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go deleted file mode 100644 index ae42e76..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ /dev/null @@ -1,736 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "math" - "runtime" - "sort" - "sync" - "sync/atomic" - "time" - - "github.com/beorn7/perks/quantile" - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// quantileLabel is used for the label that defines the quantile in a -// summary. -const quantileLabel = "quantile" - -// A Summary captures individual observations from an event or sample stream and -// summarizes them in a manner similar to traditional summary statistics: 1. sum -// of observations, 2. observation count, 3. rank estimations. -// -// A typical use-case is the observation of request latencies. By default, a -// Summary provides the median, the 90th and the 99th percentile of the latency -// as rank estimations. However, the default behavior will change in the -// upcoming v1.0.0 of the library. There will be no rank estimations at all by -// default. For a sane transition, it is recommended to set the desired rank -// estimations explicitly. -// -// Note that the rank estimations cannot be aggregated in a meaningful way with -// the Prometheus query language (i.e. you cannot average or add them). If you -// need aggregatable quantiles (e.g. you want the 99th percentile latency of all -// queries served across all instances of a service), consider the Histogram -// metric type. See the Prometheus documentation for more details. -// -// To create Summary instances, use NewSummary. -type Summary interface { - Metric - Collector - - // Observe adds a single observation to the summary. - Observe(float64) -} - -var errQuantileLabelNotAllowed = fmt.Errorf( - "%q is not allowed as label name in summaries", quantileLabel, -) - -// Default values for SummaryOpts. 
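// Editor's example (a sketch, not part of the original file): constructing a
// Summary with explicit Objectives, as the type's doc comment above
// recommends. Metric name, help string, and objective values are
// illustrative.
var requestLatency = prometheus.NewSummary(prometheus.SummaryOpts{
	Name:       "http_request_duration_seconds",
	Help:       "HTTP request latency.",
	Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
})

func recordRequest(seconds float64) {
	requestLatency.Observe(seconds)
}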
-const ( - // DefMaxAge is the default duration for which observations stay - // relevant. - DefMaxAge time.Duration = 10 * time.Minute - // DefAgeBuckets is the default number of buckets used to calculate the - // age of observations. - DefAgeBuckets = 5 - // DefBufCap is the standard buffer size for collecting Summary observations. - DefBufCap = 500 -) - -// SummaryOpts bundles the options for creating a Summary metric. It is -// mandatory to set Name to a non-empty string. While all other fields are -// optional and can safely be left at their zero value, it is recommended to set -// a help string and to explicitly set the Objectives field to the desired value -// as the default value will change in the upcoming v1.0.0 of the library. -type SummaryOpts struct { - // Namespace, Subsystem, and Name are components of the fully-qualified - // name of the Summary (created by joining these components with - // "_"). Only Name is mandatory, the others merely help structuring the - // name. Note that the fully-qualified name of the Summary must be a - // valid Prometheus metric name. - Namespace string - Subsystem string - Name string - - // Help provides information about this Summary. - // - // Metrics with the same fully-qualified name must have the same Help - // string. - Help string - - // ConstLabels are used to attach fixed labels to this metric. Metrics - // with the same fully-qualified name must have the same label names in - // their ConstLabels. - // - // Due to the way a Summary is represented in the Prometheus text format - // and how it is handled by the Prometheus server internally, “quantile” - // is an illegal label name. Construction of a Summary or SummaryVec - // will panic if this label name is used in ConstLabels. - // - // ConstLabels are only used rarely. In particular, do not use them to - // attach the same labels to all your metrics. Those use cases are - // better covered by target labels set by the scraping Prometheus - // server, or by one specific metric (e.g. a build_info or a - // machine_role metric). See also - // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels - ConstLabels Labels - - // Objectives defines the quantile rank estimates with their respective - // absolute error. If Objectives[q] = e, then the value reported for q - // will be the φ-quantile value for some φ between q-e and q+e. The - // default value is an empty map, resulting in a summary without - // quantiles. - Objectives map[float64]float64 - - // MaxAge defines the duration for which an observation stays relevant - // for the summary. Must be positive. The default value is DefMaxAge. - MaxAge time.Duration - - // AgeBuckets is the number of buckets used to exclude observations that - // are older than MaxAge from the summary. A higher number has a - // resource penalty, so only increase it if the higher resolution is - // really required. For very high observation rates, you might want to - // reduce the number of age buckets. With only one age bucket, you will - // effectively see a complete reset of the summary each time MaxAge has - // passed. The default value is DefAgeBuckets. - AgeBuckets uint32 - - // BufCap defines the default sample stream buffer size. The default - // value of DefBufCap should suffice for most uses. If there is a need - // to increase the value, a multiple of 500 is recommended (because that - // is the internal buffer size of the underlying package - // "github.com/bmizerany/perks/quantile"). 
- BufCap uint32 -} - -// Problem with the sliding-window decay algorithm... The Merge method of -// perk/quantile is actually not working as advertised - and it might be -// unfixable, as the underlying algorithm is apparently not capable of merging -// summaries in the first place. To avoid using Merge, we are currently adding -// observations to _each_ age bucket, i.e. the effort to add a sample is -// essentially multiplied by the number of age buckets. When rotating age -// buckets, we empty the previous head stream. On scrape time, we simply take -// the quantiles from the head stream (no merging required). Result: More effort -// on observation time, less effort on scrape time, which is exactly the -// opposite of what we try to accomplish, but at least the results are correct. -// -// The quite elegant previous contraption to merge the age buckets efficiently -// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) -// can't be used anymore. - -// NewSummary creates a new Summary based on the provided SummaryOpts. -func NewSummary(opts SummaryOpts) Summary { - return newSummary( - NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), - opts, - ) -} - -func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { - if len(desc.variableLabels) != len(labelValues) { - panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) - } - - for _, n := range desc.variableLabels { - if n == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - for _, lp := range desc.constLabelPairs { - if lp.GetName() == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - - if opts.Objectives == nil { - opts.Objectives = map[float64]float64{} - } - - if opts.MaxAge < 0 { - panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) - } - if opts.MaxAge == 0 { - opts.MaxAge = DefMaxAge - } - - if opts.AgeBuckets == 0 { - opts.AgeBuckets = DefAgeBuckets - } - - if opts.BufCap == 0 { - opts.BufCap = DefBufCap - } - - if len(opts.Objectives) == 0 { - // Use the lock-free implementation of a Summary without objectives. - s := &noObjectivesSummary{ - desc: desc, - labelPairs: makeLabelPairs(desc, labelValues), - counts: [2]*summaryCounts{{}, {}}, - } - s.init(s) // Init self-collection. - return s - } - - s := &summary{ - desc: desc, - - objectives: opts.Objectives, - sortedObjectives: make([]float64, 0, len(opts.Objectives)), - - labelPairs: makeLabelPairs(desc, labelValues), - - hotBuf: make([]float64, 0, opts.BufCap), - coldBuf: make([]float64, 0, opts.BufCap), - streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), - } - s.headStreamExpTime = time.Now().Add(s.streamDuration) - s.hotBufExpTime = s.headStreamExpTime - - for i := uint32(0); i < opts.AgeBuckets; i++ { - s.streams = append(s.streams, s.newStream()) - } - s.headStream = s.streams[0] - - for qu := range s.objectives { - s.sortedObjectives = append(s.sortedObjectives, qu) - } - sort.Float64s(s.sortedObjectives) - - s.init(s) // Init self-collection. - return s -} - -type summary struct { - selfCollector - - bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. - mtx sync.Mutex // Protects every other moving part. - // Lock bufMtx before mtx if both are needed. 
- - desc *Desc - - objectives map[float64]float64 - sortedObjectives []float64 - - labelPairs []*dto.LabelPair - - sum float64 - cnt uint64 - - hotBuf, coldBuf []float64 - - streams []*quantile.Stream - streamDuration time.Duration - headStream *quantile.Stream - headStreamIdx int - headStreamExpTime, hotBufExpTime time.Time -} - -func (s *summary) Desc() *Desc { - return s.desc -} - -func (s *summary) Observe(v float64) { - s.bufMtx.Lock() - defer s.bufMtx.Unlock() - - now := time.Now() - if now.After(s.hotBufExpTime) { - s.asyncFlush(now) - } - s.hotBuf = append(s.hotBuf, v) - if len(s.hotBuf) == cap(s.hotBuf) { - s.asyncFlush(now) - } -} - -func (s *summary) Write(out *dto.Metric) error { - sum := &dto.Summary{} - qs := make([]*dto.Quantile, 0, len(s.objectives)) - - s.bufMtx.Lock() - s.mtx.Lock() - // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) - s.bufMtx.Unlock() - - s.flushColdBuf() - sum.SampleCount = proto.Uint64(s.cnt) - sum.SampleSum = proto.Float64(s.sum) - - for _, rank := range s.sortedObjectives { - var q float64 - if s.headStream.Count() == 0 { - q = math.NaN() - } else { - q = s.headStream.Query(rank) - } - qs = append(qs, &dto.Quantile{ - Quantile: proto.Float64(rank), - Value: proto.Float64(q), - }) - } - - s.mtx.Unlock() - - if len(qs) > 0 { - sort.Sort(quantSort(qs)) - } - sum.Quantile = qs - - out.Summary = sum - out.Label = s.labelPairs - return nil -} - -func (s *summary) newStream() *quantile.Stream { - return quantile.NewTargeted(s.objectives) -} - -// asyncFlush needs bufMtx locked. -func (s *summary) asyncFlush(now time.Time) { - s.mtx.Lock() - s.swapBufs(now) - - // Unblock the original goroutine that was responsible for the mutation - // that triggered the compaction. But hold onto the global non-buffer - // state mutex until the operation finishes. - go func() { - s.flushColdBuf() - s.mtx.Unlock() - }() -} - -// rotateStreams needs mtx AND bufMtx locked. -func (s *summary) maybeRotateStreams() { - for !s.hotBufExpTime.Equal(s.headStreamExpTime) { - s.headStream.Reset() - s.headStreamIdx++ - if s.headStreamIdx >= len(s.streams) { - s.headStreamIdx = 0 - } - s.headStream = s.streams[s.headStreamIdx] - s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) - } -} - -// flushColdBuf needs mtx locked. -func (s *summary) flushColdBuf() { - for _, v := range s.coldBuf { - for _, stream := range s.streams { - stream.Insert(v) - } - s.cnt++ - s.sum += v - } - s.coldBuf = s.coldBuf[0:0] - s.maybeRotateStreams() -} - -// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. -func (s *summary) swapBufs(now time.Time) { - if len(s.coldBuf) != 0 { - panic("coldBuf is not empty") - } - s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf - // hotBuf is now empty and gets new expiration set. - for now.After(s.hotBufExpTime) { - s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) - } -} - -type summaryCounts struct { - // sumBits contains the bits of the float64 representing the sum of all - // observations. sumBits and count have to go first in the struct to - // guarantee alignment for atomic operations. - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - sumBits uint64 - count uint64 -} - -type noObjectivesSummary struct { - // countAndHotIdx enables lock-free writes with use of atomic updates. - // The most significant bit is the hot index [0 or 1] of the count field - // below. Observe calls update the hot one. All remaining bits count the - // number of Observe calls. 
Observe starts by incrementing this counter, - // and finish by incrementing the count field in the respective - // summaryCounts, as a marker for completion. - // - // Calls of the Write method (which are non-mutating reads from the - // perspective of the summary) swap the hot–cold under the writeMtx - // lock. A cooldown is awaited (while locked) by comparing the number of - // observations with the initiation count. Once they match, then the - // last observation on the now cool one has completed. All cool fields must - // be merged into the new hot before releasing writeMtx. - - // Fields with atomic access first! See alignment constraint: - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG - countAndHotIdx uint64 - - selfCollector - desc *Desc - writeMtx sync.Mutex // Only used in the Write method. - - // Two counts, one is "hot" for lock-free observations, the other is - // "cold" for writing out a dto.Metric. It has to be an array of - // pointers to guarantee 64bit alignment of the histogramCounts, see - // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. - counts [2]*summaryCounts - - labelPairs []*dto.LabelPair -} - -func (s *noObjectivesSummary) Desc() *Desc { - return s.desc -} - -func (s *noObjectivesSummary) Observe(v float64) { - // We increment h.countAndHotIdx so that the counter in the lower - // 63 bits gets incremented. At the same time, we get the new value - // back, which we can use to find the currently-hot counts. - n := atomic.AddUint64(&s.countAndHotIdx, 1) - hotCounts := s.counts[n>>63] - - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + v) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - break - } - } - // Increment count last as we take it as a signal that the observation - // is complete. - atomic.AddUint64(&hotCounts.count, 1) -} - -func (s *noObjectivesSummary) Write(out *dto.Metric) error { - // For simplicity, we protect this whole method by a mutex. It is not in - // the hot path, i.e. Observe is called much more often than Write. The - // complication of making Write lock-free isn't worth it, if possible at - // all. - s.writeMtx.Lock() - defer s.writeMtx.Unlock() - - // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) - // without touching the count bits. See the struct comments for a full - // description of the algorithm. - n := atomic.AddUint64(&s.countAndHotIdx, 1<<63) - // count is contained unchanged in the lower 63 bits. - count := n & ((1 << 63) - 1) - // The most significant bit tells us which counts is hot. The complement - // is thus the cold one. - hotCounts := s.counts[n>>63] - coldCounts := s.counts[(^n)>>63] - - // Await cooldown. - for count != atomic.LoadUint64(&coldCounts.count) { - runtime.Gosched() // Let observations get work done. - } - - sum := &dto.Summary{ - SampleCount: proto.Uint64(count), - SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), - } - - out.Summary = sum - out.Label = s.labelPairs - - // Finally add all the cold counts to the new hot counts and reset the cold counts. 
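// Editor's worked example (illustrative, not part of the original file):
// after 5 completed observations with hot index 1, countAndHotIdx holds
// 1<<63 | 5. The atomic.AddUint64(&s.countAndHotIdx, 1<<63) at the top of
// Write wraps the top bit, flipping the hot index to 0 while the low 63
// bits, the observation count, still read 5.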
- atomic.AddUint64(&hotCounts.count, count) - atomic.StoreUint64(&coldCounts.count, 0) - for { - oldBits := atomic.LoadUint64(&hotCounts.sumBits) - newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) - if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { - atomic.StoreUint64(&coldCounts.sumBits, 0) - break - } - } - return nil -} - -type quantSort []*dto.Quantile - -func (s quantSort) Len() int { - return len(s) -} - -func (s quantSort) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s quantSort) Less(i, j int) bool { - return s[i].GetQuantile() < s[j].GetQuantile() -} - -// SummaryVec is a Collector that bundles a set of Summaries that all share the -// same Desc, but have different values for their variable labels. This is used -// if you want to count the same thing partitioned by various dimensions -// (e.g. HTTP request latencies, partitioned by status code and method). Create -// instances with NewSummaryVec. -type SummaryVec struct { - *metricVec -} - -// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and -// partitioned by the given label names. -// -// Due to the way a Summary is represented in the Prometheus text format and how -// it is handled by the Prometheus server internally, “quantile” is an illegal -// label name. NewSummaryVec will panic if this label name is used. -func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { - for _, ln := range labelNames { - if ln == quantileLabel { - panic(errQuantileLabelNotAllowed) - } - } - desc := NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - labelNames, - opts.ConstLabels, - ) - return &SummaryVec{ - metricVec: newMetricVec(desc, func(lvs ...string) Metric { - return newSummary(desc, opts, lvs...) - }), - } -} - -// GetMetricWithLabelValues returns the Summary for the given slice of label -// values (same order as the VariableLabels in Desc). If that combination of -// label values is accessed for the first time, a new Summary is created. -// -// It is possible to call this method without using the returned Summary to only -// create the new Summary but leave it at its starting value, a Summary without -// any observations. -// -// Keeping the Summary for later use is possible (and should be considered if -// performance is critical), but keep in mind that Reset, DeleteLabelValues and -// Delete can be used to delete the Summary from the SummaryVec. In that case, -// the Summary will still exist, but it will not be exported anymore, even if a -// Summary with the same label values is created later. See also the CounterVec -// example. -// -// An error is returned if the number of label values is not the same as the -// number of VariableLabels in Desc (minus any curried labels). -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as -// an alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the GaugeVec example. -func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { - metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// GetMetricWith returns the Summary for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Summary is created. Implications of
-// creating a Summary without using it and keeping the Summary for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc (minus any curried labels).
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
- metric, err := v.metricVec.getMetricWith(labels)
- if metric != nil {
- return metric.(Observer), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. Not returning an
-// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
- s, err := v.GetMetricWithLabelValues(lvs...)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// With works as GetMetricWith, but panics where GetMetricWith would have
-// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (v *SummaryVec) With(labels Labels) Observer {
- s, err := v.GetMetricWith(labels)
- if err != nil {
- panic(err)
- }
- return s
-}
-
-// CurryWith returns a vector curried with the provided labels, i.e. the
-// returned vector has those labels pre-set for all labeled operations performed
-// on it. The cardinality of the curried vector is reduced accordingly. The
-// order of the remaining labels stays the same (just with the curried labels
-// taken out of the sequence – which is relevant for the
-// (GetMetric)WithLabelValues methods). It is possible to curry a curried
-// vector, but only with labels not yet used for currying before.
-//
-// The metrics contained in the SummaryVec are shared between the curried and
-// uncurried vectors. They are just accessed differently. Curried and uncurried
-// vectors behave identically in terms of collection. Only one must be
-// registered with a given registry (usually the uncurried version). The Reset
-// method deletes all metrics, even if called on a curried vector.
-func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
- vec, err := v.curryWith(labels)
- if vec != nil {
- return &SummaryVec{vec}, err
- }
- return nil, err
-}
-
-// MustCurryWith works as CurryWith but panics where CurryWith would have
-// returned an error.
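// Editor's example (a sketch, not part of the original file): typical
// SummaryVec usage, including the currying described above. Metric and label
// names are illustrative; MustCurryWith is defined just below.
func summaryVecExample() {
	vec := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "HTTP request latency.",
	}, []string{"code", "method"})

	vec.WithLabelValues("200", "GET").Observe(0.3)

	getOnly := vec.MustCurryWith(prometheus.Labels{"method": "GET"})
	getOnly.WithLabelValues("404").Observe(0.1) // Only "code" remains variable.
}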
-func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
- vec, err := v.CurryWith(labels)
- if err != nil {
- panic(err)
- }
- return vec
-}
-
-type constSummary struct {
- desc *Desc
- count uint64
- sum float64
- quantiles map[float64]float64
- labelPairs []*dto.LabelPair
-}
-
-func (s *constSummary) Desc() *Desc {
- return s.desc
-}
-
-func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
- qs := make([]*dto.Quantile, 0, len(s.quantiles))
-
- sum.SampleCount = proto.Uint64(s.count)
- sum.SampleSum = proto.Float64(s.sum)
-
- for rank, q := range s.quantiles {
- qs = append(qs, &dto.Quantile{
- Quantile: proto.Float64(rank),
- Value: proto.Float64(q),
- })
- }
-
- if len(qs) > 0 {
- sort.Sort(quantSort(qs))
- }
- sum.Quantile = qs
-
- out.Summary = sum
- out.Label = s.labelPairs
-
- return nil
-}
-
-// NewConstSummary returns a metric representing a Prometheus summary with fixed
-// values for the count, sum, and quantiles. As those parameters cannot be
-// changed, the returned value does not implement the Summary interface (but
-// only the Metric interface). Users of this package will not have much use for
-// it in regular operations. However, when implementing custom Collectors, it is
-// useful as a throw-away metric that is generated on the fly to send it to
-// Prometheus in the Collect method.
-//
-// quantiles maps ranks to quantile values. For example, a median latency of
-// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
-//
-// NewConstSummary returns an error if the length of labelValues is not
-// consistent with the variable labels in Desc or if Desc is invalid.
-func NewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) (Metric, error) {
- if desc.err != nil {
- return nil, desc.err
- }
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
- return nil, err
- }
- return &constSummary{
- desc: desc,
- count: count,
- sum: sum,
- quantiles: quantiles,
- labelPairs: makeLabelPairs(desc, labelValues),
- }, nil
-}
-
-// MustNewConstSummary is a version of NewConstSummary that panics where
-// NewConstSummary would have returned an error.
-func MustNewConstSummary(
- desc *Desc,
- count uint64,
- sum float64,
- quantiles map[float64]float64,
- labelValues ...string,
-) Metric {
- m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
- if err != nil {
- panic(err)
- }
- return m
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
deleted file mode 100644
index 8d5f105..0000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package prometheus
-
-import "time"
-
-// Timer is a helper type to time functions.
Use NewTimer to create new -// instances. -type Timer struct { - begin time.Time - observer Observer -} - -// NewTimer creates a new Timer. The provided Observer is used to observe a -// duration in seconds. Timer is usually used to time a function call in the -// following way: -// func TimeMe() { -// timer := NewTimer(myHistogram) -// defer timer.ObserveDuration() -// // Do actual work. -// } -func NewTimer(o Observer) *Timer { - return &Timer{ - begin: time.Now(), - observer: o, - } -} - -// ObserveDuration records the duration passed since the Timer was created with -// NewTimer. It calls the Observe method of the Observer provided during -// construction with the duration in seconds as an argument. The observed -// duration is also returned. ObserveDuration is usually called with a defer -// statement. -// -// Note that this method is only guaranteed to never observe negative durations -// if used with Go1.9+. -func (t *Timer) ObserveDuration() time.Duration { - d := time.Since(t.begin) - if t.observer != nil { - t.observer.Observe(d.Seconds()) - } - return d -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go deleted file mode 100644 index 0f9ce63..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -// UntypedOpts is an alias for Opts. See there for doc comments. -type UntypedOpts Opts - -// UntypedFunc works like GaugeFunc but the collected metric is of type -// "Untyped". UntypedFunc is useful to mirror an external metric of unknown -// type. -// -// To create UntypedFunc instances, use NewUntypedFunc. -type UntypedFunc interface { - Metric - Collector -} - -// NewUntypedFunc creates a new UntypedFunc based on the provided -// UntypedOpts. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where an UntypedFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { - return newValueFunc(NewDesc( - BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), - opts.Help, - nil, - opts.ConstLabels, - ), UntypedValue, function) -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go deleted file mode 100644 index 2be470c..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/value.go +++ /dev/null @@ -1,204 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - "time" - "unicode/utf8" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" - - dto "github.com/prometheus/client_model/go" -) - -// ValueType is an enumeration of metric types that represent a simple value. -type ValueType int - -// Possible values for the ValueType enum. Use UntypedValue to mark a metric -// with an unknown type. -const ( - _ ValueType = iota - CounterValue - GaugeValue - UntypedValue -) - -// valueFunc is a generic metric for simple values retrieved on collect time -// from a function. It implements Metric and Collector. Its effective type is -// determined by ValueType. This is a low-level building block used by the -// library to back the implementations of CounterFunc, GaugeFunc, and -// UntypedFunc. -type valueFunc struct { - selfCollector - - desc *Desc - valType ValueType - function func() float64 - labelPairs []*dto.LabelPair -} - -// newValueFunc returns a newly allocated valueFunc with the given Desc and -// ValueType. The value reported is determined by calling the given function -// from within the Write method. Take into account that metric collection may -// happen concurrently. If that results in concurrent calls to Write, like in -// the case where a valueFunc is directly registered with Prometheus, the -// provided function must be concurrency-safe. -func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { - result := &valueFunc{ - desc: desc, - valType: valueType, - function: function, - labelPairs: makeLabelPairs(desc, nil), - } - result.init(result) - return result -} - -func (v *valueFunc) Desc() *Desc { - return v.desc -} - -func (v *valueFunc) Write(out *dto.Metric) error { - return populateMetric(v.valType, v.function(), v.labelPairs, nil, out) -} - -// NewConstMetric returns a metric with one fixed value that cannot be -// changed. Users of this package will not have much use for it in regular -// operations. However, when implementing custom Collectors, it is useful as a -// throw-away metric that is generated on the fly to send it to Prometheus in -// the Collect method. NewConstMetric returns an error if the length of -// labelValues is not consistent with the variable labels in Desc or if Desc is -// invalid. -func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { - if desc.err != nil { - return nil, desc.err - } - if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { - return nil, err - } - return &constMetric{ - desc: desc, - valType: valueType, - val: value, - labelPairs: makeLabelPairs(desc, labelValues), - }, nil -} - -// MustNewConstMetric is a version of NewConstMetric that panics where -// NewConstMetric would have returned an error. -func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { - m, err := NewConstMetric(desc, valueType, value, labelValues...) 
- if err != nil { - panic(err) - } - return m -} - -type constMetric struct { - desc *Desc - valType ValueType - val float64 - labelPairs []*dto.LabelPair -} - -func (m *constMetric) Desc() *Desc { - return m.desc -} - -func (m *constMetric) Write(out *dto.Metric) error { - return populateMetric(m.valType, m.val, m.labelPairs, nil, out) -} - -func populateMetric( - t ValueType, - v float64, - labelPairs []*dto.LabelPair, - e *dto.Exemplar, - m *dto.Metric, -) error { - m.Label = labelPairs - switch t { - case CounterValue: - m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e} - case GaugeValue: - m.Gauge = &dto.Gauge{Value: proto.Float64(v)} - case UntypedValue: - m.Untyped = &dto.Untyped{Value: proto.Float64(v)} - default: - return fmt.Errorf("encountered unknown type %v", t) - } - return nil -} - -func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { - totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) - if totalLen == 0 { - // Super fast path. - return nil - } - if len(desc.variableLabels) == 0 { - // Moderately fast path. - return desc.constLabelPairs - } - labelPairs := make([]*dto.LabelPair, 0, totalLen) - for i, n := range desc.variableLabels { - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(n), - Value: proto.String(labelValues[i]), - }) - } - labelPairs = append(labelPairs, desc.constLabelPairs...) - sort.Sort(labelPairSorter(labelPairs)) - return labelPairs -} - -// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels. -const ExemplarMaxRunes = 64 - -// newExemplar creates a new dto.Exemplar from the provided values. An error is -// returned if any of the label names or values are invalid or if the total -// number of runes in the label names and values exceeds ExemplarMaxRunes. -func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) { - e := &dto.Exemplar{} - e.Value = proto.Float64(value) - tsProto, err := ptypes.TimestampProto(ts) - if err != nil { - return nil, err - } - e.Timestamp = tsProto - labelPairs := make([]*dto.LabelPair, 0, len(l)) - var runes int - for name, value := range l { - if !checkLabelName(name) { - return nil, fmt.Errorf("exemplar label name %q is invalid", name) - } - runes += utf8.RuneCountInString(name) - if !utf8.ValidString(value) { - return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value) - } - runes += utf8.RuneCountInString(value) - labelPairs = append(labelPairs, &dto.LabelPair{ - Name: proto.String(name), - Value: proto.String(value), - }) - } - if runes > ExemplarMaxRunes { - return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes) - } - e.Label = labelPairs - return e, nil -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go deleted file mode 100644 index d53848d..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sync" - - "github.com/prometheus/common/model" -) - -// metricVec is a Collector to bundle metrics of the same name that differ in -// their label values. metricVec is not used directly (and therefore -// unexported). It is used as a building block for implementations of vectors of -// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. -// It also handles label currying. -type metricVec struct { - *metricMap - - curry []curriedLabelValue - - // hashAdd and hashAddByte can be replaced for testing collision handling. - hashAdd func(h uint64, s string) uint64 - hashAddByte func(h uint64, b byte) uint64 -} - -// newMetricVec returns an initialized metricVec. -func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec { - return &metricVec{ - metricMap: &metricMap{ - metrics: map[uint64][]metricWithLabelValues{}, - desc: desc, - newMetric: newMetric, - }, - hashAdd: hashAdd, - hashAddByte: hashAddByte, - } -} - -// DeleteLabelValues removes the metric where the variable labels are the same -// as those passed in as labels (same order as the VariableLabels in Desc). It -// returns true if a metric was deleted. -// -// It is not an error if the number of label values is not the same as the -// number of VariableLabels in Desc. However, such inconsistent label count can -// never match an actual metric, so the method will always return false in that -// case. -// -// Note that for more than one label value, this method is prone to mistakes -// caused by an incorrect order of arguments. Consider Delete(Labels) as an -// alternative to avoid that type of mistake. For higher label numbers, the -// latter has a much more readable (albeit more verbose) syntax, but it comes -// with a performance overhead (for creating and processing the Labels map). -// See also the CounterVec example. -func (m *metricVec) DeleteLabelValues(lvs ...string) bool { - h, err := m.hashLabelValues(lvs) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry) -} - -// Delete deletes the metric where the variable labels are the same as those -// passed in as labels. It returns true if a metric was deleted. -// -// It is not an error if the number and names of the Labels are inconsistent -// with those of the VariableLabels in Desc. However, such inconsistent Labels -// can never match an actual metric, so the method will always return false in -// that case. -// -// This method is used for the same purpose as DeleteLabelValues(...string). See -// there for pros and cons of the two methods. -func (m *metricVec) Delete(labels Labels) bool { - h, err := m.hashLabels(labels) - if err != nil { - return false - } - - return m.metricMap.deleteByHashWithLabels(h, labels, m.curry) -} - -// Without explicit forwarding of Describe, Collect, Reset, those methods won't -// show up in GoDoc. - -// Describe implements Collector. -func (m *metricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) } - -// Collect implements Collector. -func (m *metricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) } - -// Reset deletes all metrics in this vector. 
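// Editor's example (a sketch, not part of the original file): the two
// deletion styles documented above, shown on a CounterVec, which embeds
// metricVec. Label names and values are illustrative.
func deleteExamples(vec *prometheus.CounterVec) {
	vec.DeleteLabelValues("200", "GET")                           // By position.
	vec.Delete(prometheus.Labels{"code": "200", "method": "GET"}) // By name.
}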
-func (m *metricVec) Reset() { m.metricMap.Reset() } - -func (m *metricVec) curryWith(labels Labels) (*metricVec, error) { - var ( - newCurry []curriedLabelValue - oldCurry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(oldCurry) && oldCurry[iCurry].index == i { - if ok { - return nil, fmt.Errorf("label name %q is already curried", label) - } - newCurry = append(newCurry, oldCurry[iCurry]) - iCurry++ - } else { - if !ok { - continue // Label stays uncurried. - } - newCurry = append(newCurry, curriedLabelValue{i, val}) - } - } - if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { - return nil, fmt.Errorf("%d unknown label(s) found during currying", l) - } - - return &metricVec{ - metricMap: m.metricMap, - curry: newCurry, - hashAdd: m.hashAdd, - hashAddByte: m.hashAddByte, - }, nil -} - -func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { - h, err := m.hashLabelValues(lvs) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil -} - -func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { - h, err := m.hashLabels(labels) - if err != nil { - return nil, err - } - - return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil -} - -func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { - if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iVals, iCurry int - ) - for i := 0; i < len(m.desc.variableLabels); i++ { - if iCurry < len(curry) && curry[iCurry].index == i { - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - h = m.hashAdd(h, vals[iVals]) - iVals++ - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -func (m *metricVec) hashLabels(labels Labels) (uint64, error) { - if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { - return 0, err - } - - var ( - h = hashNew() - curry = m.curry - iCurry int - ) - for i, label := range m.desc.variableLabels { - val, ok := labels[label] - if iCurry < len(curry) && curry[iCurry].index == i { - if ok { - return 0, fmt.Errorf("label name %q is already curried", label) - } - h = m.hashAdd(h, curry[iCurry].value) - iCurry++ - } else { - if !ok { - return 0, fmt.Errorf("label name %q missing in label map", label) - } - h = m.hashAdd(h, val) - } - h = m.hashAddByte(h, model.SeparatorByte) - } - return h, nil -} - -// metricWithLabelValues provides the metric and its label values for -// disambiguation on hash collision. -type metricWithLabelValues struct { - values []string - metric Metric -} - -// curriedLabelValue sets the curried value for a label at the given index. -type curriedLabelValue struct { - index int - value string -} - -// metricMap is a helper for metricVec and shared between differently curried -// metricVecs. -type metricMap struct { - mtx sync.RWMutex // Protects metrics. - metrics map[uint64][]metricWithLabelValues - desc *Desc - newMetric func(labelValues ...string) Metric -} - -// Describe implements Collector. It will send exactly one Desc to the provided -// channel. -func (m *metricMap) Describe(ch chan<- *Desc) { - ch <- m.desc -} - -// Collect implements Collector. 
-func (m *metricMap) Collect(ch chan<- Metric) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - for _, metrics := range m.metrics { - for _, metric := range metrics { - ch <- metric.metric - } - } -} - -// Reset deletes all metrics in this vector. -func (m *metricMap) Reset() { - m.mtx.Lock() - defer m.mtx.Unlock() - - for h := range m.metrics { - delete(m.metrics, h) - } -} - -// deleteByHashWithLabelValues removes the metric from the hash bucket h. If -// there are multiple matches in the bucket, use lvs to select a metric and -// remove only that metric. -func (m *metricMap) deleteByHashWithLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - - i := findMetricWithLabelValues(metrics, lvs, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// deleteByHashWithLabels removes the metric from the hash bucket h. If there -// are multiple matches in the bucket, use labels to select a metric and remove -// only that metric. -func (m *metricMap) deleteByHashWithLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) bool { - m.mtx.Lock() - defer m.mtx.Unlock() - - metrics, ok := m.metrics[h] - if !ok { - return false - } - i := findMetricWithLabels(m.desc, metrics, labels, curry) - if i >= len(metrics) { - return false - } - - if len(metrics) > 1 { - m.metrics[h] = append(metrics[:i], metrics[i+1:]...) - } else { - delete(m.metrics, h) - } - return true -} - -// getOrCreateMetricWithLabelValues retrieves the metric by hash and label values -// or creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabelValues( - hash uint64, lvs []string, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) - if !ok { - inlinedLVs := inlineLabelValues(lvs, curry) - metric = m.newMetric(inlinedLVs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) - } - return metric -} - -// getOrCreateMetricWithLabels retrieves the metric by hash and labels or -// creates it and returns the new one. -// -// This function holds the mutex. -func (m *metricMap) getOrCreateMetricWithLabels( - hash uint64, labels Labels, curry []curriedLabelValue, -) Metric { - m.mtx.RLock() - metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) - m.mtx.RUnlock() - if ok { - return metric - } - - m.mtx.Lock() - defer m.mtx.Unlock() - metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) - if !ok { - lvs := extractLabelValues(m.desc, labels, curry) - metric = m.newMetric(lvs...) - m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) - } - return metric -} - -// getMetricWithHashAndLabelValues gets a metric while handling possible -// collisions in the hash space. Must be called while holding the read mutex.
-func (m *metricMap) getMetricWithHashAndLabelValues( - h uint64, lvs []string, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// getMetricWithHashAndLabels gets a metric while handling possible collisions in -// the hash space. Must be called while holding read mutex. -func (m *metricMap) getMetricWithHashAndLabels( - h uint64, labels Labels, curry []curriedLabelValue, -) (Metric, bool) { - metrics, ok := m.metrics[h] - if ok { - if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { - return metrics[i].metric, true - } - } - return nil, false -} - -// findMetricWithLabelValues returns the index of the matching metric or -// len(metrics) if not found. -func findMetricWithLabelValues( - metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabelValues(metric.values, lvs, curry) { - return i - } - } - return len(metrics) -} - -// findMetricWithLabels returns the index of the matching metric or len(metrics) -// if not found. -func findMetricWithLabels( - desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, -) int { - for i, metric := range metrics { - if matchLabels(desc, metric.values, labels, curry) { - return i - } - } - return len(metrics) -} - -func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { - if len(values) != len(lvs)+len(curry) { - return false - } - var iLVs, iCurry int - for i, v := range values { - if iCurry < len(curry) && curry[iCurry].index == i { - if v != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if v != lvs[iLVs] { - return false - } - iLVs++ - } - return true -} - -func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { - if len(values) != len(labels)+len(curry) { - return false - } - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - if values[i] != curry[iCurry].value { - return false - } - iCurry++ - continue - } - if values[i] != labels[k] { - return false - } - } - return true -} - -func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { - labelValues := make([]string, len(labels)+len(curry)) - iCurry := 0 - for i, k := range desc.variableLabels { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = labels[k] - } - return labelValues -} - -func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { - labelValues := make([]string, len(lvs)+len(curry)) - var iCurry, iLVs int - for i := range labelValues { - if iCurry < len(curry) && curry[iCurry].index == i { - labelValues[i] = curry[iCurry].value - iCurry++ - continue - } - labelValues[i] = lvs[iLVs] - iLVs++ - } - return labelValues -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go deleted file mode 100644 index e303eef..0000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "fmt" - "sort" - - "github.com/golang/protobuf/proto" - - dto "github.com/prometheus/client_model/go" -) - -// WrapRegistererWith returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided Labels to all Metrics it collects (as -// ConstLabels). The Metrics collected by the unmodified Collector must not -// duplicate any of those labels. -// -// WrapRegistererWith provides a way to add fixed labels to a subset of -// Collectors. It should not be used to add fixed labels to all metrics exposed. -// -// Conflicts between Collectors registered through the original Registerer and -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry. -// -// The Collector example demonstrates a use of WrapRegistererWith. -func WrapRegistererWith(labels Labels, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - labels: labels, - } -} - -// WrapRegistererWithPrefix returns a Registerer wrapping the provided -// Registerer. Collectors registered with the returned Registerer will be -// registered with the wrapped Registerer in a modified way. The modified -// Collector adds the provided prefix to the name of all Metrics it collects. -// -// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of -// a sub-system. To make this work, register metrics of the sub-system with the -// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful -// to use the same prefix for all metrics exposed. In particular, do not prefix -// metric names that are standardized across applications, as that would break -// horizontal monitoring, for example the metrics provided by the Go collector -// (see NewGoCollector) and the process collector (see NewProcessCollector). (In -// fact, those metrics are already prefixed with “go_” or “process_”, -// respectively.) -// -// Conflicts between Collectors registered through the original Registerer and -// Collectors registered through the wrapping Registerer will still be -// detected. Any AlreadyRegisteredError returned by the Register method of -// either Registerer will contain the ExistingCollector in the form it was -// provided to the respective registry.
-func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { - return &wrappingRegisterer{ - wrappedRegisterer: reg, - prefix: prefix, - } -} - -type wrappingRegisterer struct { - wrappedRegisterer Registerer - prefix string - labels Labels -} - -func (r *wrappingRegisterer) Register(c Collector) error { - return r.wrappedRegisterer.Register(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -func (r *wrappingRegisterer) MustRegister(cs ...Collector) { - for _, c := range cs { - if err := r.Register(c); err != nil { - panic(err) - } - } -} - -func (r *wrappingRegisterer) Unregister(c Collector) bool { - return r.wrappedRegisterer.Unregister(&wrappingCollector{ - wrappedCollector: c, - prefix: r.prefix, - labels: r.labels, - }) -} - -type wrappingCollector struct { - wrappedCollector Collector - prefix string - labels Labels -} - -func (c *wrappingCollector) Collect(ch chan<- Metric) { - wrappedCh := make(chan Metric) - go func() { - c.wrappedCollector.Collect(wrappedCh) - close(wrappedCh) - }() - for m := range wrappedCh { - ch <- &wrappingMetric{ - wrappedMetric: m, - prefix: c.prefix, - labels: c.labels, - } - } -} - -func (c *wrappingCollector) Describe(ch chan<- *Desc) { - wrappedCh := make(chan *Desc) - go func() { - c.wrappedCollector.Describe(wrappedCh) - close(wrappedCh) - }() - for desc := range wrappedCh { - ch <- wrapDesc(desc, c.prefix, c.labels) - } -} - -func (c *wrappingCollector) unwrapRecursively() Collector { - switch wc := c.wrappedCollector.(type) { - case *wrappingCollector: - return wc.unwrapRecursively() - default: - return wc - } -} - -type wrappingMetric struct { - wrappedMetric Metric - prefix string - labels Labels -} - -func (m *wrappingMetric) Desc() *Desc { - return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) -} - -func (m *wrappingMetric) Write(out *dto.Metric) error { - if err := m.wrappedMetric.Write(out); err != nil { - return err - } - if len(m.labels) == 0 { - // No wrapping labels. - return nil - } - for ln, lv := range m.labels { - out.Label = append(out.Label, &dto.LabelPair{ - Name: proto.String(ln), - Value: proto.String(lv), - }) - } - sort.Sort(labelPairSorter(out.Label)) - return nil -} - -func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { - constLabels := Labels{} - for _, lp := range desc.constLabelPairs { - constLabels[*lp.Name] = *lp.Value - } - for ln, lv := range labels { - if _, alreadyUsed := constLabels[ln]; alreadyUsed { - return &Desc{ - fqName: desc.fqName, - help: desc.help, - variableLabels: desc.variableLabels, - constLabelPairs: desc.constLabelPairs, - err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), - } - } - constLabels[ln] = lv - } - // NewDesc will do remaining validations. - newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) - // Propagate errors if there were any. This will override any error - // created by NewDesc above, i.e. earlier errors get precedence. - if desc.err != nil { - newDesc.err = desc.err - } - return newDesc -} diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/client_model/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions.
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
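The vec.go and wrap.go files deleted above document the labeled-vector machinery (Delete vs. DeleteLabelValues, label currying) and the wrapping registerers; a minimal sketch of the exported client_golang API built on them, for reference — the metric name and the "myapp_" prefix are illustrative, not taken from this repository:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A CounterVec is backed by the unexported metricVec/metricMap shown above.
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "http_requests_total",
			Help: "HTTP requests partitioned by method and status.",
		},
		[]string{"method", "status"},
	)

	// WrapRegistererWithPrefix prefixes every metric registered through it.
	reg := prometheus.NewRegistry()
	prometheus.WrapRegistererWithPrefix("myapp_", reg).MustRegister(requests)

	requests.WithLabelValues("GET", "200").Inc()

	// MustCurryWith pins one label; the curried vector shares storage with
	// the original, so deletions through either are visible to both.
	getRequests := requests.MustCurryWith(prometheus.Labels{"method": "GET"})
	getRequests.WithLabelValues("500").Inc()

	// Delete(Labels) is the order-safe alternative to DeleteLabelValues(...),
	// at the cost of building a map, as the deleted doc comments explain.
	fmt.Println(requests.Delete(prometheus.Labels{"method": "GET", "status": "200"})) // true
	fmt.Println(requests.DeleteLabelValues("GET", "500"))                             // true
}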
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE deleted file mode 100644 index 20110e4..0000000 --- a/vendor/github.com/prometheus/client_model/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Data model artifacts for Prometheus. -Copyright 2012-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go deleted file mode 100644 index 2f4930d..0000000 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ /dev/null @@ -1,723 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: metrics.proto - -package io_prometheus_client - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - timestamp "github.com/golang/protobuf/ptypes/timestamp" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type MetricType int32 - -const ( - MetricType_COUNTER MetricType = 0 - MetricType_GAUGE MetricType = 1 - MetricType_SUMMARY MetricType = 2 - MetricType_UNTYPED MetricType = 3 - MetricType_HISTOGRAM MetricType = 4 -) - -var MetricType_name = map[int32]string{ - 0: "COUNTER", - 1: "GAUGE", - 2: "SUMMARY", - 3: "UNTYPED", - 4: "HISTOGRAM", -} - -var MetricType_value = map[string]int32{ - "COUNTER": 0, - "GAUGE": 1, - "SUMMARY": 2, - "UNTYPED": 3, - "HISTOGRAM": 4, -} - -func (x MetricType) Enum() *MetricType { - p := new(MetricType) - *p = x - return p -} - -func (x MetricType) String() string { - return proto.EnumName(MetricType_name, int32(x)) -} - -func (x *MetricType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") - if err != nil { - return err - } - *x = MetricType(value) - return nil -} - -func (MetricType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} -} - -type LabelPair struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LabelPair) Reset() { *m = LabelPair{} } -func (m *LabelPair) String() string { return proto.CompactTextString(m) } -func (*LabelPair) ProtoMessage() {} -func (*LabelPair) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{0} -} - -func (m *LabelPair) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LabelPair.Unmarshal(m, b) -} -func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) -} -func (m *LabelPair) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelPair.Merge(m, src) -} -func (m *LabelPair) XXX_Size() int { - return xxx_messageInfo_LabelPair.Size(m) -} -func (m *LabelPair) XXX_DiscardUnknown() { - xxx_messageInfo_LabelPair.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelPair proto.InternalMessageInfo - -func (m 
*LabelPair) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *LabelPair) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Gauge struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Gauge) Reset() { *m = Gauge{} } -func (m *Gauge) String() string { return proto.CompactTextString(m) } -func (*Gauge) ProtoMessage() {} -func (*Gauge) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{1} -} - -func (m *Gauge) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Gauge.Unmarshal(m, b) -} -func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) -} -func (m *Gauge) XXX_Merge(src proto.Message) { - xxx_messageInfo_Gauge.Merge(m, src) -} -func (m *Gauge) XXX_Size() int { - return xxx_messageInfo_Gauge.Size(m) -} -func (m *Gauge) XXX_DiscardUnknown() { - xxx_messageInfo_Gauge.DiscardUnknown(m) -} - -var xxx_messageInfo_Gauge proto.InternalMessageInfo - -func (m *Gauge) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Counter struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Counter) Reset() { *m = Counter{} } -func (m *Counter) String() string { return proto.CompactTextString(m) } -func (*Counter) ProtoMessage() {} -func (*Counter) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{2} -} - -func (m *Counter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Counter.Unmarshal(m, b) -} -func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Counter.Marshal(b, m, deterministic) -} -func (m *Counter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Counter.Merge(m, src) -} -func (m *Counter) XXX_Size() int { - return xxx_messageInfo_Counter.Size(m) -} -func (m *Counter) XXX_DiscardUnknown() { - xxx_messageInfo_Counter.DiscardUnknown(m) -} - -var xxx_messageInfo_Counter proto.InternalMessageInfo - -func (m *Counter) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Counter) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Quantile struct { - Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Quantile) Reset() { *m = Quantile{} } -func (m *Quantile) String() string { return proto.CompactTextString(m) } -func (*Quantile) ProtoMessage() {} -func (*Quantile) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{3} -} - -func (m *Quantile) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantile.Unmarshal(m, b) -} -func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) -} -func (m *Quantile) XXX_Merge(src proto.Message) { 
- xxx_messageInfo_Quantile.Merge(m, src) -} -func (m *Quantile) XXX_Size() int { - return xxx_messageInfo_Quantile.Size(m) -} -func (m *Quantile) XXX_DiscardUnknown() { - xxx_messageInfo_Quantile.DiscardUnknown(m) -} - -var xxx_messageInfo_Quantile proto.InternalMessageInfo - -func (m *Quantile) GetQuantile() float64 { - if m != nil && m.Quantile != nil { - return *m.Quantile - } - return 0 -} - -func (m *Quantile) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Summary struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Summary) Reset() { *m = Summary{} } -func (m *Summary) String() string { return proto.CompactTextString(m) } -func (*Summary) ProtoMessage() {} -func (*Summary) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{4} -} - -func (m *Summary) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Summary.Unmarshal(m, b) -} -func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Summary.Marshal(b, m, deterministic) -} -func (m *Summary) XXX_Merge(src proto.Message) { - xxx_messageInfo_Summary.Merge(m, src) -} -func (m *Summary) XXX_Size() int { - return xxx_messageInfo_Summary.Size(m) -} -func (m *Summary) XXX_DiscardUnknown() { - xxx_messageInfo_Summary.DiscardUnknown(m) -} - -var xxx_messageInfo_Summary proto.InternalMessageInfo - -func (m *Summary) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Summary) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Summary) GetQuantile() []*Quantile { - if m != nil { - return m.Quantile - } - return nil -} - -type Untyped struct { - Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Untyped) Reset() { *m = Untyped{} } -func (m *Untyped) String() string { return proto.CompactTextString(m) } -func (*Untyped) ProtoMessage() {} -func (*Untyped) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{5} -} - -func (m *Untyped) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Untyped.Unmarshal(m, b) -} -func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) -} -func (m *Untyped) XXX_Merge(src proto.Message) { - xxx_messageInfo_Untyped.Merge(m, src) -} -func (m *Untyped) XXX_Size() int { - return xxx_messageInfo_Untyped.Size(m) -} -func (m *Untyped) XXX_DiscardUnknown() { - xxx_messageInfo_Untyped.DiscardUnknown(m) -} - -var xxx_messageInfo_Untyped proto.InternalMessageInfo - -func (m *Untyped) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Histogram struct { - SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` - SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` - 
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Histogram) Reset() { *m = Histogram{} } -func (m *Histogram) String() string { return proto.CompactTextString(m) } -func (*Histogram) ProtoMessage() {} -func (*Histogram) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{6} -} - -func (m *Histogram) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Histogram.Unmarshal(m, b) -} -func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) -} -func (m *Histogram) XXX_Merge(src proto.Message) { - xxx_messageInfo_Histogram.Merge(m, src) -} -func (m *Histogram) XXX_Size() int { - return xxx_messageInfo_Histogram.Size(m) -} -func (m *Histogram) XXX_DiscardUnknown() { - xxx_messageInfo_Histogram.DiscardUnknown(m) -} - -var xxx_messageInfo_Histogram proto.InternalMessageInfo - -func (m *Histogram) GetSampleCount() uint64 { - if m != nil && m.SampleCount != nil { - return *m.SampleCount - } - return 0 -} - -func (m *Histogram) GetSampleSum() float64 { - if m != nil && m.SampleSum != nil { - return *m.SampleSum - } - return 0 -} - -func (m *Histogram) GetBucket() []*Bucket { - if m != nil { - return m.Bucket - } - return nil -} - -type Bucket struct { - CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` - UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` - Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Bucket) Reset() { *m = Bucket{} } -func (m *Bucket) String() string { return proto.CompactTextString(m) } -func (*Bucket) ProtoMessage() {} -func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{7} -} - -func (m *Bucket) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Bucket.Unmarshal(m, b) -} -func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) -} -func (m *Bucket) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bucket.Merge(m, src) -} -func (m *Bucket) XXX_Size() int { - return xxx_messageInfo_Bucket.Size(m) -} -func (m *Bucket) XXX_DiscardUnknown() { - xxx_messageInfo_Bucket.DiscardUnknown(m) -} - -var xxx_messageInfo_Bucket proto.InternalMessageInfo - -func (m *Bucket) GetCumulativeCount() uint64 { - if m != nil && m.CumulativeCount != nil { - return *m.CumulativeCount - } - return 0 -} - -func (m *Bucket) GetUpperBound() float64 { - if m != nil && m.UpperBound != nil { - return *m.UpperBound - } - return 0 -} - -func (m *Bucket) GetExemplar() *Exemplar { - if m != nil { - return m.Exemplar - } - return nil -} - -type Exemplar struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Exemplar) Reset() { *m = Exemplar{} } -func (m *Exemplar) String() string { return 
proto.CompactTextString(m) } -func (*Exemplar) ProtoMessage() {} -func (*Exemplar) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{8} -} - -func (m *Exemplar) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Exemplar.Unmarshal(m, b) -} -func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic) -} -func (m *Exemplar) XXX_Merge(src proto.Message) { - xxx_messageInfo_Exemplar.Merge(m, src) -} -func (m *Exemplar) XXX_Size() int { - return xxx_messageInfo_Exemplar.Size(m) -} -func (m *Exemplar) XXX_DiscardUnknown() { - xxx_messageInfo_Exemplar.DiscardUnknown(m) -} - -var xxx_messageInfo_Exemplar proto.InternalMessageInfo - -func (m *Exemplar) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Exemplar) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -func (m *Exemplar) GetTimestamp() *timestamp.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type Metric struct { - Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` - Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` - Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` - Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` - Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` - Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` - TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metric) Reset() { *m = Metric{} } -func (m *Metric) String() string { return proto.CompactTextString(m) } -func (*Metric) ProtoMessage() {} -func (*Metric) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{9} -} - -func (m *Metric) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Metric.Unmarshal(m, b) -} -func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Metric.Marshal(b, m, deterministic) -} -func (m *Metric) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metric.Merge(m, src) -} -func (m *Metric) XXX_Size() int { - return xxx_messageInfo_Metric.Size(m) -} -func (m *Metric) XXX_DiscardUnknown() { - xxx_messageInfo_Metric.DiscardUnknown(m) -} - -var xxx_messageInfo_Metric proto.InternalMessageInfo - -func (m *Metric) GetLabel() []*LabelPair { - if m != nil { - return m.Label - } - return nil -} - -func (m *Metric) GetGauge() *Gauge { - if m != nil { - return m.Gauge - } - return nil -} - -func (m *Metric) GetCounter() *Counter { - if m != nil { - return m.Counter - } - return nil -} - -func (m *Metric) GetSummary() *Summary { - if m != nil { - return m.Summary - } - return nil -} - -func (m *Metric) GetUntyped() *Untyped { - if m != nil { - return m.Untyped - } - return nil -} - -func (m *Metric) GetHistogram() *Histogram { - if m != nil { - return m.Histogram - } - return nil -} - -func (m *Metric) GetTimestampMs() int64 { - if m != nil && m.TimestampMs != nil { - return *m.TimestampMs - } - return 0 -} - -type MetricFamily struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` - Type *MetricType 
`protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` - Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MetricFamily) Reset() { *m = MetricFamily{} } -func (m *MetricFamily) String() string { return proto.CompactTextString(m) } -func (*MetricFamily) ProtoMessage() {} -func (*MetricFamily) Descriptor() ([]byte, []int) { - return fileDescriptor_6039342a2ba47b72, []int{10} -} - -func (m *MetricFamily) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MetricFamily.Unmarshal(m, b) -} -func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) -} -func (m *MetricFamily) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricFamily.Merge(m, src) -} -func (m *MetricFamily) XXX_Size() int { - return xxx_messageInfo_MetricFamily.Size(m) -} -func (m *MetricFamily) XXX_DiscardUnknown() { - xxx_messageInfo_MetricFamily.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricFamily proto.InternalMessageInfo - -func (m *MetricFamily) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MetricFamily) GetHelp() string { - if m != nil && m.Help != nil { - return *m.Help - } - return "" -} - -func (m *MetricFamily) GetType() MetricType { - if m != nil && m.Type != nil { - return *m.Type - } - return MetricType_COUNTER -} - -func (m *MetricFamily) GetMetric() []*Metric { - if m != nil { - return m.Metric - } - return nil -} - -func init() { - proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) - proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") - proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") - proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") - proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") - proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") - proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") - proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") - proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") - proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar") - proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") - proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") -} - -func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) } - -var fileDescriptor_6039342a2ba47b72 = []byte{ - // 665 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55, - 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2, - 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e, - 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa, - 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66, - 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4, - 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45, - 0x73, 0xf6, 0xc1, 0x7a, 
0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a, - 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d, - 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b, - 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22, - 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79, - 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0, - 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00, - 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01, - 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe, - 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55, - 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f, - 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31, - 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16, - 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e, - 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c, - 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f, - 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57, - 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64, - 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76, - 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7, - 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95, - 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed, - 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33, - 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07, - 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72, - 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56, - 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6, - 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f, - 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f, - 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27, - 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83, - 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24, - 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00, -} diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/common/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - 
http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE deleted file mode 100644 index 636a2c1..0000000 --- a/vendor/github.com/prometheus/common/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Common libraries shared by Prometheus Go components. -Copyright 2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go deleted file mode 100644 index c092723..0000000 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "math" - "mime" - "net/http" - - dto "github.com/prometheus/client_model/go" - - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/model" -) - -// Decoder types decode an input stream into metric families. -type Decoder interface { - Decode(*dto.MetricFamily) error -} - -// DecodeOptions contains options used by the Decoder and in sample extraction. -type DecodeOptions struct { - // Timestamp is added to each value from the stream that has no explicit timestamp set. - Timestamp model.Time -} - -// ResponseFormat extracts the correct format from a HTTP response header. -// If no matching format can be found FormatUnknown is returned. -func ResponseFormat(h http.Header) Format { - ct := h.Get(hdrContentType) - - mediatype, params, err := mime.ParseMediaType(ct) - if err != nil { - return FmtUnknown - } - - const textType = "text/plain" - - switch mediatype { - case ProtoType: - if p, ok := params["proto"]; ok && p != ProtoProtocol { - return FmtUnknown - } - if e, ok := params["encoding"]; ok && e != "delimited" { - return FmtUnknown - } - return FmtProtoDelim - - case textType: - if v, ok := params["version"]; ok && v != TextVersion { - return FmtUnknown - } - return FmtText - } - - return FmtUnknown -} - -// NewDecoder returns a new decoder based on the given input format. -// If the input format does not imply otherwise, a text format decoder is returned. -func NewDecoder(r io.Reader, format Format) Decoder { - switch format { - case FmtProtoDelim: - return &protoDecoder{r: r} - } - return &textDecoder{r: r} -} - -// protoDecoder implements the Decoder interface for protocol buffers. -type protoDecoder struct { - r io.Reader -} - -// Decode implements the Decoder interface. 
-func (d *protoDecoder) Decode(v *dto.MetricFamily) error { - _, err := pbutil.ReadDelimited(d.r, v) - if err != nil { - return err - } - if !model.IsValidMetricName(model.LabelValue(v.GetName())) { - return fmt.Errorf("invalid metric name %q", v.GetName()) - } - for _, m := range v.GetMetric() { - if m == nil { - continue - } - for _, l := range m.GetLabel() { - if l == nil { - continue - } - if !model.LabelValue(l.GetValue()).IsValid() { - return fmt.Errorf("invalid label value %q", l.GetValue()) - } - if !model.LabelName(l.GetName()).IsValid() { - return fmt.Errorf("invalid label name %q", l.GetName()) - } - } - } - return nil -} - -// textDecoder implements the Decoder interface for the text protocol. -type textDecoder struct { - r io.Reader - p TextParser - fams []*dto.MetricFamily -} - -// Decode implements the Decoder interface. -func (d *textDecoder) Decode(v *dto.MetricFamily) error { - // TODO(fabxc): Wrap this as a line reader to make streaming safer. - if len(d.fams) == 0 { - // No cached metric families, read everything and parse metrics. - fams, err := d.p.TextToMetricFamilies(d.r) - if err != nil { - return err - } - if len(fams) == 0 { - return io.EOF - } - d.fams = make([]*dto.MetricFamily, 0, len(fams)) - for _, f := range fams { - d.fams = append(d.fams, f) - } - } - - *v = *d.fams[0] - d.fams = d.fams[1:] - - return nil -} - -// SampleDecoder wraps a Decoder to extract samples from the metric families -// decoded by the wrapped Decoder. -type SampleDecoder struct { - Dec Decoder - Opts *DecodeOptions - - f dto.MetricFamily -} - -// Decode calls the Decode method of the wrapped Decoder and then extracts the -// samples from the decoded MetricFamily into the provided model.Vector. -func (sd *SampleDecoder) Decode(s *model.Vector) error { - err := sd.Dec.Decode(&sd.f) - if err != nil { - return err - } - *s, err = extractSamples(&sd.f, sd.Opts) - return err -} - -// ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurrs during sample extraction, it continues to -// extract from the remaining metric families. The returned error is the last -// error that has occurred. -func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { - var ( - all model.Vector - lastErr error - ) - for _, f := range fams { - some, err := extractSamples(f, o) - if err != nil { - lastErr = err - continue - } - all = append(all, some...) 
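[Reviewer note] For reference, a minimal sketch of how a caller drives the decoders being removed above: SampleDecoder wraps any Decoder and flattens each MetricFamily into model samples (when reading a live scrape, expfmt.ResponseFormat(resp.Header) would pick the Format instead of hardcoding it). The metric text, name, and label here are invented for illustration:

    package main

    import (
        "fmt"
        "strings"

        "github.com/prometheus/common/expfmt"
        "github.com/prometheus/common/model"
    )

    func main() {
        in := strings.NewReader("requests_total{path=\"/\"} 42\n")
        sd := expfmt.SampleDecoder{
            Dec:  expfmt.NewDecoder(in, expfmt.FmtText),
            Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
        }
        var v model.Vector
        if err := sd.Decode(&v); err != nil { // returns io.EOF once the input is drained
            panic(err)
        }
        fmt.Println(v) // requests_total{path="/"} => 42
    }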
- } - return all, lastErr -} - -func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { - switch f.GetType() { - case dto.MetricType_COUNTER: - return extractCounter(o, f), nil - case dto.MetricType_GAUGE: - return extractGauge(o, f), nil - case dto.MetricType_SUMMARY: - return extractSummary(o, f), nil - case dto.MetricType_UNTYPED: - return extractUntyped(o, f), nil - case dto.MetricType_HISTOGRAM: - return extractHistogram(o, f), nil - } - return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) -} - -func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Counter == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Counter.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Gauge == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Gauge.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Untyped == nil { - continue - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - smpl := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Untyped.GetValue()), - } - - if m.TimestampMs != nil { - smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } else { - smpl.Timestamp = o.Timestamp - } - - samples = append(samples, smpl) - } - - return samples -} - -func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Summary == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - for _, q := range m.Summary.Quantile { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - // BUG(matt): Update other names to "quantile". 
- lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetValue()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Summary.GetSampleCount()), - Timestamp: timestamp, - }) - } - - return samples -} - -func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { - samples := make(model.Vector, 0, len(f.Metric)) - - for _, m := range f.Metric { - if m.Histogram == nil { - continue - } - - timestamp := o.Timestamp - if m.TimestampMs != nil { - timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) - } - - infSeen := false - - for _, q := range m.Histogram.Bucket { - lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - if math.IsInf(q.GetUpperBound(), +1) { - infSeen = true - } - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(q.GetCumulativeCount()), - Timestamp: timestamp, - }) - } - - lset := make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleSum()), - Timestamp: timestamp, - }) - - lset = make(model.LabelSet, len(m.Label)+1) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") - - count := &model.Sample{ - Metric: model.Metric(lset), - Value: model.SampleValue(m.Histogram.GetSampleCount()), - Timestamp: timestamp, - } - samples = append(samples, count) - - if !infSeen { - // Append an infinity bucket sample. 
- lset := make(model.LabelSet, len(m.Label)+2) - for _, p := range m.Label { - lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) - } - lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") - lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") - - samples = append(samples, &model.Sample{ - Metric: model.Metric(lset), - Value: count.Value, - Timestamp: timestamp, - }) - } - } - - return samples -} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go deleted file mode 100644 index bd4e347..0000000 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "fmt" - "io" - "net/http" - - "github.com/golang/protobuf/proto" - "github.com/matttproud/golang_protobuf_extensions/pbutil" - "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" - - dto "github.com/prometheus/client_model/go" -) - -// Encoder types encode metric families into an underlying wire protocol. -type Encoder interface { - Encode(*dto.MetricFamily) error -} - -// Closer is implemented by Encoders that need to be closed to finalize -// encoding. (For example, OpenMetrics needs a final `# EOF` line.) -// -// Note that all Encoder implementations returned from this package implement -// Closer, too, even if the Close call is a no-op. This happens in preparation -// for adding a Close method to the Encoder interface directly in a (mildly -// breaking) release in the future. -type Closer interface { - Close() error -} - -type encoderCloser struct { - encode func(*dto.MetricFamily) error - close func() error -} - -func (ec encoderCloser) Encode(v *dto.MetricFamily) error { - return ec.encode(v) -} - -func (ec encoderCloser) Close() error { - return ec.close() -} - -// Negotiate returns the Content-Type based on the given Accept header. If no -// appropriate accepted type is found, FmtText is returned (which is the -// Prometheus text format). This function will never negotiate FmtOpenMetrics, -// as the support is still experimental. To include the option to negotiate -// FmtOpenMetrics, use NegotiateOpenMetrics. -func Negotiate(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - } - return FmtText -} - -// NegotiateIncludingOpenMetrics works like Negotiate but includes -// FmtOpenMetrics as an option for the result. 
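[Reviewer note] A sketch of how Negotiate and NewEncoder from the encode.go being deleted here are meant to be combined in a scrape handler. The handler name is invented, a client_golang *prometheus.Registry is assumed as the source of families, and the imports (net/http, github.com/prometheus/client_golang/prometheus, github.com/prometheus/common/expfmt) are omitted for brevity:

    func metricsHandler(reg *prometheus.Registry) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            mfs, err := reg.Gather()
            if err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
            format := expfmt.Negotiate(r.Header) // falls back to FmtText
            w.Header().Set("Content-Type", string(format))
            enc := expfmt.NewEncoder(w, format)
            for _, mf := range mfs {
                if err := enc.Encode(mf); err != nil {
                    return
                }
            }
            if c, ok := enc.(expfmt.Closer); ok {
                c.Close() // a no-op except for OpenMetrics, which needs "# EOF"
            }
        }
    }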
Note that this function is -// temporary and will disappear once FmtOpenMetrics is fully supported and as -// such may be negotiated by the normal Negotiate function. -func NegotiateIncludingOpenMetrics(h http.Header) Format { - for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { - ver := ac.Params["version"] - if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { - switch ac.Params["encoding"] { - case "delimited": - return FmtProtoDelim - case "text": - return FmtProtoText - case "compact-text": - return FmtProtoCompact - } - } - if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return FmtText - } - if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") { - return FmtOpenMetrics - } - } - return FmtText -} - -// NewEncoder returns a new encoder based on content type negotiation. All -// Encoder implementations returned by NewEncoder also implement Closer, and -// callers should always call the Close method. It is currently only required -// for FmtOpenMetrics, but a future (breaking) release will add the Close method -// to the Encoder interface directly. The current version of the Encoder -// interface is kept for backwards compatibility. -func NewEncoder(w io.Writer, format Format) Encoder { - switch format { - case FmtProtoDelim: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := pbutil.WriteDelimited(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtProtoCompact: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, v.String()) - return err - }, - close: func() error { return nil }, - } - case FmtProtoText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) - return err - }, - close: func() error { return nil }, - } - case FmtText: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToText(w, v) - return err - }, - close: func() error { return nil }, - } - case FmtOpenMetrics: - return encoderCloser{ - encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, v) - return err - }, - close: func() error { - _, err := FinalizeOpenMetrics(w) - return err - }, - } - } - panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format)) -} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go deleted file mode 100644 index 0f176fa..0000000 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package expfmt contains tools for reading and writing Prometheus metrics. -package expfmt - -// Format specifies the HTTP content type of the different wire protocols. 
-type Format string - -// Constants to assemble the Content-Type values for the different wire protocols. -const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" - OpenMetricsType = `application/openmetrics-text` - OpenMetricsVersion = "0.0.1" - - // The Content-Type values for the different wire protocols. - FmtUnknown Format = `` - FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` - FmtProtoText Format = ProtoFmt + ` encoding=text` - FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` - FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8` -) - -const ( - hdrContentType = "Content-Type" - hdrAccept = "Accept" -) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go deleted file mode 100644 index dc2eede..0000000 --- a/vendor/github.com/prometheus/common/expfmt/fuzz.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Build only when actually fuzzing -// +build gofuzz - -package expfmt - -import "bytes" - -// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: -// -// go-fuzz-build github.com/prometheus/common/expfmt -// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz -// -// Further input samples should go in the folder fuzz/corpus. -func Fuzz(in []byte) int { - parser := TextParser{} - _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) - - if err != nil { - return 0 - } - - return 1 -} diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go deleted file mode 100644 index 8a9313a..0000000 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - "github.com/golang/protobuf/ptypes" - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the -// OpenMetrics text format and writes the resulting lines to 'out'. 
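[Reviewer note] The constants in the expfmt.go deleted above assemble the literal Content-Type values; concretely (a fragment, assuming fmt and expfmt are imported):

    fmt.Println(string(expfmt.FmtText))
    // text/plain; version=0.0.4; charset=utf-8
    fmt.Println(string(expfmt.FmtOpenMetrics))
    // application/openmetrics-text; version=0.0.1; charset=utf-8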
It returns -// the number of bytes written and any error encountered. The output will have -// the same order as the input, no further sorting is performed. Furthermore, -// this function assumes the input is already sanitized and does not perform any -// sanity checks. If the input contains duplicate metrics or invalid metric or -// label names, the conversion will result in invalid text format output. -// -// This function fulfills the type 'expfmt.encoder'. -// -// Note that OpenMetrics requires a final `# EOF` line. Since this function acts -// on individual metric families, it is the responsibility of the caller to -// append this line to 'out' once all metric families have been written. -// Conveniently, this can be done by calling FinalizeOpenMetrics. -// -// The output should be fully OpenMetrics compliant. However, there are a few -// missing features and peculiarities to avoid complications when switching from -// Prometheus to OpenMetrics or vice versa: -// -// - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, -// its type will be set to `unknown` in that case to avoid invalid OpenMetrics -// output. -// -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. -// -// - The size of exemplar labels is not checked (i.e. it's possible to create -// exemplars that are larger than allowed by the OpenMetrics specification). -// -// - The value of Counters is not checked. (OpenMetrics doesn't allow counters -// with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var ( - n int - metricType = in.GetType() - shortName = name - ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] - } - - // Comments, first HELP, then TYPE. 
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, true) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(shortName) - written += n - if err != nil { - return - } - switch metricType { - case dto.MetricType_COUNTER: - if strings.HasSuffix(name, "_total") { - n, err = w.WriteString(" counter\n") - } else { - n, err = w.WriteString(" unknown\n") - } - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" unknown\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), 0, false, - metric.Counter.Exemplar, - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), 0, false, - nil, - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), 0, false, - nil, - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeOpenMetricsSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Summary.GetSampleCount(), true, - nil, - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - 0, b.GetCumulativeCount(), true, - b.Exemplar, - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), 
- 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - written += n - if err != nil { - return - } - } - n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), 0, false, - nil, - ) - written += n - if err != nil { - return - } - n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, - 0, metric.Histogram.GetSampleCount(), true, - nil, - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - written += n - if err != nil { - return - } - } - return -} - -// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics. -func FinalizeOpenMetrics(w io.Writer) (written int, err error) { - return w.Write([]byte("# EOF\n")) -} - -// writeOpenMetricsSample writes a single sample in OpenMetrics text format to -// w, given the metric name, the metric proto message itself, optionally an -// additional label name with a float64 value (use empty string as label name if -// not required), the value (optionally as float64 or uint64, determined by -// useIntValue), and optionally an exemplar (use nil if not required). The -// function returns the number of bytes written and any error encountered. -func writeOpenMetricsSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - floatValue float64, intValue uint64, useIntValue bool, - exemplar *dto.Exemplar, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeOpenMetricsLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - if useIntValue { - n, err = writeUint(w, intValue) - } else { - n, err = writeOpenMetricsFloat(w, floatValue) - } - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly without converting to a float first. - n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000) - written += n - if err != nil { - return written, err - } - } - if exemplar != nil { - n, err = writeExemplar(w, exemplar) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float -// in OpenMetrics style. 
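[Reviewer note] Putting MetricFamilyToOpenMetrics and FinalizeOpenMetrics together: per the doc comment above, the caller owns the trailing EOF line. A fragment, where `families` stands in for whatever []*dto.MetricFamily the caller has gathered:

    var buf bytes.Buffer
    for _, mf := range families {
        if _, err := expfmt.MetricFamilyToOpenMetrics(&buf, mf); err != nil {
            log.Fatal(err)
        }
    }
    if _, err := expfmt.FinalizeOpenMetrics(&buf); err != nil {
        log.Fatal(err)
    }
    // buf now ends with the mandatory "# EOF" line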
-func writeOpenMetricsLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeExemplar writes the provided exemplar in OpenMetrics format to w. The -// function returns the number of bytes written and any error encountered. -func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { - written := 0 - n, err := w.WriteString(" # ") - written += n - if err != nil { - return written, err - } - n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeOpenMetricsFloat(w, e.GetValue()) - written += n - if err != nil { - return written, err - } - if e.Timestamp != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - ts, err := ptypes.Timestamp((*e).Timestamp) - if err != nil { - return written, err - } - // TODO(beorn7): Format this directly from components of ts to - // avoid overflow/underflow and precision issues of the float - // conversion. - n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9) - written += n - if err != nil { - return written, err - } - } - return written, nil -} - -// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting -// number would otherwise contain neither a "." nor an "e". -func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return w.WriteString("1.0") - case f == 0: - return w.WriteString("0.0") - case f == -1: - return w.WriteString("-1.0") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - if !bytes.ContainsAny(*bp, "e.") { - *bp = append(*bp, '.', '0') - } - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeUint is like writeInt just for uint64. 
-func writeUint(w enhancedWriter, u uint64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendUint((*bp)[:0], u, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go deleted file mode 100644 index 5ba503b..0000000 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "math" - "strconv" - "strings" - "sync" - - "github.com/prometheus/common/model" - - dto "github.com/prometheus/client_model/go" -) - -// enhancedWriter has all the enhanced write functions needed here. bufio.Writer -// implements it. -type enhancedWriter interface { - io.Writer - WriteRune(r rune) (n int, err error) - WriteString(s string) (n int, err error) - WriteByte(c byte) error -} - -const ( - initialNumBufSize = 24 -) - -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return bufio.NewWriter(ioutil.Discard) - }, - } - numBufPool = sync.Pool{ - New: func() interface{} { - b := make([]byte, 0, initialNumBufSize) - return &b - }, - } -) - -// MetricFamilyToText converts a MetricFamily proto message into text format and -// writes the resulting lines to 'out'. It returns the number of bytes written -// and any error encountered. The output will have the same order as the input, -// no further sorting is performed. Furthermore, this function assumes the input -// is already sanitized and does not perform any sanity checks. If the input -// contains duplicate metrics or invalid metric or label names, the conversion -// will result in invalid text format output. -// -// This method fulfills the type 'prometheus.encoder'. -func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) { - // Fail-fast checks. - if len(in.Metric) == 0 { - return 0, fmt.Errorf("MetricFamily has no metrics: %s", in) - } - name := in.GetName() - if name == "" { - return 0, fmt.Errorf("MetricFamily has no name: %s", in) - } - - // Try the interface upgrade. If it doesn't work, we'll use a - // bufio.Writer from the sync.Pool. - w, ok := out.(enhancedWriter) - if !ok { - b := bufPool.Get().(*bufio.Writer) - b.Reset(out) - w = b - defer func() { - bErr := b.Flush() - if err == nil { - err = bErr - } - bufPool.Put(b) - }() - } - - var n int - - // Comments, first HELP, then TYPE. 
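[Reviewer note] A self-contained sketch of feeding MetricFamilyToText (defined in the text_create.go deleted here) a hand-built family; the metric name, help string, and values are invented, and `proto` is github.com/golang/protobuf/proto:

    mf := &dto.MetricFamily{
        Name: proto.String("queue_depth"),
        Help: proto.String("Current depth of the work queue."),
        Type: dto.MetricType_GAUGE.Enum(),
        Metric: []*dto.Metric{{
            Label: []*dto.LabelPair{{Name: proto.String("shard"), Value: proto.String("a")}},
            Gauge: &dto.Gauge{Value: proto.Float64(7)},
        }},
    }
    if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
        log.Fatal(err)
    }
    // # HELP queue_depth Current depth of the work queue.
    // # TYPE queue_depth gauge
    // queue_depth{shard="a"} 7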
- if in.Help != nil { - n, err = w.WriteString("# HELP ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - err = w.WriteByte(' ') - written++ - if err != nil { - return - } - n, err = writeEscapedString(w, *in.Help, false) - written += n - if err != nil { - return - } - err = w.WriteByte('\n') - written++ - if err != nil { - return - } - } - n, err = w.WriteString("# TYPE ") - written += n - if err != nil { - return - } - n, err = w.WriteString(name) - written += n - if err != nil { - return - } - metricType := in.GetType() - switch metricType { - case dto.MetricType_COUNTER: - n, err = w.WriteString(" counter\n") - case dto.MetricType_GAUGE: - n, err = w.WriteString(" gauge\n") - case dto.MetricType_SUMMARY: - n, err = w.WriteString(" summary\n") - case dto.MetricType_UNTYPED: - n, err = w.WriteString(" untyped\n") - case dto.MetricType_HISTOGRAM: - n, err = w.WriteString(" histogram\n") - default: - return written, fmt.Errorf("unknown metric type %s", metricType.String()) - } - written += n - if err != nil { - return - } - - // Finally the samples, one line for each. - for _, metric := range in.Metric { - switch metricType { - case dto.MetricType_COUNTER: - if metric.Counter == nil { - return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Counter.GetValue(), - ) - case dto.MetricType_GAUGE: - if metric.Gauge == nil { - return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Gauge.GetValue(), - ) - case dto.MetricType_UNTYPED: - if metric.Untyped == nil { - return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, - ) - } - n, err = writeSample( - w, name, "", metric, "", 0, - metric.Untyped.GetValue(), - ) - case dto.MetricType_SUMMARY: - if metric.Summary == nil { - return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, - ) - } - for _, q := range metric.Summary.Quantile { - n, err = writeSample( - w, name, "", metric, - model.QuantileLabel, q.GetQuantile(), - q.GetValue(), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Summary.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Summary.GetSampleCount()), - ) - case dto.MetricType_HISTOGRAM: - if metric.Histogram == nil { - return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, - ) - } - infSeen := false - for _, b := range metric.Histogram.Bucket { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, b.GetUpperBound(), - float64(b.GetCumulativeCount()), - ) - written += n - if err != nil { - return - } - if math.IsInf(b.GetUpperBound(), +1) { - infSeen = true - } - } - if !infSeen { - n, err = writeSample( - w, name, "_bucket", metric, - model.BucketLabel, math.Inf(+1), - float64(metric.Histogram.GetSampleCount()), - ) - written += n - if err != nil { - return - } - } - n, err = writeSample( - w, name, "_sum", metric, "", 0, - metric.Histogram.GetSampleSum(), - ) - written += n - if err != nil { - return - } - n, err = writeSample( - w, name, "_count", metric, "", 0, - float64(metric.Histogram.GetSampleCount()), - ) - default: - return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, - ) - } - 
written += n - if err != nil { - return - } - } - return -} - -// writeSample writes a single sample in text format to w, given the metric -// name, the metric proto message itself, optionally an additional label name -// with a float64 value (use empty string as label name if not required), and -// the value. The function returns the number of bytes written and any error -// encountered. -func writeSample( - w enhancedWriter, - name, suffix string, - metric *dto.Metric, - additionalLabelName string, additionalLabelValue float64, - value float64, -) (int, error) { - var written int - n, err := w.WriteString(name) - written += n - if err != nil { - return written, err - } - if suffix != "" { - n, err = w.WriteString(suffix) - written += n - if err != nil { - return written, err - } - } - n, err = writeLabelPairs( - w, metric.Label, additionalLabelName, additionalLabelValue, - ) - written += n - if err != nil { - return written, err - } - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeFloat(w, value) - written += n - if err != nil { - return written, err - } - if metric.TimestampMs != nil { - err = w.WriteByte(' ') - written++ - if err != nil { - return written, err - } - n, err = writeInt(w, *metric.TimestampMs) - written += n - if err != nil { - return written, err - } - } - err = w.WriteByte('\n') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeLabelPairs converts a slice of LabelPair proto messages plus the -// explicitly given additional label pair into text formatted as required by the -// text format and writes it to 'w'. An empty slice in combination with an empty -// string 'additionalLabelName' results in nothing being written. Otherwise, the -// label pairs are written, escaped as required by the text format, and enclosed -// in '{...}'. The function returns the number of bytes written and any error -// encountered. -func writeLabelPairs( - w enhancedWriter, - in []*dto.LabelPair, - additionalLabelName string, additionalLabelValue float64, -) (int, error) { - if len(in) == 0 && additionalLabelName == "" { - return 0, nil - } - var ( - written int - separator byte = '{' - ) - for _, lp := range in { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(lp.GetName()) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeEscapedString(w, lp.GetValue(), true) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - separator = ',' - } - if additionalLabelName != "" { - err := w.WriteByte(separator) - written++ - if err != nil { - return written, err - } - n, err := w.WriteString(additionalLabelName) - written += n - if err != nil { - return written, err - } - n, err = w.WriteString(`="`) - written += n - if err != nil { - return written, err - } - n, err = writeFloat(w, additionalLabelValue) - written += n - if err != nil { - return written, err - } - err = w.WriteByte('"') - written++ - if err != nil { - return written, err - } - } - err := w.WriteByte('}') - written++ - if err != nil { - return written, err - } - return written, nil -} - -// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if -// includeDoubleQuote is true - '"' by '\"'. 
-var ( - escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) - quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) -) - -func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { - if includeDoubleQuote { - return quotedEscaper.WriteString(w, v) - } - return escaper.WriteString(w, v) -} - -// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes -// a few common cases for increased efficiency. For non-hardcoded cases, it uses -// strconv.AppendFloat to avoid allocations, similar to writeInt. -func writeFloat(w enhancedWriter, f float64) (int, error) { - switch { - case f == 1: - return 1, w.WriteByte('1') - case f == 0: - return 1, w.WriteByte('0') - case f == -1: - return w.WriteString("-1") - case math.IsNaN(f): - return w.WriteString("NaN") - case math.IsInf(f, +1): - return w.WriteString("+Inf") - case math.IsInf(f, -1): - return w.WriteString("-Inf") - default: - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err - } -} - -// writeInt is equivalent to fmt.Fprint with an int64 argument but uses -// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid -// allocations. -func writeInt(w enhancedWriter, i int64) (int, error) { - bp := numBufPool.Get().(*[]byte) - *bp = strconv.AppendInt((*bp)[:0], i, 10) - written, err := w.Write(*bp) - numBufPool.Put(bp) - return written, err -} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go deleted file mode 100644 index 342e594..0000000 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ /dev/null @@ -1,764 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package expfmt - -import ( - "bufio" - "bytes" - "fmt" - "io" - "math" - "strconv" - "strings" - - dto "github.com/prometheus/client_model/go" - - "github.com/golang/protobuf/proto" - "github.com/prometheus/common/model" -) - -// A stateFn is a function that represents a state in a state machine. By -// executing it, the state is progressed to the next state. The stateFn returns -// another stateFn, which represents the new state. The end state is represented -// by nil. -type stateFn func() stateFn - -// ParseError signals errors while parsing the simple and flat text-based -// exchange format. -type ParseError struct { - Line int - Msg string -} - -// Error implements the error interface. -func (e ParseError) Error() string { - return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) -} - -// TextParser is used to parse the simple and flat text-based exchange format. Its -// zero value is ready to use. -type TextParser struct { - metricFamiliesByName map[string]*dto.MetricFamily - buf *bufio.Reader // Where the parsed input is read through. - err error // Most recent error. 
- lineCount int // Tracks the line count for error messages. - currentByte byte // The most recent byte read. - currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. - currentMF *dto.MetricFamily - currentMetric *dto.Metric - currentLabelPair *dto.LabelPair - - // The remaining member variables are only used for summaries/histograms. - currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' - // Summary specific. - summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentQuantile float64 - // Histogram specific. - histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. - currentBucket float64 - // These tell us if the currently processed line ends on '_count' or - // '_sum' respectively and belong to a summary/histogram, representing the sample - // count and sum of that summary/histogram. - currentIsSummaryCount, currentIsSummarySum bool - currentIsHistogramCount, currentIsHistogramSum bool -} - -// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange -// format and creates MetricFamily proto messages. It returns the MetricFamily -// proto messages in a map where the metric names are the keys, along with any -// error encountered. -// -// If the input contains duplicate metrics (i.e. lines with the same metric name -// and exactly the same label set), the resulting MetricFamily will contain -// duplicate Metric proto messages. Similar is true for duplicate label -// names. Checks for duplicates have to be performed separately, if required. -// Also note that neither the metrics within each MetricFamily are sorted nor -// the label pairs within each Metric. Sorting is not required for the most -// frequent use of this method, which is sample ingestion in the Prometheus -// server. However, for presentation purposes, you might want to sort the -// metrics, and in some cases, you must sort the labels, e.g. for consumption by -// the metric family injection hook of the Prometheus registry. -// -// Summaries and histograms are rather special beasts. You would probably not -// use them in the simple text format anyway. This method can deal with -// summaries and histograms if they are presented in exactly the way the -// text.Create function creates them. -// -// This method must not be called concurrently. If you want to parse different -// input concurrently, instantiate a separate Parser for each goroutine. -func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { - p.reset(in) - for nextState := p.startOfLine; nextState != nil; nextState = nextState() { - // Magic happens here... - } - // Get rid of empty metric families. - for k, mf := range p.metricFamiliesByName { - if len(mf.GetMetric()) == 0 { - delete(p.metricFamiliesByName, k) - } - } - // If p.err is io.EOF now, we have run into a premature end of the input - // stream. Turn this error into something nicer and more - // meaningful. (io.EOF is often used as a signal for the legitimate end - // of an input stream.) 
- if p.err == io.EOF { - p.parseError("unexpected end of input stream") - } - return p.metricFamiliesByName, p.err -} - -func (p *TextParser) reset(in io.Reader) { - p.metricFamiliesByName = map[string]*dto.MetricFamily{} - if p.buf == nil { - p.buf = bufio.NewReader(in) - } else { - p.buf.Reset(in) - } - p.err = nil - p.lineCount = 0 - if p.summaries == nil || len(p.summaries) > 0 { - p.summaries = map[uint64]*dto.Metric{} - } - if p.histograms == nil || len(p.histograms) > 0 { - p.histograms = map[uint64]*dto.Metric{} - } - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() -} - -// startOfLine represents the state where the next byte read from p.buf is the -// start of a line (or whitespace leading up to it). -func (p *TextParser) startOfLine() stateFn { - p.lineCount++ - if p.skipBlankTab(); p.err != nil { - // End of input reached. This is the only case where - // that is not an error but a signal that we are done. - p.err = nil - return nil - } - switch p.currentByte { - case '#': - return p.startComment - case '\n': - return p.startOfLine // Empty line, start the next one. - } - return p.readingMetricName -} - -// startComment represents the state where the next byte read from p.buf is the -// start of a comment (or whitespace leading up to it). -func (p *TextParser) startComment() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - return p.startOfLine - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - // If we have hit the end of line already, there is nothing left - // to do. This is not considered a syntax error. - if p.currentByte == '\n' { - return p.startOfLine - } - keyword := p.currentToken.String() - if keyword != "HELP" && keyword != "TYPE" { - // Generic comment, ignore by fast forwarding to end of line. - for p.currentByte != '\n' { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return nil // Unexpected end of input. - } - } - return p.startOfLine - } - // There is something. Next has to be a metric name. - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenAsMetricName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - if !isBlankOrTab(p.currentByte) { - p.parseError("invalid metric name in comment") - return nil - } - p.setOrCreateCurrentMF() - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '\n' { - // At the end of the line already. - // Again, this is not considered a syntax error. - return p.startOfLine - } - switch keyword { - case "HELP": - return p.readingHelp - case "TYPE": - return p.readingType - } - panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) -} - -// readingMetricName represents the state where the last byte read (now in -// p.currentByte) is the first byte of a metric name. -func (p *TextParser) readingMetricName() stateFn { - if p.readTokenAsMetricName(); p.err != nil { - return nil - } - if p.currentToken.Len() == 0 { - p.parseError("invalid metric name") - return nil - } - p.setOrCreateCurrentMF() - // Now is the time to fix the type if it hasn't happened yet. 
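[Reviewer note] Round-tripping through the parser whose states follow: a zero-value TextParser is ready to use, and the returned map is keyed by metric name. Input text is invented for illustration:

    var parser expfmt.TextParser
    mfs, err := parser.TextToMetricFamilies(strings.NewReader(
        "# TYPE http_requests_total counter\n" +
            "http_requests_total{code=\"200\"} 1027\n"))
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(mfs["http_requests_total"].GetType()) // COUNTER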
- if p.currentMF.Type == nil { - p.currentMF.Type = dto.MetricType_UNTYPED.Enum() - } - p.currentMetric = &dto.Metric{} - // Do not append the newly created currentMetric to - // currentMF.Metric right now. First wait if this is a summary, - // and the metric exists already, which we can only know after - // having read all the labels. - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingLabels -} - -// readingLabels represents the state where the last byte read (now in -// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the -// first byte of the value (otherwise). -func (p *TextParser) readingLabels() stateFn { - // Summaries/histograms are special. We have to reset the - // currentLabels map, currentQuantile and currentBucket before starting to - // read labels. - if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - p.currentLabels = map[string]string{} - p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() - p.currentQuantile = math.NaN() - p.currentBucket = math.NaN() - } - if p.currentByte != '{' { - return p.readingValue - } - return p.startLabelName -} - -// startLabelName represents the state where the next byte read from p.buf is -// the start of a label name (or whitespace leading up to it). -func (p *TextParser) startLabelName() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte == '}' { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - } - if p.readTokenAsLabelName(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() == 0 { - p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) - return nil - } - p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} - if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { - p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) - return nil - } - // Special summary/histogram treatment. Don't add 'quantile' and 'le' - // labels to 'real' labels. - if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && - !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil - } - return p.startLabelValue -} - -// startLabelValue represents the state where the next byte read from p.buf is -// the start of a (quoted) label value (or whitespace leading up to it). -func (p *TextParser) startLabelValue() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. 
- } - if p.currentByte != '"' { - p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) - return nil - } - if p.readTokenAsLabelValue(); p.err != nil { - return nil - } - if !model.LabelValue(p.currentToken.String()).IsValid() { - p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) - return nil - } - p.currentLabelPair.Value = proto.String(p.currentToken.String()) - // Special treatment of summaries: - // - Quantile labels are special, will result in dto.Quantile later. - // - Other labels have to be added to currentLabels for signature calculation. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if p.currentLabelPair.GetName() == model.QuantileLabel { - if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - // Similar special treatment of histograms. - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if p.currentLabelPair.GetName() == model.BucketLabel { - if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) - return nil - } - } else { - p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() - } - } - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - switch p.currentByte { - case ',': - return p.startLabelName - - case '}': - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - return p.readingValue - default: - p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) - return nil - } -} - -// readingValue represents the state where the last byte read (now in -// p.currentByte) is the first byte of the sample value (i.e. a float). -func (p *TextParser) readingValue() stateFn { - // When we are here, we have read all the labels, so for the - // special case of a summary/histogram, we can finally find out - // if the metric already exists. - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - signature := model.LabelsToSignature(p.currentLabels) - if summary := p.summaries[signature]; summary != nil { - p.currentMetric = summary - } else { - p.summaries[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - signature := model.LabelsToSignature(p.currentLabels) - if histogram := p.histograms[signature]; histogram != nil { - p.currentMetric = histogram - } else { - p.histograms[signature] = p.currentMetric - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - } else { - p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - value, err := parseFloat(p.currentToken.String()) - if err != nil { - // Create a more helpful error message. 
- p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) - return nil - } - switch p.currentMF.GetType() { - case dto.MetricType_COUNTER: - p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} - case dto.MetricType_GAUGE: - p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} - case dto.MetricType_UNTYPED: - p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} - case dto.MetricType_SUMMARY: - // *sigh* - if p.currentMetric.Summary == nil { - p.currentMetric.Summary = &dto.Summary{} - } - switch { - case p.currentIsSummaryCount: - p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsSummarySum: - p.currentMetric.Summary.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentQuantile): - p.currentMetric.Summary.Quantile = append( - p.currentMetric.Summary.Quantile, - &dto.Quantile{ - Quantile: proto.Float64(p.currentQuantile), - Value: proto.Float64(value), - }, - ) - } - case dto.MetricType_HISTOGRAM: - // *sigh* - if p.currentMetric.Histogram == nil { - p.currentMetric.Histogram = &dto.Histogram{} - } - switch { - case p.currentIsHistogramCount: - p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) - case p.currentIsHistogramSum: - p.currentMetric.Histogram.SampleSum = proto.Float64(value) - case !math.IsNaN(p.currentBucket): - p.currentMetric.Histogram.Bucket = append( - p.currentMetric.Histogram.Bucket, - &dto.Bucket{ - UpperBound: proto.Float64(p.currentBucket), - CumulativeCount: proto.Uint64(uint64(value)), - }, - ) - } - default: - p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) - } - if p.currentByte == '\n' { - return p.startOfLine - } - return p.startTimestamp -} - -// startTimestamp represents the state where the next byte read from p.buf is -// the start of the timestamp (or whitespace leading up to it). -func (p *TextParser) startTimestamp() stateFn { - if p.skipBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.readTokenUntilWhitespace(); p.err != nil { - return nil // Unexpected end of input. - } - timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) - if err != nil { - // Create a more helpful error message. - p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) - return nil - } - p.currentMetric.TimestampMs = proto.Int64(timestamp) - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentToken.Len() > 0 { - p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) - return nil - } - return p.startOfLine -} - -// readingHelp represents the state where the last byte read (now in -// p.currentByte) is the first byte of the docstring after 'HELP'. -func (p *TextParser) readingHelp() stateFn { - if p.currentMF.Help != nil { - p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName())) - return nil - } - // Rest of line is the docstring. - if p.readTokenUntilNewline(true); p.err != nil { - return nil // Unexpected end of input. - } - p.currentMF.Help = proto.String(p.currentToken.String()) - return p.startOfLine -} - -// readingType represents the state where the last byte read (now in -// p.currentByte) is the first byte of the type hint after 'TYPE'.
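For context, the states above implement the Prometheus text exposition format consumed by this vendored parser. A hypothetical usage sketch of the package's public entry point, TextToMetricFamilies, against the upstream import path github.com/prometheus/common/expfmt — an illustration only, not code from this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// One HELP line, one TYPE line, then a sample with labels and a
	// millisecond timestamp, exercising the states defined in this file.
	in := `# HELP http_requests_total Total number of HTTP requests.
# TYPE http_requests_total counter
http_requests_total{method="post",code="200"} 1027 1395066363000
`
	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(strings.NewReader(in))
	if err != nil {
		panic(err)
	}
	for name, mf := range families {
		fmt.Printf("%s type=%s samples=%d\n", name, mf.GetType(), len(mf.Metric))
	}
}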
-func (p *TextParser) readingType() stateFn { - if p.currentMF.Type != nil { - p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName())) - return nil - } - // Rest of line is the type. - if p.readTokenUntilNewline(false); p.err != nil { - return nil // Unexpected end of input. - } - metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())] - if !ok { - p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String())) - return nil - } - p.currentMF.Type = dto.MetricType(metricType).Enum() - return p.startOfLine -} - -// parseError sets p.err to a ParseError at the current line with the given -// message. -func (p *TextParser) parseError(msg string) { - p.err = ParseError{ - Line: p.lineCount, - Msg: msg, - } -} - -// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte -// that is neither ' ' nor '\t'. That byte is left in p.currentByte. -func (p *TextParser) skipBlankTab() { - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) { - return - } - } -} - -// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do -// anything if p.currentByte is neither ' ' nor '\t'. -func (p *TextParser) skipBlankTabIfCurrentBlankTab() { - if isBlankOrTab(p.currentByte) { - p.skipBlankTab() - } -} - -// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The -// first byte considered is the byte already read (now in p.currentByte). The -// first whitespace byte encountered is still copied into p.currentByte, but not -// into p.currentToken. -func (p *TextParser) readTokenUntilWhitespace() { - p.currentToken.Reset() - for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first -// byte considered is the byte already read (now in p.currentByte). The first -// newline byte encountered is still copied into p.currentByte, but not into -// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' translates into '\', and '\n' into a line-feed character. -// All other escape sequences are invalid and cause an error. -func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { - p.currentToken.Reset() - escaped := false - for p.err == nil { - if recognizeEscapeSequence && escaped { - switch p.currentByte { - case '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - } else { - switch p.currentByte { - case '\n': - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } - p.currentByte, p.err = p.buf.ReadByte() - } -} - -// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a metric name is still copied into p.currentByte, -// but not into p.currentToken. 
-func (p *TextParser) readTokenAsMetricName() { - p.currentToken.Reset() - if !isValidMetricNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelName copies a label name from p.buf into p.currentToken. -// The first byte considered is the byte already read (now in p.currentByte). -// The first byte not part of a label name is still copied into p.currentByte, -// but not into p.currentToken. -func (p *TextParser) readTokenAsLabelName() { - p.currentToken.Reset() - if !isValidLabelNameStart(p.currentByte) { - return - } - for { - p.currentToken.WriteByte(p.currentByte) - p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { - return - } - } -} - -// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. -// In contrast to the other 'readTokenAs...' functions, which start with the -// last read byte in p.currentByte, this method ignores p.currentByte and starts -// with reading a new byte from p.buf. The first byte not part of a label value -// is still copied into p.currentByte, but not into p.currentToken. -func (p *TextParser) readTokenAsLabelValue() { - p.currentToken.Reset() - escaped := false - for { - if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { - return - } - if escaped { - switch p.currentByte { - case '"', '\\': - p.currentToken.WriteByte(p.currentByte) - case 'n': - p.currentToken.WriteByte('\n') - default: - p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) - return - } - escaped = false - continue - } - switch p.currentByte { - case '"': - return - case '\n': - p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) - return - case '\\': - escaped = true - default: - p.currentToken.WriteByte(p.currentByte) - } - } -} - -func (p *TextParser) setOrCreateCurrentMF() { - p.currentIsSummaryCount = false - p.currentIsSummarySum = false - p.currentIsHistogramCount = false - p.currentIsHistogramSum = false - name := p.currentToken.String() - if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { - return - } - // Try out if this is a _sum or _count for a summary/histogram. 
- summaryName := summaryMetricName(name) - if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_SUMMARY { - if isCount(name) { - p.currentIsSummaryCount = true - } - if isSum(name) { - p.currentIsSummarySum = true - } - return - } - } - histogramName := histogramMetricName(name) - if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { - if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { - if isCount(name) { - p.currentIsHistogramCount = true - } - if isSum(name) { - p.currentIsHistogramSum = true - } - return - } - } - p.currentMF = &dto.MetricFamily{Name: proto.String(name)} - p.metricFamiliesByName[name] = p.currentMF -} - -func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' -} - -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') -} - -func isValidMetricNameStart(b byte) bool { - return isValidLabelNameStart(b) || b == ':' -} - -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' -} - -func isBlankOrTab(b byte) bool { - return b == ' ' || b == '\t' -} - -func isCount(name string) bool { - return len(name) > 6 && name[len(name)-6:] == "_count" -} - -func isSum(name string) bool { - return len(name) > 4 && name[len(name)-4:] == "_sum" -} - -func isBucket(name string) bool { - return len(name) > 7 && name[len(name)-7:] == "_bucket" -} - -func summaryMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - default: - return name - } -} - -func histogramMetricName(name string) string { - switch { - case isCount(name): - return name[:len(name)-6] - case isSum(name): - return name[:len(name)-4] - case isBucket(name): - return name[:len(name)-7] - default: - return name - } -} - -func parseFloat(s string) (float64, error) { - if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") - } - return strconv.ParseFloat(s, 64) -} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656..0000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 26e9228..0000000 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. -func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go deleted file mode 100644 index 35e739c..0000000 --- a/vendor/github.com/prometheus/common/model/alert.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
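Before the model package deletions that follow, a brief sketch of what the goautoneg code above does: Negotiate picks the best Content-Type for an Accept header, honoring q-weights. The package is vendored under internal/ and cannot be imported from outside prometheus/common, so this sketch assumes Negotiate is in scope — for illustration only:

package main

import "fmt"

func main() {
	// "application/json" carries the default weight q=1.0, which beats
	// the explicit q=0.8 on "text/html".
	header := "text/html;q=0.8, application/json"
	best := Negotiate(header, []string{"text/html", "application/json"})
	fmt.Println(best) // application/json
}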
- -package model - -import ( - "fmt" - "time" -) - -type AlertStatus string - -const ( - AlertFiring AlertStatus = "firing" - AlertResolved AlertStatus = "resolved" -) - -// Alert is a generic representation of an alert in the Prometheus eco-system. -type Alert struct { - // Label value pairs for purpose of aggregation, matching, and disposition - // dispatching. This must minimally include an "alertname" label. - Labels LabelSet `json:"labels"` - - // Extra key/value information which does not define alert identity. - Annotations LabelSet `json:"annotations"` - - // The known time range for this alert. Both ends are optional. - StartsAt time.Time `json:"startsAt,omitempty"` - EndsAt time.Time `json:"endsAt,omitempty"` - GeneratorURL string `json:"generatorURL"` -} - -// Name returns the name of the alert. It is equivalent to the "alertname" label. -func (a *Alert) Name() string { - return string(a.Labels[AlertNameLabel]) -} - -// Fingerprint returns a unique hash for the alert. It is equivalent to -// the fingerprint of the alert's label set. -func (a *Alert) Fingerprint() Fingerprint { - return a.Labels.Fingerprint() -} - -func (a *Alert) String() string { - s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) - if a.Resolved() { - return s + "[resolved]" - } - return s + "[active]" -} - -// Resolved returns true iff the activity interval ended in the past. -func (a *Alert) Resolved() bool { - return a.ResolvedAt(time.Now()) -} - -// ResolvedAt returns true iff the activity interval ended before -// the given timestamp. -func (a *Alert) ResolvedAt(ts time.Time) bool { - if a.EndsAt.IsZero() { - return false - } - return !a.EndsAt.After(ts) -} - -// Status returns the status of the alert. -func (a *Alert) Status() AlertStatus { - if a.Resolved() { - return AlertResolved - } - return AlertFiring -} - -// Validate checks whether the alert data is consistent. -func (a *Alert) Validate() error { - if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if err := a.Labels.Validate(); err != nil { - return fmt.Errorf("invalid label set: %s", err) - } - if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") - } - if err := a.Annotations.Validate(); err != nil { - return fmt.Errorf("invalid annotations: %s", err) - } - return nil -} - -// Alerts is a list of alerts that can be sorted in chronological order. -type Alerts []*Alert - -func (as Alerts) Len() int { return len(as) } -func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } - -func (as Alerts) Less(i, j int) bool { - if as[i].StartsAt.Before(as[j].StartsAt) { - return true - } - if as[i].EndsAt.Before(as[j].EndsAt) { - return true - } - return as[i].Fingerprint() < as[j].Fingerprint() -} - -// HasFiring returns true iff one of the alerts is not resolved. -func (as Alerts) HasFiring() bool { - for _, a := range as { - if !a.Resolved() { - return true - } - } - return false -} - -// Status returns StatusFiring iff at least one of the alerts is firing.
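A short sketch of how the Alert type above behaves — hypothetical usage against the upstream github.com/prometheus/common/model package, shown only to make the Status/Resolved semantics concrete (the label value is invented):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{model.AlertNameLabel: "HighErrorRate"},
		StartsAt: time.Now().Add(-10 * time.Minute),
		// A zero EndsAt means the interval is still open, so the alert fires.
	}
	fmt.Println(a.Name(), a.Status()) // HighErrorRate firing

	a.EndsAt = time.Now().Add(-time.Minute)
	fmt.Println(a.Status()) // resolved
}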
-func (as Alerts) Status() AlertStatus { - if as.HasFiring() { - return AlertFiring - } - return AlertResolved -} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go deleted file mode 100644 index fc4de41..0000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "strconv" -) - -// Fingerprint provides a hash-capable representation of a Metric. -// For our purposes, FNV-1A 64-bit is used. -type Fingerprint uint64 - -// FingerprintFromString transforms a string representation into a Fingerprint. -func FingerprintFromString(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - return Fingerprint(num), err -} - -// ParseFingerprint parses the input string into a fingerprint. -func ParseFingerprint(s string) (Fingerprint, error) { - num, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, err - } - return Fingerprint(num), nil -} - -func (f Fingerprint) String() string { - return fmt.Sprintf("%016x", uint64(f)) -} - -// Fingerprints represents a collection of Fingerprint subject to a given -// natural sorting scheme. It implements sort.Interface. -type Fingerprints []Fingerprint - -// Len implements sort.Interface. -func (f Fingerprints) Len() int { - return len(f) -} - -// Less implements sort.Interface. -func (f Fingerprints) Less(i, j int) bool { - return f[i] < f[j] -} - -// Swap implements sort.Interface. -func (f Fingerprints) Swap(i, j int) { - f[i], f[j] = f[j], f[i] -} - -// FingerprintSet is a set of Fingerprints. -type FingerprintSet map[Fingerprint]struct{} - -// Equal returns true if both sets contain the same elements (and not more). -func (s FingerprintSet) Equal(o FingerprintSet) bool { - if len(s) != len(o) { - return false - } - - for k := range s { - if _, ok := o[k]; !ok { - return false - } - } - - return true -} - -// Intersection returns the elements contained in both sets. -func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { - myLength, otherLength := len(s), len(o) - if myLength == 0 || otherLength == 0 { - return FingerprintSet{} - } - - subSet := s - superSet := o - - if otherLength < myLength { - subSet = o - superSet = s - } - - out := FingerprintSet{} - - for k := range subSet { - if _, ok := superSet[k]; ok { - out[k] = struct{}{} - } - } - - return out -} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go deleted file mode 100644 index 038fc1c..0000000 --- a/vendor/github.com/prometheus/common/model/fnv.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -// Inline and byte-free variant of hash/fnv's fnv64a. - -const ( - offset64 = 14695981039346656037 - prime64 = 1099511628211 -) - -// hashNew initializes a new fnv64a hash value. -func hashNew() uint64 { - return offset64 -} - -// hashAdd adds a string to a fnv64a hash value, returning the updated hash. -func hashAdd(h uint64, s string) uint64 { - for i := 0; i < len(s); i++ { - h ^= uint64(s[i]) - h *= prime64 - } - return h } - -// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. -func hashAddByte(h uint64, b byte) uint64 { - h ^= uint64(b) - h *= prime64 - return h -} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go deleted file mode 100644 index 41051a0..0000000 --- a/vendor/github.com/prometheus/common/model/labels.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "strings" - "unicode/utf8" -) - -const ( - // AlertNameLabel is the name of the label containing an alert's name. - AlertNameLabel = "alertname" - - // ExportedLabelPrefix is the prefix to prepend to the label names present in - // exported metrics if a label of the same name is added by the server. - ExportedLabelPrefix = "exported_" - - // MetricNameLabel is the label name indicating the metric name of a - // timeseries. - MetricNameLabel = "__name__" - - // SchemeLabel is the name of the label that holds the scheme on which to - // scrape a target. - SchemeLabel = "__scheme__" - - // AddressLabel is the name of the label that holds the address of - // a scrape target. - AddressLabel = "__address__" - - // MetricsPathLabel is the name of the label that holds the path on which to - // scrape a target. - MetricsPathLabel = "__metrics_path__" - - // ReservedLabelPrefix is a prefix which is not legal in user-supplied - // label names. - ReservedLabelPrefix = "__" - - // MetaLabelPrefix is a prefix for labels that provide meta information. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. - MetaLabelPrefix = "__meta_" - - // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. - // Labels with this prefix are used for intermediate label processing and - // will not be attached to time series. This is reserved for use in - // Prometheus configuration files by users.
- TmpLabelPrefix = "__tmp_" - - // ParamLabelPrefix is a prefix for labels that provide URL parameters - // used to scrape a target. - ParamLabelPrefix = "__param_" - - // JobLabel is the label name indicating the job from which a timeseries - // was scraped. - JobLabel = "job" - - // InstanceLabel is the label name used for the instance label. - InstanceLabel = "instance" - - // BucketLabel is used for the label that defines the upper bound of a - // bucket of a histogram ("le" -> "less or equal"). - BucketLabel = "le" - - // QuantileLabel is used for the label that defines the quantile in a - // summary. - QuantileLabel = "quantile" -) - -// LabelNameRE is a regular expression matching valid label names. Note that the -// IsValid method of LabelName performs the same check but faster than a match -// with this regular expression. -var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") - -// A LabelName is a key for a LabelSet or Metric. It has a value associated -// therewith. -type LabelName string - -// IsValid is true iff the label name matches the pattern of LabelNameRE. This -// method, however, does not use LabelNameRE for the check but a much faster -// hardcoded implementation. -func (ln LabelName) IsValid() bool { - if len(ln) == 0 { - return false - } - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (ln *LabelName) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - if !LabelName(s).IsValid() { - return fmt.Errorf("%q is not a valid label name", s) - } - *ln = LabelName(s) - return nil -} - -// LabelNames is a sortable LabelName slice. It implements sort.Interface. -type LabelNames []LabelName - -func (l LabelNames) Len() int { - return len(l) -} - -func (l LabelNames) Less(i, j int) bool { - return l[i] < l[j] -} - -func (l LabelNames) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -func (l LabelNames) String() string { - labelStrings := make([]string, 0, len(l)) - for _, label := range l { - labelStrings = append(labelStrings, string(label)) - } - return strings.Join(labelStrings, ", ") -} - -// A LabelValue is an associated value for a LabelName. -type LabelValue string - -// IsValid returns true iff the string is valid UTF-8. -func (lv LabelValue) IsValid() bool { - return utf8.ValidString(string(lv)) -} - -// LabelValues is a sortable LabelValue slice. It implements sort.Interface. -type LabelValues []LabelValue - -func (l LabelValues) Len() int { - return len(l) -} - -func (l LabelValues) Less(i, j int) bool { - return string(l[i]) < string(l[j]) -} - -func (l LabelValues) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// LabelPair pairs a name with a value. -type LabelPair struct { - Name LabelName - Value LabelValue -} - -// LabelPairs is a sortable slice of LabelPair pointers. It implements -// sort.Interface.
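The hand-rolled checks above are the hot-path equivalents of LabelNameRE: names must match [a-zA-Z_][a-zA-Z0-9_]*, while values only need to be valid UTF-8. A hypothetical illustration against the upstream model package (the sample names are invented):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.LabelName("http_status").IsValid())  // true
	fmt.Println(model.LabelName("0_leading").IsValid())    // false: may not start with a digit
	fmt.Println(model.LabelName("status-code").IsValid())  // false: '-' is not allowed
	fmt.Println(model.LabelValue("любое UTF-8").IsValid()) // true: values only need valid UTF-8
}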
-type LabelPairs []*LabelPair - -func (l LabelPairs) Len() int { - return len(l) -} - -func (l LabelPairs) Less(i, j int) bool { - switch { - case l[i].Name > l[j].Name: - return false - case l[i].Name < l[j].Name: - return true - case l[i].Value > l[j].Value: - return false - case l[i].Value < l[j].Value: - return true - default: - return false - } -} - -func (l LabelPairs) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go deleted file mode 100644 index 6eda08a..0000000 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "sort" - "strings" -) - -// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet -// may be fully-qualified down to the point where it may resolve to a single -// Metric in the data store or not. All operations that occur within the realm -// of a LabelSet can emit a vector of Metric entities to which the LabelSet may -// match. -type LabelSet map[LabelName]LabelValue - -// Validate checks whether all names and values in the label set -// are valid. -func (ls LabelSet) Validate() error { - for ln, lv := range ls { - if !ln.IsValid() { - return fmt.Errorf("invalid name %q", ln) - } - if !lv.IsValid() { - return fmt.Errorf("invalid value %q", lv) - } - } - return nil -} - -// Equal returns true iff both label sets have exactly the same key/value pairs. -func (ls LabelSet) Equal(o LabelSet) bool { - if len(ls) != len(o) { - return false - } - for ln, lv := range ls { - olv, ok := o[ln] - if !ok { - return false - } - if olv != lv { - return false - } - } - return true -} - -// Before compares the metrics, using the following criteria: -// -// If m has fewer labels than o, it is before o. If it has more, it is not. -// -// If the number of labels is the same, the superset of all label names is -// sorted alphanumerically. The first differing label pair found in that order -// determines the outcome: If the label does not exist at all in m, then m is -// before o, and vice versa. Otherwise the label value is compared -// alphanumerically. -// -// If m and o are equal, the method returns false. -func (ls LabelSet) Before(o LabelSet) bool { - if len(ls) < len(o) { - return true - } - if len(ls) > len(o) { - return false - } - - lns := make(LabelNames, 0, len(ls)+len(o)) - for ln := range ls { - lns = append(lns, ln) - } - for ln := range o { - lns = append(lns, ln) - } - // It's probably not worth it to de-dup lns. - sort.Sort(lns) - for _, ln := range lns { - mlv, ok := ls[ln] - if !ok { - return true - } - olv, ok := o[ln] - if !ok { - return false - } - if mlv < olv { - return true - } - if mlv > olv { - return false - } - } - return false -} - -// Clone returns a copy of the label set. 
-func (ls LabelSet) Clone() LabelSet { - lsn := make(LabelSet, len(ls)) - for ln, lv := range ls { - lsn[ln] = lv - } - return lsn -} - -// Merge is a helper function to non-destructively merge two label sets. -func (l LabelSet) Merge(other LabelSet) LabelSet { - result := make(LabelSet, len(l)) - - for k, v := range l { - result[k] = v - } - - for k, v := range other { - result[k] = v - } - - return result -} - -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - -// Fingerprint returns the LabelSet's fingerprint. -func (ls LabelSet) Fingerprint() Fingerprint { - return labelSetToFingerprint(ls) -} - -// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (ls LabelSet) FastFingerprint() Fingerprint { - return labelSetToFastFingerprint(ls) -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (l *LabelSet) UnmarshalJSON(b []byte) error { - var m map[LabelName]LabelValue - if err := json.Unmarshal(b, &m); err != nil { - return err - } - // encoding/json only unmarshals maps of the form map[string]T. It treats - // LabelName as a string and does not call its UnmarshalJSON method. - // Thus, we have to replicate the behavior here. - for ln := range m { - if !ln.IsValid() { - return fmt.Errorf("%q is not a valid label name", ln) - } - } - *l = LabelSet(m) - return nil -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go deleted file mode 100644 index 00804b7..0000000 --- a/vendor/github.com/prometheus/common/model/metric.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "regexp" - "sort" - "strings" -) - -var ( - // MetricNameRE is a regular expression matching valid metric - // names. Note that the IsValidMetricName function performs the same - // check but faster than a match with this regular expression. - MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) -) - -// A Metric is similar to a LabelSet, but the key difference is that a Metric is -// a singleton and refers to one and only one stream of samples. -type Metric LabelSet - -// Equal compares the metrics. -func (m Metric) Equal(o Metric) bool { - return LabelSet(m).Equal(LabelSet(o)) -} - -// Before compares the metrics' underlying label sets. -func (m Metric) Before(o Metric) bool { - return LabelSet(m).Before(LabelSet(o)) -} - -// Clone returns a copy of the Metric. 
-func (m Metric) Clone() Metric { - clone := make(Metric, len(m)) - for k, v := range m { - clone[k] = v - } - return clone -} - -func (m Metric) String() string { - metricName, hasName := m[MetricNameLabel] - numLabels := len(m) - 1 - if !hasName { - numLabels = len(m) - } - labelStrings := make([]string, 0, numLabels) - for label, value := range m { - if label != MetricNameLabel { - labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) - } - } - - switch numLabels { - case 0: - if hasName { - return string(metricName) - } - return "{}" - default: - sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) - } -} - -// Fingerprint returns a Metric's Fingerprint. -func (m Metric) Fingerprint() Fingerprint { - return LabelSet(m).Fingerprint() -} - -// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing -// algorithm, which is, however, more susceptible to hash collisions. -func (m Metric) FastFingerprint() Fingerprint { - return LabelSet(m).FastFingerprint() -} - -// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. -// This function, however, does not use MetricNameRE for the check but a much -// faster hardcoded implementation. -func IsValidMetricName(n LabelValue) bool { - if len(n) == 0 { - return false - } - for i, b := range n { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } - return true -} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go deleted file mode 100644 index a7b9691..0000000 --- a/vendor/github.com/prometheus/common/model/model.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package model contains common data structures that are shared across -// Prometheus components and libraries. -package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go deleted file mode 100644 index 8762b13..0000000 --- a/vendor/github.com/prometheus/common/model/signature.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2014 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
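The metric-name rules above mirror the label-name rules plus ':' for recording rules, and Metric's String method renders the familiar name{label="value"} form. A hypothetical illustration against the upstream model package (metric and label names invented):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	fmt.Println(model.IsValidMetricName("ffs:events_total")) // true: ':' is legal in metric names
	fmt.Println(model.IsValidMetricName("2fast"))            // false: may not start with a digit

	m := model.Metric{
		model.MetricNameLabel: "ffs_events_total",
		"event_type":          "CREATED",
	}
	fmt.Println(m) // ffs_events_total{event_type="CREATED"}
}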
- -package model - -import ( - "sort" -) - -// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is -// used to separate label names, label values, and other strings from each other -// when calculating their combined hash value (aka signature aka fingerprint). -const SeparatorByte byte = 255 - -var ( - // cache the signature of an empty label set. - emptyLabelSignature = hashNew() -) - -// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a -// given label set. (Collisions are possible but unlikely if the number of label -// sets the function is applied to is small.) -func LabelsToSignature(labels map[string]string) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - labelNames := make([]string, 0, len(labels)) - for labelName := range labels { - labelNames = append(labelNames, labelName) - } - sort.Strings(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, labelName) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, labels[labelName]) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as -// parameter (rather than a label map) and returns a Fingerprint. -func labelSetToFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - labelNames := make(LabelNames, 0, len(ls)) - for labelName := range ls { - labelNames = append(labelNames, labelName) - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(ls[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return Fingerprint(sum) -} - -// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a -// faster and less allocation-heavy hash function, which is more susceptible to -// create hash collisions. Therefore, collision detection should be applied. -func labelSetToFastFingerprint(ls LabelSet) Fingerprint { - if len(ls) == 0 { - return Fingerprint(emptyLabelSignature) - } - - var result uint64 - for labelName, labelValue := range ls { - sum := hashNew() - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(labelValue)) - result ^= sum - } - return Fingerprint(result) -} - -// SignatureForLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and only includes the labels with the -// specified LabelNames into the signature calculation. The labels passed in -// will be sorted by this function. -func SignatureForLabels(m Metric, labels ...LabelName) uint64 { - if len(labels) == 0 { - return emptyLabelSignature - } - - sort.Sort(LabelNames(labels)) - - sum := hashNew() - for _, label := range labels { - sum = hashAdd(sum, string(label)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[label])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} - -// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as -// parameter (rather than a label map) and excludes the labels with any of the -// specified LabelNames from the signature calculation. 
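The signature helpers above sort label names before hashing, so two equal label sets always produce the same fingerprint regardless of map iteration order — this is what the text parser's summary/histogram deduplication relies on. A hypothetical illustration against the upstream model package:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"method": "get", "code": "200"}

	// Label names are sorted first, so insertion order never changes the hash.
	fmt.Printf("%016x\n", model.LabelsToSignature(labels))

	// Restrict the signature to a subset of the label names.
	m := model.Metric{"method": "get", "code": "200", "handler": "/files"}
	fmt.Printf("%016x\n", model.SignatureForLabels(m, "method", "code"))
}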
-func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { - if len(m) == 0 { - return emptyLabelSignature - } - - labelNames := make(LabelNames, 0, len(m)) - for labelName := range m { - if _, exclude := labels[labelName]; !exclude { - labelNames = append(labelNames, labelName) - } - } - if len(labelNames) == 0 { - return emptyLabelSignature - } - sort.Sort(labelNames) - - sum := hashNew() - for _, labelName := range labelNames { - sum = hashAdd(sum, string(labelName)) - sum = hashAddByte(sum, SeparatorByte) - sum = hashAdd(sum, string(m[labelName])) - sum = hashAddByte(sum, SeparatorByte) - } - return sum -} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go deleted file mode 100644 index bb99889..0000000 --- a/vendor/github.com/prometheus/common/model/silence.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "regexp" - "time" -) - -// Matcher describes a match against the value of a given label. -type Matcher struct { - Name LabelName `json:"name"` - Value string `json:"value"` - IsRegex bool `json:"isRegex"` -} - -func (m *Matcher) UnmarshalJSON(b []byte) error { - type plain Matcher - if err := json.Unmarshal(b, (*plain)(m)); err != nil { - return err - } - - if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return err - } - } - return nil -} - -// Validate returns nil iff all fields of the matcher have valid values. -func (m *Matcher) Validate() error { - if !m.Name.IsValid() { - return fmt.Errorf("invalid name %q", m.Name) - } - if m.IsRegex { - if _, err := regexp.Compile(m.Value); err != nil { - return fmt.Errorf("invalid regular expression %q", m.Value) - } - } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { - return fmt.Errorf("invalid value %q", m.Value) - } - return nil -} - -// Silence defines the representation of a silence definition in the Prometheus -// eco-system. -type Silence struct { - ID uint64 `json:"id,omitempty"` - - Matchers []*Matcher `json:"matchers"` - - StartsAt time.Time `json:"startsAt"` - EndsAt time.Time `json:"endsAt"` - - CreatedAt time.Time `json:"createdAt,omitempty"` - CreatedBy string `json:"createdBy"` - Comment string `json:"comment,omitempty"` -} - -// Validate returns nil iff all fields of the silence have valid values.
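A quick sketch of the Matcher validation defined above — hypothetical usage against the upstream model package (the matcher values are invented):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	ok := &model.Matcher{Name: "job", Value: "ffs-puller"}
	fmt.Println(ok.Validate()) // <nil>

	// A regex matcher must compile; "([" does not.
	bad := &model.Matcher{Name: "job", Value: "([", IsRegex: true}
	fmt.Println(bad.Validate()) // invalid regular expression "(["
}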
-func (s *Silence) Validate() error { - if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") - } - for _, m := range s.Matchers { - if err := m.Validate(); err != nil { - return fmt.Errorf("invalid matcher: %s", err) - } - } - if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") - } - if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") - } - if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") - } - if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") - } - if s.Comment == "" { - return fmt.Errorf("comment missing") - } - if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") - } - return nil -} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go deleted file mode 100644 index 7b0064f..0000000 --- a/vendor/github.com/prometheus/common/model/time.go +++ /dev/null @@ -1,270 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "fmt" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -const ( - // MinimumTick is the minimum supported time resolution. This has to be - // at least time.Second in order for the code below to work. - minimumTick = time.Millisecond - // second is the Time duration equivalent to one second. - second = int64(time.Second / minimumTick) - // The number of nanoseconds per minimum tick. - nanosPerTick = int64(minimumTick / time.Nanosecond) - - // Earliest is the earliest Time representable. Handy for - // initializing a high watermark. - Earliest = Time(math.MinInt64) - // Latest is the latest Time representable. Handy for initializing - // a low watermark. - Latest = Time(math.MaxInt64) -) - -// Time is the number of milliseconds since the epoch -// (1970-01-01 00:00 UTC) excluding leap seconds. -type Time int64 - -// Interval describes an interval between two timestamps. -type Interval struct { - Start, End Time -} - -// Now returns the current time as a Time. -func Now() Time { - return TimeFromUnixNano(time.Now().UnixNano()) -} - -// TimeFromUnix returns the Time equivalent to the Unix Time t -// provided in seconds. -func TimeFromUnix(t int64) Time { - return Time(t * second) -} - -// TimeFromUnixNano returns the Time equivalent to the Unix Time -// t provided in nanoseconds. -func TimeFromUnixNano(t int64) Time { - return Time(t / nanosPerTick) -} - -// Equal reports whether two Times represent the same instant. -func (t Time) Equal(o Time) bool { - return t == o -} - -// Before reports whether the Time t is before o. -func (t Time) Before(o Time) bool { - return t < o -} - -// After reports whether the Time t is after o. -func (t Time) After(o Time) bool { - return t > o -} - -// Add returns the Time t + d. -func (t Time) Add(d time.Duration) Time { - return t + Time(d/minimumTick) -} - -// Sub returns the Duration t - o. 
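model.Time above stores milliseconds since the Unix epoch, so conversions to and from time.Time are cheap integer math. A hypothetical illustration against the upstream model package:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// model.Time is milliseconds since the epoch.
	t := model.TimeFromUnix(1395066363)
	fmt.Println(t) // 1395066363

	// Add/Sub work at millisecond resolution.
	fmt.Println(t.Add(1500 * time.Millisecond).Sub(t)) // 1.5s

	// Round-trip into the standard library's time.Time.
	fmt.Println(t.Time().UTC()) // 2014-03-17 14:26:03 +0000 UTC
}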
-func (t Time) Sub(o Time) time.Duration { - return time.Duration(t-o) * minimumTick -} - -// Time returns the time.Time representation of t. -func (t Time) Time() time.Time { - return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) -} - -// Unix returns t as a Unix time, the number of seconds elapsed -// since January 1, 1970 UTC. -func (t Time) Unix() int64 { - return int64(t) / second -} - -// UnixNano returns t as a Unix time, the number of nanoseconds elapsed -// since January 1, 1970 UTC. -func (t Time) UnixNano() int64 { - return int64(t) * nanosPerTick -} - -// The number of digits after the dot. -var dotPrecision = int(math.Log10(float64(second))) - -// String returns a string representation of the Time. -func (t Time) String() string { - return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) -} - -// MarshalJSON implements the json.Marshaler interface. -func (t Time) MarshalJSON() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalJSON implements the json.Unmarshaler interface. -func (t *Time) UnmarshalJSON(b []byte) error { - p := strings.Split(string(b), ".") - switch len(p) { - case 1: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - *t = Time(v * second) - - case 2: - v, err := strconv.ParseInt(string(p[0]), 10, 64) - if err != nil { - return err - } - v *= second - - prec := dotPrecision - len(p[1]) - if prec < 0 { - p[1] = p[1][:dotPrecision] - } else if prec > 0 { - p[1] = p[1] + strings.Repeat("0", prec) - } - - va, err := strconv.ParseInt(p[1], 10, 32) - if err != nil { - return err - } - - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } - - default: - return fmt.Errorf("invalid time %q", string(b)) - } - return nil -} - -// Duration wraps time.Duration. It is used to parse the custom duration format -// from YAML. -// This type should not propagate beyond the scope of input/output processing. -type Duration time.Duration - -// Set implements pflag/flag.Value -func (d *Duration) Set(s string) error { - var err error - *d, err = ParseDuration(s) - return err -} - -// Type implements pflag.Value -func (d *Duration) Type() string { - return "duration" -} - -var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") - -// ParseDuration parses a string into a time.Duration, assuming that a year -// always has 365d, a week always has 7d, and a day always has 24h. 
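ParseDuration, whose implementation follows, accepts exactly one integer plus one unit (y, w, d, h, m, s, ms) under the fixed 365d/7d/24h assumptions named above; mixed units are rejected. A hypothetical illustration against the upstream model package:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, err := model.ParseDuration("2h")
	fmt.Println(d, err) // 2h <nil>

	// Mixed units such as "1h30m" do not match the format's regexp.
	_, err = model.ParseDuration("1h30m")
	fmt.Println(err) // not a valid duration string: "1h30m"
}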
-func ParseDuration(durationStr string) (Duration, error) { - matches := durationRE.FindStringSubmatch(durationStr) - if len(matches) != 3 { - return 0, fmt.Errorf("not a valid duration string: %q", durationStr) - } - var ( - n, _ = strconv.Atoi(matches[1]) - dur = time.Duration(n) * time.Millisecond - ) - switch unit := matches[2]; unit { - case "y": - dur *= 1000 * 60 * 60 * 24 * 365 - case "w": - dur *= 1000 * 60 * 60 * 24 * 7 - case "d": - dur *= 1000 * 60 * 60 * 24 - case "h": - dur *= 1000 * 60 * 60 - case "m": - dur *= 1000 * 60 - case "s": - dur *= 1000 - case "ms": - // Value already correct - default: - return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) - } - return Duration(dur), nil -} - -func (d Duration) String() string { - var ( - ms = int64(time.Duration(d) / time.Millisecond) - unit = "ms" - ) - if ms == 0 { - return "0s" - } - factors := map[string]int64{ - "y": 1000 * 60 * 60 * 24 * 365, - "w": 1000 * 60 * 60 * 24 * 7, - "d": 1000 * 60 * 60 * 24, - "h": 1000 * 60 * 60, - "m": 1000 * 60, - "s": 1000, - "ms": 1, - } - - switch int64(0) { - case ms % factors["y"]: - unit = "y" - case ms % factors["w"]: - unit = "w" - case ms % factors["d"]: - unit = "d" - case ms % factors["h"]: - unit = "h" - case ms % factors["m"]: - unit = "m" - case ms % factors["s"]: - unit = "s" - } - return fmt.Sprintf("%v%v", ms/factors[unit], unit) -} - -// MarshalYAML implements the yaml.Marshaler interface. -func (d Duration) MarshalYAML() (interface{}, error) { - return d.String(), nil -} - -// UnmarshalYAML implements the yaml.Unmarshaler interface. -func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { - var s string - if err := unmarshal(&s); err != nil { - return err - } - dur, err := ParseDuration(s) - if err != nil { - return err - } - *d = dur - return nil -} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go deleted file mode 100644 index c9d8fb1..0000000 --- a/vendor/github.com/prometheus/common/model/value.go +++ /dev/null @@ -1,416 +0,0 @@ -// Copyright 2013 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "fmt" - "math" - "sort" - "strconv" - "strings" -) - -var ( - // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a - // non-existing sample pair. It is a SamplePair with timestamp Earliest and - // value 0.0. Note that the natural zero value of SamplePair has a timestamp - // of 0, which is possible to appear in a real SamplePair and thus not - // suitable to signal a non-existing SamplePair. - ZeroSamplePair = SamplePair{Timestamp: Earliest} - - // ZeroSample is the pseudo zero-value of Sample used to signal a - // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, - // and metric nil. Note that the natural zero value of Sample has a timestamp - // of 0, which is possible to appear in a real Sample and thus not suitable - // to signal a non-existing Sample. 
- ZeroSample = Sample{Timestamp: Earliest} -) - -// A SampleValue is a representation of a value for a given sample at a given -// time. -type SampleValue float64 - -// MarshalJSON implements json.Marshaler. -func (v SampleValue) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (v *SampleValue) UnmarshalJSON(b []byte) error { - if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") - } - f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) - if err != nil { - return err - } - *v = SampleValue(f) - return nil -} - -// Equal returns true if the value of v and o is equal or if both are NaN. Note -// that v==o is false if both are NaN. If you want the conventional float -// behavior, use == to compare two SampleValues. -func (v SampleValue) Equal(o SampleValue) bool { - if v == o { - return true - } - return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) -} - -func (v SampleValue) String() string { - return strconv.FormatFloat(float64(v), 'f', -1, 64) -} - -// SamplePair pairs a SampleValue with a Timestamp. -type SamplePair struct { - Timestamp Time - Value SampleValue -} - -// MarshalJSON implements json.Marshaler. -func (s SamplePair) MarshalJSON() ([]byte, error) { - t, err := json.Marshal(s.Timestamp) - if err != nil { - return nil, err - } - v, err := json.Marshal(s.Value) - if err != nil { - return nil, err - } - return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *SamplePair) UnmarshalJSON(b []byte) error { - v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. -func (s *SamplePair) Equal(o *SamplePair) bool { - return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) -} - -func (s SamplePair) String() string { - return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) -} - -// Sample is a sample pair associated with a metric. -type Sample struct { - Metric Metric `json:"metric"` - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -// Equal compares first the metrics, then the timestamp, then the value. The -// semantics of value equality is defined by SampleValue.Equal. -func (s *Sample) Equal(o *Sample) bool { - if s == o { - return true - } - - if !s.Metric.Equal(o.Metric) { - return false - } - if !s.Timestamp.Equal(o.Timestamp) { - return false - } - - return s.Value.Equal(o.Value) -} - -func (s Sample) String() string { - return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }) -} - -// MarshalJSON implements json.Marshaler. -func (s Sample) MarshalJSON() ([]byte, error) { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - return json.Marshal(&v) -} - -// UnmarshalJSON implements json.Unmarshaler. 
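On the wire, `SampleValue` marshals as a quoted string (which, among other things, lets NaN and the infinities survive JSON encoding) and `SamplePair` as a two-element `[timestamp, "value"]` array, with the timestamp in seconds at millisecond precision. A round-trip sketch against the public package; the sample numbers are arbitrary:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	sp := model.SamplePair{
		Timestamp: model.TimeFromUnixNano(1587000000123000000),
		Value:     model.SampleValue(0.25),
	}

	b, err := json.Marshal(sp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // [1587000000.123,"0.25"]

	var back model.SamplePair
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Equal(&sp)) // true
}
```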
-func (s *Sample) UnmarshalJSON(b []byte) error { - v := struct { - Metric Metric `json:"metric"` - Value SamplePair `json:"value"` - }{ - Metric: s.Metric, - Value: SamplePair{ - Timestamp: s.Timestamp, - Value: s.Value, - }, - } - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - s.Metric = v.Metric - s.Timestamp = v.Value.Timestamp - s.Value = v.Value.Value - - return nil -} - -// Samples is a sortable Sample slice. It implements sort.Interface. -type Samples []*Sample - -func (s Samples) Len() int { - return len(s) -} - -// Less compares first the metrics, then the timestamp. -func (s Samples) Less(i, j int) bool { - switch { - case s[i].Metric.Before(s[j].Metric): - return true - case s[j].Metric.Before(s[i].Metric): - return false - case s[i].Timestamp.Before(s[j].Timestamp): - return true - default: - return false - } -} - -func (s Samples) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Equal compares two sets of samples and returns true if they are equal. -func (s Samples) Equal(o Samples) bool { - if len(s) != len(o) { - return false - } - - for i, sample := range s { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// SampleStream is a stream of Values belonging to an attached COWMetric. -type SampleStream struct { - Metric Metric `json:"metric"` - Values []SamplePair `json:"values"` -} - -func (ss SampleStream) String() string { - vals := make([]string, len(ss.Values)) - for i, v := range ss.Values { - vals[i] = v.String() - } - return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) -} - -// Value is a generic interface for values resulting from a query evaluation. -type Value interface { - Type() ValueType - String() string -} - -func (Matrix) Type() ValueType { return ValMatrix } -func (Vector) Type() ValueType { return ValVector } -func (*Scalar) Type() ValueType { return ValScalar } -func (*String) Type() ValueType { return ValString } - -type ValueType int - -const ( - ValNone ValueType = iota - ValScalar - ValVector - ValMatrix - ValString -) - -// MarshalJSON implements json.Marshaler. -func (et ValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(et.String()) -} - -func (et *ValueType) UnmarshalJSON(b []byte) error { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - switch s { - case "": - *et = ValNone - case "scalar": - *et = ValScalar - case "vector": - *et = ValVector - case "matrix": - *et = ValMatrix - case "string": - *et = ValString - default: - return fmt.Errorf("unknown value type %q", s) - } - return nil -} - -func (e ValueType) String() string { - switch e { - case ValNone: - return "" - case ValScalar: - return "scalar" - case ValVector: - return "vector" - case ValMatrix: - return "matrix" - case ValString: - return "string" - } - panic("ValueType.String: unhandled value type") -} - -// Scalar is a scalar value evaluated at the set timestamp. -type Scalar struct { - Value SampleValue `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s Scalar) String() string { - return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) -} - -// MarshalJSON implements json.Marshaler. -func (s Scalar) MarshalJSON() ([]byte, error) { - v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) - return json.Marshal([...]interface{}{s.Timestamp, string(v)}) -} - -// UnmarshalJSON implements json.Unmarshaler. 
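`ValueType` likewise round-trips through JSON as its string name, which is how query-result envelopes tag whether the payload is a scalar, vector, matrix, or string. A quick sketch against the public package:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	b, err := json.Marshal(model.ValVector)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "vector"

	var vt model.ValueType
	if err := json.Unmarshal([]byte(`"matrix"`), &vt); err != nil {
		panic(err)
	}
	fmt.Println(vt == model.ValMatrix) // true
}
```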
-func (s *Scalar) UnmarshalJSON(b []byte) error { - var f string - v := [...]interface{}{&s.Timestamp, &f} - - if err := json.Unmarshal(b, &v); err != nil { - return err - } - - value, err := strconv.ParseFloat(f, 64) - if err != nil { - return fmt.Errorf("error parsing sample value: %s", err) - } - s.Value = SampleValue(value) - return nil -} - -// String is a string value evaluated at the set timestamp. -type String struct { - Value string `json:"value"` - Timestamp Time `json:"timestamp"` -} - -func (s *String) String() string { - return s.Value -} - -// MarshalJSON implements json.Marshaler. -func (s String) MarshalJSON() ([]byte, error) { - return json.Marshal([]interface{}{s.Timestamp, s.Value}) -} - -// UnmarshalJSON implements json.Unmarshaler. -func (s *String) UnmarshalJSON(b []byte) error { - v := [...]interface{}{&s.Timestamp, &s.Value} - return json.Unmarshal(b, &v) -} - -// Vector is basically only an alias for Samples, but the -// contract is that in a Vector, all Samples have the same timestamp. -type Vector []*Sample - -func (vec Vector) String() string { - entries := make([]string, len(vec)) - for i, s := range vec { - entries[i] = s.String() - } - return strings.Join(entries, "\n") -} - -func (vec Vector) Len() int { return len(vec) } -func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } - -// Less compares first the metrics, then the timestamp. -func (vec Vector) Less(i, j int) bool { - switch { - case vec[i].Metric.Before(vec[j].Metric): - return true - case vec[j].Metric.Before(vec[i].Metric): - return false - case vec[i].Timestamp.Before(vec[j].Timestamp): - return true - default: - return false - } -} - -// Equal compares two sets of samples and returns true if they are equal. -func (vec Vector) Equal(o Vector) bool { - if len(vec) != len(o) { - return false - } - - for i, sample := range vec { - if !sample.Equal(o[i]) { - return false - } - } - return true -} - -// Matrix is a list of time series. -type Matrix []*SampleStream - -func (m Matrix) Len() int { return len(m) } -func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } -func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } - -func (mat Matrix) String() string { - matCp := make(Matrix, len(mat)) - copy(matCp, mat) - sort.Sort(matCp) - - strs := make([]string, len(matCp)) - - for i, ss := range matCp { - strs[i] = ss.String() - } - - return strings.Join(strs, "\n") -} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore deleted file mode 100644 index 25e3659..0000000 --- a/vendor/github.com/prometheus/procfs/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml deleted file mode 100644 index 7c4ce1f..0000000 --- a/vendor/github.com/prometheus/procfs/.golangci.yml +++ /dev/null @@ -1,4 +0,0 @@ -linters: - enable: - - staticcheck - - govet diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md deleted file mode 100644 index 943de76..0000000 --- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Prometheus uses GitHub to manage reviews of pull requests. 
- -* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute) - -* If you have a trivial fix or improvement, go ahead and create a pull request, - addressing (with `@...`) a suitable maintainer of this repository (see - [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. - -* If you plan to do something more involved, first discuss your ideas - on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). - This will avoid unnecessary work and surely give you and us a good deal - of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on. - -* Relevant coding style guidelines are the [Go Code Review - Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) - and the _Formatting and style_ section of Peter Bourgon's [Go: Best - Practices for Production - Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style). - -* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works) - -## Steps to Contribute - -Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue. - -Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the tag, please comment on them and one of the maintainers will clarify it. For a quicker response, contact us over [IRC](https://prometheus.io/community). - -For quickly compiling and testing your changes do: -``` -make test # Make sure all the tests pass before you commit and push :) -``` - -We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false-positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly though, fixing the code to comply with the linter's recommendation is in general the preferred course of action. - -## Pull Request Checklist - -* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes. - -* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests). - -* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)). - -* Add tests relevant to the fixed bug or new feature. - -## Dependency management - -The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed. - -All dependencies are vendored in the `vendor/` directory. 
- -To add or update a new dependency, use the `go get` command: - -```bash -# Pick the latest tagged release. -go get example.com/some/module/pkg - -# Pick a specific version. -go get example.com/some/module/pkg@vX.Y.Z -``` - -Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory: - - -```bash -# The GO111MODULE variable can be omitted when the code isn't located in GOPATH. -GO111MODULE=on go mod tidy - -GO111MODULE=on go mod vendor -``` - -You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request. - - -## API Implementation Guidelines - -### Naming and Documentation - -Public functions and structs should normally be named according to the file(s) being read and parsed. For example, -the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`. In addition, the godoc for each public function -should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s). - -### Reading vs. Parsing - -Most functionality in this library consists of reading files and then parsing the text into structured data. In most -cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and -a private `parseThing(r Reader)` function. This provides a logical separation and allows parsing to be tested -directly without the need to read from the filesystem. Using a `Reader` argument is preferred over other data types -such as `string` or `*File` because it provides the most flexibility regarding the data source. When a set of files -in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead. - -### /proc and /sys filesystem I/O - -The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O. -Many of the files are changing continuously and the data being read can in some cases change between subsequent -reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls -to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the -full file in a single operation using an internal utility function called `util.ReadFileNoStat`. -This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of -the file. - -Note that parsing the file's contents can still be performed one line at a time. This is done by first reading -the full file, and then using a scanner on the `[]byte` or `string` containing the data. - -``` - data, err := util.ReadFileNoStat("/proc/cpuinfo") - if err != nil { - return err - } - reader := bytes.NewReader(data) - scanner := bufio.NewScanner(reader) -``` - -The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files -can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does -not bother to check the size of the file before reading. 
-``` - data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity") -``` - diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE deleted file mode 100644 index 261eeb9..0000000 --- a/vendor/github.com/prometheus/procfs/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md deleted file mode 100644 index 56ba67d..0000000 --- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md +++ /dev/null @@ -1,2 +0,0 @@ -* Johannes 'fish' Ziemke @discordianfish -* Paul Gier @pgier diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile deleted file mode 100644 index 616a0d2..0000000 --- a/vendor/github.com/prometheus/procfs/Makefile +++ /dev/null @@ -1,29 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -%/.unpacked: %.ttar - @echo ">> extracting fixtures" - ./ttar -C $(dir $*) -x -f $*.ttar - touch $@ - -update_fixtures: - rm -vf fixtures/.unpacked - ./ttar -c -f fixtures.ttar fixtures/ - -.PHONY: build -build: - -.PHONY: test -test: fixtures/.unpacked common-test diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common deleted file mode 100644 index d7aea1b..0000000 --- a/vendor/github.com/prometheus/procfs/Makefile.common +++ /dev/null @@ -1,275 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. 
-# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... - -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -PROMU_VERSION ?= 0.4.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -GOLANGCI_LINT := -GOLANGCI_LINT_OPTS ?= -GOLANGCI_LINT_VERSION ?= v1.16.0 -# golangci-lint only supports linux, darwin and windows platforms on i386/amd64. -# windows isn't included here because of the path separator being different. -ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKERFILE_PATH ?= ./ -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license lint unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif - -.PHONY: common-test-short -common-test-short: - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-lint -common-lint: $(GOLANGCI_LINT) -ifdef GOLANGCI_LINT - @echo ">> running golangci-lint" -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs) -else - $(GOLANGCI_LINT) run $(pkgs) -endif -endif - -# For backward-compatibility. -.PHONY: common-staticcheck -common-staticcheck: lint - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) - @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - $(DOCKERFILE_PATH) - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - 
DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef GOLANGCI_LINT -$(GOLANGCI_LINT): - mkdir -p $(FIRST_GOPATH)/bin - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ - | sed -e '/install -d/d' \ - | sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION) -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE deleted file mode 100644 index 53c5e9a..0000000 --- a/vendor/github.com/prometheus/procfs/NOTICE +++ /dev/null @@ -1,7 +0,0 @@ -procfs provides functions to retrieve system, kernel and process -metrics from the pseudo-filesystem proc. - -Copyright 2014-2015 The Prometheus Authors - -This product includes software developed at -SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md deleted file mode 100644 index 55d1e32..0000000 --- a/vendor/github.com/prometheus/procfs/README.md +++ /dev/null @@ -1,61 +0,0 @@ -# procfs - -This package provides functions to retrieve system, kernel, and process -metrics from the pseudo-filesystems /proc and /sys. - -*WARNING*: This package is a work in progress. Its API may still break in -backwards-incompatible ways without warnings. Use it at your own risk. - -[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) -[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) -[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) - -## Usage - -The procfs library is organized by packages based on whether the gathered data is coming from -/proc, /sys, or both. Each package contains an `FS` type which represents the path to either /proc, -/sys, or both. For example, cpu statistics are gathered from -`/proc/stat` and are available via the root procfs package. First, the proc filesystem mount -point is initialized, and then the stat information is read. - -```go -fs, err := procfs.NewFS("/proc") -stats, err := fs.Stat() -``` - -Some sub-packages such as `blockdevice`, require access to both the proc and sys filesystems. - -```go - fs, err := blockdevice.NewFS("/proc", "/sys") - stats, err := fs.ProcDiskstats() -``` - -## Package Organization - -The packages in this project are organized according to (1) whether the data comes from the `/proc` or -`/sys` filesystem and (2) the type of information being retrieved. 
For example, most process information -can be gathered from the functions in the root `procfs` package. Information about block devices such as disk drives -is available in the `blockdevices` sub-package. - -## Building and Testing - -The procfs library is intended to be built as part of another application, so there are no distributable binaries. -However, most of the API includes unit tests which can be run with `make test`. - -### Updating Test Fixtures - -The procfs library includes a set of test fixtures which include many example files from -the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file -which is extracted automatically during testing. To add/update the test fixtures, first -ensure the `fixtures` directory is up to date by removing the existing directory and then -extracting the ttar file using `make fixtures/.unpacked` or just `make test`. - -```bash -rm -rf fixtures -make test -``` - -Next, make the required changes to the extracted files in the `fixtures` directory. When -the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file -based on the updated `fixtures` directory. And finally, verify the changes using -`git diff fixtures.ttar`. diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go deleted file mode 100644 index 916c918..0000000 --- a/vendor/github.com/prometheus/procfs/arp.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "net" - "strings" -) - -// ARPEntry contains a single row of the columnar data represented in -// /proc/net/arp. -type ARPEntry struct { - // IP address - IPAddr net.IP - // MAC address - HWAddr net.HardwareAddr - // Name of the device - Device string -} - -// GatherARPEntries retrieves all the ARP entries, parse the relevant columns, -// and then return a slice of ARPEntry's. 
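`GatherARPEntries` follows the same shape as the other procfs readers: construct an `FS` rooted at a proc mount point, then call the method for the file you want. A usage sketch (it assumes a readable `/proc/net/arp`, so it only does anything useful on Linux):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}

	entries, err := fs.GatherARPEntries()
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Printf("%s dev %s\n", e.IPAddr, e.Device)
	}
}
```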
-func (fs FS) GatherARPEntries() ([]ARPEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/arp")) - if err != nil { - return nil, fmt.Errorf("error reading arp %s: %s", fs.proc.Path("net/arp"), err) - } - - return parseARPEntries(data) -} - -func parseARPEntries(data []byte) ([]ARPEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]ARPEntry, 0) - var err error - const ( - expectedDataWidth = 6 - expectedHeaderWidth = 9 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - - if width == expectedHeaderWidth || width == 0 { - continue - } else if width == expectedDataWidth { - entry, err := parseARPEntry(columns) - if err != nil { - return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %s", err) - } - entries = append(entries, entry) - } else { - return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth) - } - - } - - return entries, err -} - -func parseARPEntry(columns []string) (ARPEntry, error) { - ip := net.ParseIP(columns[0]) - mac := net.HardwareAddr(columns[3]) - - entry := ARPEntry{ - IPAddr: ip, - HWAddr: mac, - Device: columns[5], - } - - return entry, nil -} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go deleted file mode 100644 index 10bd067..0000000 --- a/vendor/github.com/prometheus/procfs/buddyinfo.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// A BuddyInfo is the details parsed from /proc/buddyinfo. -// The data is comprised of an array of free fragments of each size. -// The sizes are 2^n*PAGE_SIZE, where n is the array index. -type BuddyInfo struct { - Node string - Zone string - Sizes []float64 -} - -// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
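`GatherARPEntries`/`parseARPEntries` above are a textbook instance of the reading-vs-parsing split that the deleted CONTRIBUTING.md prescribes: a thin public method does the file I/O, and a private function taking an `io.Reader` or byte slice does the parsing, so tests can exercise the parser without touching the filesystem. A self-contained sketch of that pattern; `readThing` and `parseThing` are hypothetical names for illustration, not procfs APIs:

```go
package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"strings"
)

// thing is a hypothetical parsed representation of some /proc file.
type thing struct {
	lines []string
}

// readThing does only the I/O: open the file and hand the stream to the
// parser. In procfs proper this half would be the public fs.Thing() method.
func readThing(path string) (thing, error) {
	f, err := os.Open(path)
	if err != nil {
		return thing{}, err
	}
	defer f.Close()
	return parseThing(f)
}

// parseThing is pure parsing over an io.Reader, so tests can feed it a
// strings.Reader (or bytes.Reader) instead of a real file.
func parseThing(r io.Reader) (thing, error) {
	var t thing
	s := bufio.NewScanner(r)
	for s.Scan() {
		t.lines = append(t.lines, strings.TrimSpace(s.Text()))
	}
	return t, s.Err()
}

func main() {
	t, err := parseThing(strings.NewReader("row one\nrow two\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(t.lines) // [row one row two]
	_ = readThing        // the I/O half; unused in this standalone demo
}
```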
-func (fs FS) BuddyInfo() ([]BuddyInfo, error) { - file, err := os.Open(fs.proc.Path("buddyinfo")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseBuddyInfo(file) -} - -func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { - var ( - buddyInfo = []BuddyInfo{} - scanner = bufio.NewScanner(r) - bucketCount = -1 - ) - - for scanner.Scan() { - var err error - line := scanner.Text() - parts := strings.Fields(line) - - if len(parts) < 4 { - return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") - } - - node := strings.TrimRight(parts[1], ",") - zone := strings.TrimRight(parts[3], ",") - arraySize := len(parts[4:]) - - if bucketCount == -1 { - bucketCount = arraySize - } else { - if bucketCount != arraySize { - return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) - } - } - - sizes := make([]float64, arraySize) - for i := 0; i < arraySize; i++ { - sizes[i], err = strconv.ParseFloat(parts[i+4], 64) - if err != nil { - return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) - } - } - - buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) - } - - return buddyInfo, scanner.Err() -} diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go deleted file mode 100644 index 2e02215..0000000 --- a/vendor/github.com/prometheus/procfs/cpuinfo.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// CPUInfo contains general information about a system CPU found in /proc/cpuinfo -type CPUInfo struct { - Processor uint - VendorID string - CPUFamily string - Model string - ModelName string - Stepping string - Microcode string - CPUMHz float64 - CacheSize string - PhysicalID string - Siblings uint - CoreID string - CPUCores uint - APICID string - InitialAPICID string - FPU string - FPUException string - CPUIDLevel uint - WP string - Flags []string - Bugs []string - BogoMips float64 - CLFlushSize uint - CacheAlignment uint - AddressSizes string - PowerManagement string -} - -// CPUInfo returns information about current system CPUs. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) CPUInfo() ([]CPUInfo, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo")) - if err != nil { - return nil, err - } - return parseCPUInfo(data) -} - -// parseCPUInfo parses data from /proc/cpuinfo -func parseCPUInfo(info []byte) ([]CPUInfo, error) { - cpuinfo := []CPUInfo{} - i := -1 - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - line := scanner.Text() - if strings.TrimSpace(line) == "" { - continue - } - field := strings.SplitN(line, ": ", 2) - switch strings.TrimSpace(field[0]) { - case "processor": - cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor - i++ - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Processor = uint(v) - case "vendor_id": - cpuinfo[i].VendorID = field[1] - case "cpu family": - cpuinfo[i].CPUFamily = field[1] - case "model": - cpuinfo[i].Model = field[1] - case "model name": - cpuinfo[i].ModelName = field[1] - case "stepping": - cpuinfo[i].Stepping = field[1] - case "microcode": - cpuinfo[i].Microcode = field[1] - case "cpu MHz": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].CPUMHz = v - case "cache size": - cpuinfo[i].CacheSize = field[1] - case "physical id": - cpuinfo[i].PhysicalID = field[1] - case "siblings": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].Siblings = uint(v) - case "core id": - cpuinfo[i].CoreID = field[1] - case "cpu cores": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUCores = uint(v) - case "apicid": - cpuinfo[i].APICID = field[1] - case "initial apicid": - cpuinfo[i].InitialAPICID = field[1] - case "fpu": - cpuinfo[i].FPU = field[1] - case "fpu_exception": - cpuinfo[i].FPUException = field[1] - case "cpuid level": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CPUIDLevel = uint(v) - case "wp": - cpuinfo[i].WP = field[1] - case "flags": - cpuinfo[i].Flags = strings.Fields(field[1]) - case "bugs": - cpuinfo[i].Bugs = strings.Fields(field[1]) - case "bogomips": - v, err := strconv.ParseFloat(field[1], 64) - if err != nil { - return nil, err - } - cpuinfo[i].BogoMips = v - case "clflush size": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CLFlushSize = uint(v) - case "cache_alignment": - v, err := strconv.ParseUint(field[1], 0, 32) - if err != nil { - return nil, err - } - cpuinfo[i].CacheAlignment = uint(v) - case "address sizes": - cpuinfo[i].AddressSizes = field[1] - case "power management": - cpuinfo[i].PowerManagement = field[1] - } - } - return cpuinfo, nil - -} diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go deleted file mode 100644 index 19d4041..0000000 --- a/vendor/github.com/prometheus/procfs/crypto.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Crypto holds info parsed from /proc/crypto. -type Crypto struct { - Alignmask *uint64 - Async bool - Blocksize *uint64 - Chunksize *uint64 - Ctxsize *uint64 - Digestsize *uint64 - Driver string - Geniv string - Internal string - Ivsize *uint64 - Maxauthsize *uint64 - MaxKeysize *uint64 - MinKeysize *uint64 - Module string - Name string - Priority *int64 - Refcnt *int64 - Seedsize *uint64 - Selftest string - Type string - Walksize *uint64 -} - -// Crypto parses an crypto-file (/proc/crypto) and returns a slice of -// structs containing the relevant info. More information available here: -// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html -func (fs FS) Crypto() ([]Crypto, error) { - data, err := ioutil.ReadFile(fs.proc.Path("crypto")) - if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) - } - crypto, err := parseCrypto(data) - if err != nil { - return nil, fmt.Errorf("error parsing crypto %s: %s", fs.proc.Path("crypto"), err) - } - return crypto, nil -} - -func parseCrypto(cryptoData []byte) ([]Crypto, error) { - crypto := []Crypto{} - - cryptoBlocks := bytes.Split(cryptoData, []byte("\n\n")) - - for _, block := range cryptoBlocks { - var newCryptoElem Crypto - - lines := strings.Split(string(block), "\n") - for _, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' { - continue - } - fields := strings.Split(line, ":") - key := strings.TrimSpace(fields[0]) - value := strings.TrimSpace(fields[1]) - vp := util.NewValueParser(value) - - switch strings.TrimSpace(key) { - case "async": - b, err := strconv.ParseBool(value) - if err == nil { - newCryptoElem.Async = b - } - case "blocksize": - newCryptoElem.Blocksize = vp.PUInt64() - case "chunksize": - newCryptoElem.Chunksize = vp.PUInt64() - case "digestsize": - newCryptoElem.Digestsize = vp.PUInt64() - case "driver": - newCryptoElem.Driver = value - case "geniv": - newCryptoElem.Geniv = value - case "internal": - newCryptoElem.Internal = value - case "ivsize": - newCryptoElem.Ivsize = vp.PUInt64() - case "maxauthsize": - newCryptoElem.Maxauthsize = vp.PUInt64() - case "max keysize": - newCryptoElem.MaxKeysize = vp.PUInt64() - case "min keysize": - newCryptoElem.MinKeysize = vp.PUInt64() - case "module": - newCryptoElem.Module = value - case "name": - newCryptoElem.Name = value - case "priority": - newCryptoElem.Priority = vp.PInt64() - case "refcnt": - newCryptoElem.Refcnt = vp.PInt64() - case "seedsize": - newCryptoElem.Seedsize = vp.PUInt64() - case "selftest": - newCryptoElem.Selftest = value - case "type": - newCryptoElem.Type = value - case "walksize": - newCryptoElem.Walksize = vp.PUInt64() - } - } - crypto = append(crypto, newCryptoElem) - } - return crypto, nil -} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go deleted file mode 100644 index e2acd6d..0000000 --- a/vendor/github.com/prometheus/procfs/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// 
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
deleted file mode 100644
index e2acd6d..0000000
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package procfs provides functions to retrieve system, kernel and process
-// metrics from the pseudo-filesystem proc.
-//
-// Example:
-//
-//    package main
-//
-//    import (
-//    	"fmt"
-//    	"log"
-//
-//    	"github.com/prometheus/procfs"
-//    )
-//
-//    func main() {
-//    	p, err := procfs.Self()
-//    	if err != nil {
-//    		log.Fatalf("could not get process: %s", err)
-//    	}
-//
-//    	stat, err := p.NewStat()
-//    	if err != nil {
-//    		log.Fatalf("could not get process stat: %s", err)
-//    	}
-//
-//    	fmt.Printf("command:  %s\n", stat.Comm)
-//    	fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-//    	fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
-//    	fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
-//    }
-//
-package procfs
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
deleted file mode 100644
index c50a18a..0000000
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ /dev/null
@@ -1,5318 +0,0 @@
-# Archive created by ttar -c -f fixtures.ttar fixtures/
-Directory: fixtures
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cmdline
-Lines: 1
-vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/comm
-Lines: 1
-vim
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cwd
-SymlinkTo: /usr/bin
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/environ
-Lines: 1
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/exe
-SymlinkTo: /usr/bin/vim
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/10
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/fdinfo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/0 -Lines: 6 -pos: 0 -flags: 02004000 -mnt_id: 13 -inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000 -inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a -inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/1 -Lines: 4 -pos: 0 -flags: 02004002 -mnt_id: 13 -eventfd-count: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/10 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/2 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/fdinfo/3 -Lines: 3 -pos: 0 -flags: 02004002 -mnt_id: 9 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/io -Lines: 7 -rchar: 750339 -wchar: 818609 -syscr: 7405 -syscw: 5245 -read_bytes: 1024 -write_bytes: 2048 -cancelled_write_bytes: -1024 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 62898 62898 processes -Max open files 2048 4096 files -Max locked memory 65536 65536 bytes -Max address space 8589934592 unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 62898 62898 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/mountstats -Lines: 20 -device rootfs mounted on / with fstype rootfs -device sysfs mounted on /sys with fstype sysfs -device proc mounted on /proc with fstype proc -device /dev/sda1 mounted on / with fstype ext4 -device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 - opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none - age: 13968 - caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 - nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured - sec: flavor=1,pseudoflavor=1 - events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 - bytes: 1207640230 0 0 0 1210214218 0 295483 0 - RPC iostats version: 1.0 p/v: 100003/4 (nfs) - xprt: tcp 832 0 1 0 11 6428 
6428 0 12154 0 24 26 5726 - per-op statistics - NULL: 0 0 0 0 0 0 0 0 - READ: 1298 1298 0 207680 1210292152 6 79386 79407 - WRITE: 0 0 0 0 0 0 0 0 - ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/net/dev -Lines: 4 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed - lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 - eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26231/ns -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/mnt -SymlinkTo: mnt:[4026531840] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/ns/net -SymlinkTo: net:[4026531993] -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/root -SymlinkTo: / -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/schedstat -Lines: 1 -411605849 93680043 79 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/stat -Lines: 1 -26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26231/status -Lines: 53 - -Name: prometheus -Umask: 0022 -State: S (sleeping) -Tgid: 26231 -Ngid: 0 -Pid: 26231 -PPid: 1 -TracerPid: 0 -Uid: 0 0 0 0 -Gid: 0 0 0 0 -FDSize: 128 -Groups: -NStgid: 1 -NSpid: 1 -NSpgid: 1 -NSsid: 1 -VmPeak: 58472 kB -VmSize: 58440 kB -VmLck: 0 kB -VmPin: 0 kB -VmHWM: 8028 kB -VmRSS: 6716 kB -RssAnon: 2092 kB -RssFile: 4624 kB -RssShmem: 0 kB -VmData: 2580 kB -VmStk: 136 kB -VmExe: 948 kB -VmLib: 6816 kB -VmPTE: 128 kB -VmPMD: 12 kB -VmSwap: 660 kB -HugetlbPages: 0 kB -Threads: 1 -SigQ: 8/63965 -SigPnd: 0000000000000000 -ShdPnd: 0000000000000000 -SigBlk: 7be3c0fe28014a03 -SigIgn: 0000000000001000 -SigCgt: 00000001800004ec -CapInh: 0000000000000000 -CapPrm: 0000003fffffffff -CapEff: 0000003fffffffff -CapBnd: 0000003fffffffff -CapAmb: 0000000000000000 -Seccomp: 0 -Cpus_allowed: ff -Cpus_allowed_list: 0-7 -Mems_allowed: 00000000,00000001 -Mems_allowed_list: 0 -voluntary_ctxt_switches: 4742839 -nonvoluntary_ctxt_switches: 1727500 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cmdline -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/comm -Lines: 1 -ata_sff -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/cwd -SymlinkTo: /does/not/exist -# ttar - - - - - 
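The stat fixtures above exercise the awkward /proc/[pid]/stat encoding: the comm field is wrapped in parentheses and may itself contain spaces or parentheses (a later fixture embeds both on purpose), so a parser has to anchor on the last ')' rather than split on whitespace. A minimal, dependency-free sketch of that step, using the first fields of the 26231 fixture shown above (this is illustrative, not the vendored parser itself):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Leading fields of fixtures/proc/26231/stat: pid, (comm), state, ...
	stat := "26231 (vim) R 5392 7446"

	// comm runs from the first '(' to the *last* ')', since comm may contain ')'.
	l := strings.Index(stat, "(")
	r := strings.LastIndex(stat, ")")

	pid := strings.TrimSpace(stat[:l])
	comm := stat[l+1 : r]
	rest := strings.Fields(stat[r+1:]) // remaining fields are whitespace-separated

	fmt.Printf("pid=%s comm=%q state=%s\n", pid, comm, rest[0])
}
```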
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26232/fd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/0 -SymlinkTo: ../../symlinktargets/abc -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/1 -SymlinkTo: ../../symlinktargets/def -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/2 -SymlinkTo: ../../symlinktargets/ghi -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/3 -SymlinkTo: ../../symlinktargets/uvw -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/fd/4 -SymlinkTo: ../../symlinktargets/xyz -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/limits -Lines: 17 -Limit Soft Limit Hard Limit Units -Max cpu time unlimited unlimited seconds -Max file size unlimited unlimited bytes -Max data size unlimited unlimited bytes -Max stack size 8388608 unlimited bytes -Max core file size 0 unlimited bytes -Max resident set unlimited unlimited bytes -Max processes 29436 29436 processes -Max open files 1024 4096 files -Max locked memory 65536 65536 bytes -Max address space unlimited unlimited bytes -Max file locks unlimited unlimited locks -Max pending signals 29436 29436 signals -Max msgqueue size 819200 819200 bytes -Max nice priority 0 0 -Max realtime priority 0 0 -Max realtime timeout unlimited unlimited us -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/root -SymlinkTo: /does/not/exist -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26232/stat -Lines: 1 -33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/26233 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/cmdline -Lines: 1 -com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/26233/schedstat -Lines: 8 - ____________________________________ -< this is a malformed schedstat file > - ------------------------------------ - \ ^__^ - \ (oo)\_______ - (__)\ )\/\ - ||----w | - || || -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/584 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/584/stat -Lines: 2 -1020 ((a b ) 
( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 -#!/bin/cat /proc/self/stat -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/buddyinfo -Lines: 3 -Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 -Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 -Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/cpuinfo -Lines: 216 -processor : 0 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.998 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 0 -initial apicid : 0 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 1 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.037 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 2 -initial apicid : 2 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 2 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 
-microcode : 0xb4 -cpu MHz : 800.010 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 4 -initial apicid : 4 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 3 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.028 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 6 -initial apicid : 6 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 4 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 799.989 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 0 -cpu cores : 4 -apicid : 1 -initial apicid : 1 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 
xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 5 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.083 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 1 -cpu cores : 4 -apicid : 3 -initial apicid : 3 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 6 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.017 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 2 -cpu cores : 4 -apicid : 5 -initial apicid : 5 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -processor : 7 -vendor_id : GenuineIntel -cpu family : 6 -model : 142 -model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz -stepping : 10 -microcode : 0xb4 -cpu MHz : 800.030 -cache size : 8192 KB -physical id : 0 -siblings : 8 -core id : 3 -cpu cores : 4 -apicid : 7 -initial apicid : 7 -fpu : yes -fpu_exception : yes -cpuid level : 22 -wp : yes -flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid 
aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d -bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs -bogomips : 4224.00 -clflush size : 64 -cache_alignment : 64 -address sizes : 39 bits physical, 48 bits virtual -power management: - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/crypto -Lines: 971 -name : ccm(aes) -driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni)) -module : ccm -priority : 300 -refcnt : 4 -selftest : passed -internal : no -type : aead -async : no -blocksize : 1 -ivsize : 16 -maxauthsize : 16 -geniv : - -name : cbcmac(aes) -driver : cbcmac(aes-aesni) -module : ccm -priority : 300 -refcnt : 7 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 16 - -name : ecdh -driver : ecdh-generic -module : ecdh_generic -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : ecb(arc4) -driver : ecb(arc4)-generic -module : arc4 -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 1 -max keysize : 256 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : arc4 -driver : arc4-generic -module : arc4 -priority : 0 -refcnt : 3 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 1 -max keysize : 256 - -name : crct10dif -driver : crct10dif-pclmul -module : crct10dif_pclmul -priority : 200 -refcnt : 2 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32 -driver : crc32-pclmul -module : crc32_pclmul -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : __ghash -driver : cryptd(__ghash-pclmulqdqni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : ghash -driver : ghash-clmulni -module : ghash_clmulni_intel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : ahash -async : yes -blocksize : 16 -digestsize : 16 - -name : __ghash -driver : __ghash-pclmulqdqni -module : ghash_clmulni_intel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : shash -blocksize : 16 -digestsize : 16 - -name : crc32c -driver : crc32c-intel -module : crc32c_intel -priority : 200 -refcnt : 5 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : cbc(aes) -driver : cbc(aes-aesni) -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr(aes-aesni) -module : kernel -priority : 300 -refcnt : 5 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : pkcs1pad(rsa,sha256) -driver : pkcs1pad(rsa-generic,sha256) -module : kernel -priority : 100 -refcnt : 1 -selftest : passed 
-internal : no -type : akcipher - -name : __xts(aes) -driver : cryptd(__xts-aes-aesni) -module : kernel -priority : 451 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : xts(aes) -driver : xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : cryptd(__ctr-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : ctr(aes) -driver : ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : cryptd(__cbc-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : cbc(aes) -driver : cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : cryptd(__ecb-aes-aesni) -module : kernel -priority : 450 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : ecb(aes) -driver : ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : yes -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __generic-gcm-aes-aesni -driver : cryptd(__driver-generic-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : gcm(aes) -driver : generic-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __generic-gcm-aes-aesni -driver : __driver-generic-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 12 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : cryptd(__driver-gcm-aes-aesni) -module : kernel -priority : 50 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : rfc4106(gcm(aes)) -driver : rfc4106-gcm-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : no -type : aead -async : yes -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __gcm-aes-aesni -driver : __driver-gcm-aes-aesni -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : yes -type : aead -async : no -blocksize : 1 -ivsize : 8 -maxauthsize : 16 -geniv : - -name : __xts(aes) -driver : __xts-aes-aesni -module : kernel -priority : 401 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async 
: no -blocksize : 16 -min keysize : 32 -max keysize : 64 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ctr(aes) -driver : __ctr-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 1 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __cbc(aes) -driver : __cbc-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 16 -chunksize : 16 -walksize : 16 - -name : __ecb(aes) -driver : __ecb-aes-aesni -module : kernel -priority : 400 -refcnt : 1 -selftest : passed -internal : yes -type : skcipher -async : no -blocksize : 16 -min keysize : 16 -max keysize : 32 -ivsize : 0 -chunksize : 16 -walksize : 16 - -name : __aes -driver : __aes-aesni -module : kernel -priority : 300 -refcnt : 1 -selftest : passed -internal : yes -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : aes -driver : aes-aesni -module : kernel -priority : 300 -refcnt : 8 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : hmac(sha1) -driver : hmac(sha1-generic) -module : kernel -priority : 100 -refcnt : 9 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : ghash -driver : ghash-generic -module : kernel -priority : 100 -refcnt : 3 -selftest : passed -internal : no -type : shash -blocksize : 16 -digestsize : 16 - -name : jitterentropy_rng -driver : jitterentropy_rng -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha256 -module : kernel -priority : 221 -refcnt : 2 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha512 -module : kernel -priority : 220 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha384 -module : kernel -priority : 219 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_hmac_sha1 -module : kernel -priority : 218 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha256 -module : kernel -priority : 217 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha512 -module : kernel -priority : 216 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha384 -module : kernel -priority : 215 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_sha1 -module : kernel -priority : 214 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes256 -module : kernel -priority : 213 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes192 -module : kernel -priority : 212 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_nopr_ctr_aes128 -module : kernel -priority : 211 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : hmac(sha256) -driver : hmac(sha256-generic) -module : kernel -priority : 100 -refcnt : 10 -selftest : passed -internal : no -type : shash -blocksize : 64 
-digestsize : 32 - -name : stdrng -driver : drbg_pr_hmac_sha256 -module : kernel -priority : 210 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha512 -module : kernel -priority : 209 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha384 -module : kernel -priority : 208 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_hmac_sha1 -module : kernel -priority : 207 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha256 -module : kernel -priority : 206 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha512 -module : kernel -priority : 205 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha384 -module : kernel -priority : 204 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_sha1 -module : kernel -priority : 203 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes256 -module : kernel -priority : 202 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes192 -module : kernel -priority : 201 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : stdrng -driver : drbg_pr_ctr_aes128 -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : rng -seedsize : 0 - -name : 842 -driver : 842-scomp -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : 842 -driver : 842-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo-rle -driver : lzo-rle-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo-rle -driver : lzo-rle-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : lzo -driver : lzo-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : lzo -driver : lzo-generic -module : kernel -priority : 0 -refcnt : 9 -selftest : passed -internal : no -type : compression - -name : crct10dif -driver : crct10dif-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 2 - -name : crc32c -driver : crc32c-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 4 - -name : zlib-deflate -driver : zlib-deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-scomp -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : scomp - -name : deflate -driver : deflate-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : aes -driver : aes-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -name : sha224 -driver : sha224-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 
-digestsize : 28 - -name : sha256 -driver : sha256-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 32 - -name : sha1 -driver : sha1-generic -module : kernel -priority : 100 -refcnt : 11 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 20 - -name : md5 -driver : md5-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 64 -digestsize : 16 - -name : ecb(cipher_null) -driver : ecb-cipher_null -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : skcipher -async : no -blocksize : 1 -min keysize : 0 -max keysize : 0 -ivsize : 0 -chunksize : 1 -walksize : 1 - -name : digest_null -driver : digest_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : shash -blocksize : 1 -digestsize : 0 - -name : compress_null -driver : compress_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : compression - -name : cipher_null -driver : cipher_null-generic -module : kernel -priority : 0 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 1 -min keysize : 0 -max keysize : 0 - -name : rsa -driver : rsa-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : akcipher - -name : dh -driver : dh-generic -module : kernel -priority : 100 -refcnt : 1 -selftest : passed -internal : no -type : kpp - -name : aes -driver : aes-asm -module : kernel -priority : 200 -refcnt : 1 -selftest : passed -internal : no -type : cipher -blocksize : 16 -min keysize : 16 -max keysize : 32 - -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/diskstats -Lines: 49 - 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0 - 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0 - 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0 - 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0 - 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0 - 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0 - 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0 - 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0 - 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0 - 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0 - 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0 - 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0 - 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0 - 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0 - 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0 - 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0 - 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0 - 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0 - 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0 - 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0 - 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0 - 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0 - 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0 - 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0 - 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804 - 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36 - 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32 - 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60 - 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428 - 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256 - 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84 - 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416 - 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104 - 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44 - 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632 - 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156 - 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24 - 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68 - 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80 
- 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228 - 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720 - 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992 - 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0 - 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546 - 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16 - 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970 - 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130 - 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0 - 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/fs/xfs/stat -Lines: 23 -extent_alloc 92447 97589 92448 93751 -abt 0 0 0 0 -blk_map 1767055 188820 184891 92447 92448 2140766 0 -bmbt 0 0 0 0 -dir 185039 92447 92444 136422 -trans 706 944304 0 -ig 185045 58807 0 126238 0 33637 22 -log 2883 113448 9 17360 739 -push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 -xstrat 92447 0 -rw 107739 94045 -attr 4 0 0 0 -icluster 8677 7849 135802 -vnodes 92601 0 0 0 92444 92444 92444 0 -buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 -abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 -abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 -bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 -fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -qm 0 0 0 0 0 0 0 0 -xpc 399724544 92823103 86219234 -debug 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/mdstat -Lines: 56 -Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] - -md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S) - 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] - -md127 : active raid1 sdi2[0] sdj2[1] - 312319552 blocks [2/2] [UU] - -md0 : active raid1 sdi1[0] sdj1[1] - 248896 blocks [2/2] [UU] - -md4 : inactive raid1 sda3[0](F) sdb3[1](S) - 4883648 blocks [2/2] [UU] - -md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0] - 195310144 blocks [2/1] [U_] - [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S) - 195310144 blocks [2/2] [UU] - [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec - -md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F) - 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] - bitmap: 0/30 pages [0KB], 65536KB chunk - -md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S) - 523968 blocks super 1.2 [4/4] [UUUU] - resync=DELAYED - -md10 : active raid0 sda1[0] sdb1[1] - 314159265 blocks 64k chunks - -md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S) - 4190208 blocks super 1.2 [2/2] [UU] - resync=PENDING - -md12 : active raid0 sdc2[0] sdd2[1] - 3886394368 blocks super 1.2 512k chunks - -md126 : active raid0 sdb[1] sdc[0] - 1855870976 blocks super external:/md127/0 128k chunks - -md219 : inactive sdb[2](S) sdc[1](S) sda[0](S) - 7932 blocks super external:imsm - -md00 : active raid0 xvdb[0] - 4186624 blocks super 1.2 256k chunks - -md120 : active linear sda1[1] sdb1[0] - 2095104 blocks super 1.2 0k rounding - -md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0] - 322560 blocks super 1.2 512k chunks - -unused devices: -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/meminfo -Lines: 42 -MemTotal: 15666184 kB -MemFree: 440324 kB -Buffers: 1020128 kB -Cached: 12007640 kB -SwapCached: 0 kB -Active: 6761276 kB -Inactive: 6532708 kB -Active(anon): 267256 kB -Inactive(anon): 268 kB -Active(file): 6494020 kB -Inactive(file): 6532440 kB -Unevictable: 0 kB -Mlocked: 0 kB -SwapTotal: 0 kB -SwapFree: 0 kB -Dirty: 768 kB -Writeback: 0 kB -AnonPages: 266216 kB -Mapped: 44204 kB -Shmem: 1308 kB -Slab: 1807264 kB -SReclaimable: 1738124 kB -SUnreclaim: 69140 kB -KernelStack: 1616 kB -PageTables: 5288 kB -NFS_Unstable: 0 kB -Bounce: 0 kB -WritebackTmp: 0 kB -CommitLimit: 7833092 kB -Committed_AS: 530844 kB -VmallocTotal: 34359738367 kB -VmallocUsed: 36596 kB -VmallocChunk: 34359637840 kB -HardwareCorrupted: 0 kB -AnonHugePages: 12288 kB -HugePages_Total: 0 -HugePages_Free: 0 -HugePages_Rsvd: 0 -HugePages_Surp: 0 -Hugepagesize: 2048 kB -DirectMap4k: 91136 kB -DirectMap2M: 16039936 kB -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/arp -Lines: 2 -IP address HW type Flags HW address Mask Device -192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/dev -Lines: 6 -Inter-| Receive | Transmit - face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed -vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 -docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 - eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs -Lines: 21 -IP Virtual Server version 1.2.1 (size=4096) -Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn -TCP C0A80016:0CEA wlc - -> C0A85216:0CEA Tunnel 100 248 2 - -> C0A85318:0CEA Tunnel 100 248 2 - -> C0A85315:0CEA Tunnel 100 248 1 -TCP C0A80039:0CEA wlc - -> C0A85416:0CEA Tunnel 0 0 0 - -> C0A85215:0CEA Tunnel 100 1499 0 - -> C0A83215:0CEA Tunnel 100 1498 0 -TCP 
C0A80037:0CEA wlc - -> C0A8321A:0CEA Tunnel 0 0 0 - -> C0A83120:0CEA Tunnel 100 0 0 -TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh - -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 - -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 -FWM 10001000 wlc - -> C0A8321A:0CEA Route 0 0 1 - -> C0A83215:0CEA Route 0 0 2 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/ip_vs_stats -Lines: 6 - Total Incoming Outgoing Incoming Outgoing - Conns Packets Packets Bytes Bytes - 16AA370 E33656E5 0 51D8C8883AB3 0 - - Conns/s Pkts/s Pkts/s Bytes/s Bytes/s - 4 1FB3C 0 1282A8F 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/proc/net/rpc -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfs -Lines: 5 -net 18628 0 18628 6 -rpc 4329785 0 4338291 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 -proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/rpc/nfsd -Lines: 11 -rc 0 6 18622 -fh 0 0 0 0 0 -io 157286400 0 -th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 -ra 32 0 0 0 0 0 0 0 0 0 0 0 -net 18628 0 18628 6 -rpc 18628 0 0 0 0 -proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 -proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 -proc4 2 2 10853 -proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat -Lines: 6 -sockets: used 1602 -TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22 -UDP: inuse 12 mem 62 -UDPLITE: inuse 0 -RAW: inuse 0 -FRAG: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/sockstat6 -Lines: 5 -TCP6: inuse 17 -UDP6: inuse 9 -UDPLITE6: inuse 0 -RAW6: inuse 1 -FRAG6: inuse 0 memory 0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/softnet_stat -Lines: 1 -00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix -Lines: 6 -Num RefCount Protocol Flags Type St Inode Path -0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control -0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log -0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432 -0000000000000000: 00000003 00000000 00000000 0001 03 5091797 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/proc/net/unix_without_inode -Lines: 6 -Num RefCount Protocol Flags Type St Path -0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432 
[Elided: the remainder of this hunk deletes a vendored test fixture archive, evidently fixtures.ttar from github.com/prometheus/procfs. The archive is a ttar-format dump of several hundred /proc and /sys fixture files, including proc/net/unix and proc/net/xfrm_stat, proc/pressure/*, proc/schedstat, proc/stat, proc/sys/vm/*, proc/zoneinfo, sys/block/*, sys/class/infiniband/mlx4_0/*, sys/class/net/eth0/*, sys/class/power_supply/*, sys/class/powercap/intel-rapl*, sys/class/thermal/*, the ACPI power-supply device trees under sys/devices, bcache stats, and PCI device attributes. Each ttar record starts with a "Path:" or "Directory:" header, optionally followed by a "SymlinkTo:" target, a "Lines: N" payload of N content lines, and a "Mode:" entry; records are separated by "# ttar - - -" marker lines. The raw fixture contents, which got flattened in extraction, are elided here.]
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent -Lines: 6 -DRIVER=e1000e -PCI_CLASS=20000 -PCI_ID=8086:15D7 -PCI_SUBSYS_ID=17AA:225A -PCI_SLOT_NAME=0000:00:1f.6 -MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor -Lines: 1 -0x8086 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/name -Lines: 1 -demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/0/pool -Lines: 1 -iscsi-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/rbd/1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/name -Lines: 1 -wrong -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/rbd/1/pool -Lines: 1 -wrong-images -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/clocksource/clocksource0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource -Lines: 1 -tsc hpet acpi_pm -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource -Lines: 1 -tsc -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq -SymlinkTo: ../cpufreq/policy0 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count -Lines: 1 -10084 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu0/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpu0/topology/core_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings -Lines: 1 -11 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list -Lines: 1 -0,4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq -Lines: 1 -1200195 -Mode: 400 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency -Lines: 1 -4294967295 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus -Lines: 1 -1 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors -Lines: 1 -performance powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver -Lines: 1 -intel_pstate -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor -Lines: 1 -powersave -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq -Lines: 1 -3300000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq -Lines: 1 -1200000 -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed -Lines: 1 - -Mode: 664 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count -Lines: 1 -523 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count -Lines: 1 -34818 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpu1/topology -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings -Lines: 1 -ff -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list -Lines: 1 -0-7 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings -Lines: 1 -22 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list -Lines: 1 -1,5 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0 -Mode: 775 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq -Lines: 1 -2400000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq -Lines: 1 -800000 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors -Lines: 1 -performance powersave -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq -Lines: 1 -1219917 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver -Lines: 1 -intel_pstate -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor -Lines: 1 -powersave -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq -Lines: 1 -2400000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq -Lines: 1 -800000 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed -Lines: 1 - -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute -Mode: 755 -# ttar 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0 -Mode: 777 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written -Lines: 1 -512 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats -Lines: 5 -Unused: 99% -Metadata: 0% -Average: 10473 -Sectors per Q: 64 -Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946] -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us -Lines: 1 -1305 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits -Lines: 1 -289 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio -Lines: 1 -100 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits -Lines: 1 -546 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# 
ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes -Lines: 1 -808189952 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly -Lines: 1 -131072 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total -Lines: 1 -2147483648 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used -Lines: 1 -1867776 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes -Lines: 1 -933888 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes -Lines: 1 -1073741824 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used -Lines: 1 -32768 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes -Lines: 1 -8388608 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned 
-Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size -Lines: 1 -20971520 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label -Lines: 1 -fixture -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid -Lines: 1 -0abb23a9-579b-43e6-ad30-227ef47fcb9d -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data -Mode: 755 -# ttar - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes -Lines: 1 -644087808 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly -Lines: 1 -262144 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags -Lines: 1 -4 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes -Lines: 1 -114688 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes -Lines: 1 -429391872 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used -Lines: 
1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags -Lines: 1 -2 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes -Lines: 1 -16777216 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned -Lines: 1 -0 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22 -SymlinkTo: ../../../../devices/virtual/block/loop22 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23 -SymlinkTo: ../../../../devices/virtual/block/loop23 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24 -SymlinkTo: ../../../../devices/virtual/block/loop24 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25 -SymlinkTo: ../../../../devices/virtual/block/loop25 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56 -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata -Lines: 1 -1 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid -Lines: 1 -7f07c59f-6136-449c-ab87-e1cf2328731b -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize -Lines: 1 -16384 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override -Lines: 1 -0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize -Lines: 1 -4096 -Mode: 444 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sda1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sda1/stats/stats -Lines: 1 -extent_alloc 1 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/fs/xfs/sdb1/stats -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/fs/xfs/sdb1/stats/stats -Lines: 1 -extent_alloc 2 0 0 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path -Lines: 1 -/home/iscsi/file_back_1G -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path -Lines: 1 -/dev/rbd1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path -Lines: 1 -/dev/rbd/iscsi-images/demo -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path -Lines: 0 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0 
-Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d -SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -204950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -40325 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026 -SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: 
fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -104950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -20095 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -71235 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686 -SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -301950 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -10195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -30195 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1 -Mode: 
755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable -Lines: 1 -1 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0 -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893 -SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port -Mode: 755 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds -Lines: 1 -1234 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes -Lines: 1 -1504 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes -Lines: 1 -4733 -Mode: 644 -# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go deleted file mode 100644 index 0102ab0..0000000 --- a/vendor/github.com/prometheus/procfs/fs.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "github.com/prometheus/procfs/internal/fs" -) - -// FS represents the pseudo-filesystem sys, which provides an interface to -// kernel data structures. -type FS struct { - proc fs.FS -} - -// DefaultMountPoint is the common mount point of the proc filesystem. -const DefaultMountPoint = fs.DefaultProcMountPoint - -// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint. -// It will error if the mount point directory can't be read or is a file. 
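The fs.go hunk above is the procfs package's entry point: FS wraps a proc mount point, and every collector removed later in this diff (IPVS, mdstat, meminfo, mountinfo, mountstats) hangs off it. A minimal consumer sketch, assuming only the API visible in these deleted hunks (NewDefaultFS as defined directly below, plus the Meminfo collector from the meminfo.go hunk further down):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS (defined below) is just NewFS(DefaultMountPoint), i.e. "/proc".
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	// Meminfo is one of the collectors whose vendored copy is removed in
	// this diff; it parses /proc/meminfo into a struct of uint64 counters.
	mi, err := fs.Meminfo()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("MemTotal=%d MemFree=%d MemAvailable=%d kB\n",
		mi.MemTotal, mi.MemFree, mi.MemAvailable)
}
```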
-func NewDefaultFS() (FS, error) { - return NewFS(DefaultMountPoint) -} - -// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error -// if the mount point directory can't be read or is a file. -func NewFS(mountPoint string) (FS, error) { - fs, err := fs.NewFS(mountPoint) - if err != nil { - return FS{}, err - } - return FS{fs}, nil -} diff --git a/vendor/github.com/prometheus/procfs/go.mod b/vendor/github.com/prometheus/procfs/go.mod deleted file mode 100644 index 0e04e5d..0000000 --- a/vendor/github.com/prometheus/procfs/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/prometheus/procfs - -go 1.12 - -require ( - github.com/google/go-cmp v0.3.1 - golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e -) diff --git a/vendor/github.com/prometheus/procfs/go.sum b/vendor/github.com/prometheus/procfs/go.sum deleted file mode 100644 index 33b824b..0000000 --- a/vendor/github.com/prometheus/procfs/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go deleted file mode 100644 index 565e89e..0000000 --- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fs - -import ( - "fmt" - "os" - "path/filepath" -) - -const ( - // DefaultProcMountPoint is the common mount point of the proc filesystem. - DefaultProcMountPoint = "/proc" - - // DefaultSysMountPoint is the common mount point of the sys filesystem. - DefaultSysMountPoint = "/sys" - - // DefaultConfigfsMountPoint is the common mount point of the configfs - DefaultConfigfsMountPoint = "/sys/kernel/config" -) - -// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an -// interface to kernel data structures. -type FS string - -// NewFS returns a new FS mounted under the given mountPoint. It will error -// if the mount point can't be read. -func NewFS(mountPoint string) (FS, error) { - info, err := os.Stat(mountPoint) - if err != nil { - return "", fmt.Errorf("could not read %s: %s", mountPoint, err) - } - if !info.IsDir() { - return "", fmt.Errorf("mount point %s is not a directory", mountPoint) - } - - return FS(mountPoint), nil -} - -// Path appends the given path elements to the filesystem path, adding separators -// as necessary. -func (fs FS) Path(p ...string) string { - return filepath.Join(append([]string{string(fs)}, p...)...) 
-} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go deleted file mode 100644 index 755591d..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/parse.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// ParseUint32s parses a slice of strings into a slice of uint32s. -func ParseUint32s(ss []string) ([]uint32, error) { - us := make([]uint32, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 32) - if err != nil { - return nil, err - } - - us = append(us, uint32(u)) - } - - return us, nil -} - -// ParseUint64s parses a slice of strings into a slice of uint64s. -func ParseUint64s(ss []string) ([]uint64, error) { - us := make([]uint64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, u) - } - - return us, nil -} - -// ParsePInt64s parses a slice of strings into a slice of int64 pointers. -func ParsePInt64s(ss []string) ([]*int64, error) { - us := make([]*int64, 0, len(ss)) - for _, s := range ss { - u, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return nil, err - } - - us = append(us, &u) - } - - return us, nil -} - -// ReadUintFromFile reads a file and attempts to parse a uint64 from it. -func ReadUintFromFile(path string) (uint64, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) -} - -// ParseBool parses a string into a boolean pointer. -func ParseBool(b string) *bool { - var truth bool - switch b { - case "enabled": - truth = true - case "disabled": - truth = false - default: - return nil - } - return &truth -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go deleted file mode 100644 index 8051161..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "io" - "io/ioutil" - "os" -) - -// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file. 
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because -// many files in /proc and /sys report incorrect file sizes (either 0 or 4096). -// Reads a max file size of 512kB. For files larger than this, a scanner -// should be used. -func ReadFileNoStat(filename string) ([]byte, error) { - const maxBufferSize = 1024 * 512 - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - defer f.Close() - - reader := io.LimitReader(f, maxBufferSize) - return ioutil.ReadAll(reader) -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go deleted file mode 100644 index c07de0b..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux,!appengine - -package util - -import ( - "bytes" - "os" - "syscall" -) - -// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly. -// https://github.com/prometheus/node_exporter/pull/728/files -// -// Note that this function will not read files larger than 128 bytes. -func SysReadFile(file string) (string, error) { - f, err := os.Open(file) - if err != nil { - return "", err - } - defer f.Close() - - // On some machines, hwmon drivers are broken and return EAGAIN. This causes - // Go's ioutil.ReadFile implementation to poll forever. - // - // Since we either want to read data or bail immediately, do the simplest - // possible read using syscall directly. - const sysFileBufferSize = 128 - b := make([]byte, sysFileBufferSize) - n, err := syscall.Read(int(f.Fd()), b) - if err != nil { - return "", err - } - - return string(bytes.TrimSpace(b[:n])), nil -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go deleted file mode 100644 index bd55b45..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux,appengine !linux - -package util - -import ( - "fmt" -) - -// SysReadFile is here implemented as a noop for builds that do not support -// the read syscall. For example Windows, or Linux on Google App Engine. 
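Two related helpers sit in this hunk: ReadFileNoStat (a LimitReader-bounded ReadAll, because /proc and /sys report bogus file sizes) and SysReadFile (a single raw bounded syscall.Read, because broken hwmon drivers return EAGAIN and would make Go's file poller retry forever; the non-Linux noop stub follows below). A self-contained restatement of the SysReadFile technique — the sysfs path in main is only an illustrative assumption:

```go
// +build linux

package main

import (
	"bytes"
	"fmt"
	"log"
	"os"
	"syscall"
)

// readSysFile mirrors the vendored util.SysReadFile: one bounded
// syscall.Read instead of ioutil.ReadFile, so a driver that returns
// EAGAIN fails immediately rather than being retried by the runtime.
func readSysFile(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	b := make([]byte, 128) // sysfs attributes are single small values
	n, err := syscall.Read(int(f.Fd()), b)
	if err != nil {
		return "", err
	}
	return string(bytes.TrimSpace(b[:n])), nil
}

func main() {
	v, err := readSysFile("/sys/class/net/lo/mtu") // example attribute only
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("lo mtu:", v)
}
```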
-func SysReadFile(file string) (string, error) { - return "", fmt.Errorf("not supported on this platform") -} diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go deleted file mode 100644 index fe2355d..0000000 --- a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package util - -import ( - "strconv" -) - -// TODO(mdlayher): util packages are an anti-pattern and this should be moved -// somewhere else that is more focused in the future. - -// A ValueParser enables parsing a single string into a variety of data types -// in a concise and safe way. The Err method must be invoked after invoking -// any other methods to ensure a value was successfully parsed. -type ValueParser struct { - v string - err error -} - -// NewValueParser creates a ValueParser using the input string. -func NewValueParser(v string) *ValueParser { - return &ValueParser{v: v} -} - -// Int interprets the underlying value as an int and returns that value. -func (vp *ValueParser) Int() int { return int(vp.int64()) } - -// PInt64 interprets the underlying value as an int64 and returns a pointer to -// that value. -func (vp *ValueParser) PInt64() *int64 { - if vp.err != nil { - return nil - } - - v := vp.int64() - return &v -} - -// int64 interprets the underlying value as an int64 and returns that value. -// TODO: export if/when necessary. -func (vp *ValueParser) int64() int64 { - if vp.err != nil { - return 0 - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseInt(vp.v, base, 64) - if err != nil { - vp.err = err - return 0 - } - - return v -} - -// PUInt64 interprets the underlying value as an uint64 and returns a pointer to -// that value. -func (vp *ValueParser) PUInt64() *uint64 { - if vp.err != nil { - return nil - } - - // A base value of zero makes ParseInt infer the correct base using the - // string's prefix, if any. - const base = 0 - v, err := strconv.ParseUint(vp.v, base, 64) - if err != nil { - vp.err = err - return nil - } - - return &v -} - -// Err returns the last error, if any, encountered by the ValueParser. -func (vp *ValueParser) Err() error { - return vp.err -} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go deleted file mode 100644 index 89e4477..0000000 --- a/vendor/github.com/prometheus/procfs/ipvs.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. -type IPVSStats struct { - // Total count of connections. - Connections uint64 - // Total incoming packages processed. - IncomingPackets uint64 - // Total outgoing packages processed. - OutgoingPackets uint64 - // Total incoming traffic. - IncomingBytes uint64 - // Total outgoing traffic. - OutgoingBytes uint64 -} - -// IPVSBackendStatus holds current metrics of one virtual / real address pair. -type IPVSBackendStatus struct { - // The local (virtual) IP address. - LocalAddress net.IP - // The remote (real) IP address. - RemoteAddress net.IP - // The local (virtual) port. - LocalPort uint16 - // The remote (real) port. - RemotePort uint16 - // The local firewall mark - LocalMark string - // The transport protocol (TCP, UDP). - Proto string - // The current number of active connections for this virtual/real address pair. - ActiveConn uint64 - // The current number of inactive connections for this virtual/real address pair. - InactConn uint64 - // The current weight of this virtual/real address pair. - Weight uint64 -} - -// IPVSStats reads the IPVS statistics from the specified `proc` filesystem. -func (fs FS) IPVSStats() (IPVSStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats")) - if err != nil { - return IPVSStats{}, err - } - - return parseIPVSStats(bytes.NewReader(data)) -} - -// parseIPVSStats performs the actual parsing of `ip_vs_stats`. -func parseIPVSStats(r io.Reader) (IPVSStats, error) { - var ( - statContent []byte - statLines []string - statFields []string - stats IPVSStats - ) - - statContent, err := ioutil.ReadAll(r) - if err != nil { - return IPVSStats{}, err - } - - statLines = strings.SplitN(string(statContent), "\n", 4) - if len(statLines) != 4 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") - } - - statFields = strings.Fields(statLines[2]) - if len(statFields) != 5 { - return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") - } - - stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) - if err != nil { - return IPVSStats{}, err - } - stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) - if err != nil { - return IPVSStats{}, err - } - - return stats, nil -} - -// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. 
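An easy detail to miss in parseIPVSStats above: the kernel reports the /proc/net/ip_vs_stats totals in hexadecimal, which is why every ParseUint call uses base 16, and why the parser splits off exactly the third line before expecting five fields. A sketch against a captured sample (the numbers are illustrative, not from the source):

```go
package main

import (
	"fmt"
	"log"
	"strconv"
	"strings"
)

// Illustrative /proc/net/ip_vs_stats contents: two header lines, one
// line of cumulative totals (hex-encoded), then per-second rates.
const sample = `   Total Incoming Outgoing         Incoming         Outgoing
   Conns  Packets  Packets            Bytes            Bytes
      16    18B1C        0          10DC132                0

 Conns/s   Pkts/s   Pkts/s          Bytes/s          Bytes/s
       0        0        0                0                0
`

func main() {
	lines := strings.SplitN(sample, "\n", 4)
	if len(lines) != 4 {
		log.Fatal("ip_vs_stats corrupt: too short")
	}
	fields := strings.Fields(lines[2]) // the cumulative-totals line
	if len(fields) != 5 {
		log.Fatal("ip_vs_stats corrupt: unexpected number of fields")
	}
	conns, err := strconv.ParseUint(fields[0], 16, 64) // note base 16
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total connections:", conns) // 0x16 = 22
}
```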
-func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) { - file, err := os.Open(fs.proc.Path("net/ip_vs")) - if err != nil { - return nil, err - } - defer file.Close() - - return parseIPVSBackendStatus(file) -} - -func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { - var ( - status []IPVSBackendStatus - scanner = bufio.NewScanner(file) - proto string - localMark string - localAddress net.IP - localPort uint16 - err error - ) - - for scanner.Scan() { - fields := strings.Fields(scanner.Text()) - if len(fields) == 0 { - continue - } - switch { - case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": - continue - case fields[0] == "TCP" || fields[0] == "UDP": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = "" - localAddress, localPort, err = parseIPPort(fields[1]) - if err != nil { - return nil, err - } - case fields[0] == "FWM": - if len(fields) < 2 { - continue - } - proto = fields[0] - localMark = fields[1] - localAddress = nil - localPort = 0 - case fields[0] == "->": - if len(fields) < 6 { - continue - } - remoteAddress, remotePort, err := parseIPPort(fields[1]) - if err != nil { - return nil, err - } - weight, err := strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - activeConn, err := strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - inactConn, err := strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - status = append(status, IPVSBackendStatus{ - LocalAddress: localAddress, - LocalPort: localPort, - LocalMark: localMark, - RemoteAddress: remoteAddress, - RemotePort: remotePort, - Proto: proto, - Weight: weight, - ActiveConn: activeConn, - InactConn: inactConn, - }) - } - } - return status, nil -} - -func parseIPPort(s string) (net.IP, uint16, error) { - var ( - ip net.IP - err error - ) - - switch len(s) { - case 13: - ip, err = hex.DecodeString(s[0:8]) - if err != nil { - return nil, 0, err - } - case 46: - ip = net.ParseIP(s[1:40]) - if ip == nil { - return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) - } - default: - return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) - } - - portString := s[len(s)-4:] - if len(portString) != 4 { - return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) - } - port, err := strconv.ParseUint(portString, 16, 16) - if err != nil { - return nil, 0, err - } - - return ip, uint16(port), nil -} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go deleted file mode 100644 index 2af3ada..0000000 --- a/vendor/github.com/prometheus/procfs/mdstat.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
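One more note on the ipvs.go hunk before the mdstat parser that starts below: parseIPPort relies on the kernel's fixed-width encodings — an IPv4 backend is 8 hex digits, a colon, and a 4-hex-digit port (13 characters total), while IPv6 is a bracketed 39-character address with the same port suffix (46 characters). A sketch of just the IPv4 branch, assuming that same encoding:

```go
package main

import (
	"encoding/hex"
	"fmt"
	"log"
	"net"
	"strconv"
)

// decodeV4 mirrors the IPv4 branch of the vendored parseIPPort:
// "C0A80001:0050" -> 192.168.0.1, port 80.
func decodeV4(s string) (net.IP, uint16, error) {
	if len(s) != 13 {
		return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
	}
	raw, err := hex.DecodeString(s[0:8]) // four address bytes
	if err != nil {
		return nil, 0, err
	}
	port, err := strconv.ParseUint(s[len(s)-4:], 16, 16) // hex port
	if err != nil {
		return nil, 0, err
	}
	return net.IP(raw), uint16(port), nil
}

func main() {
	ip, port, err := decodeV4("C0A80001:0050")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s:%d\n", ip, port) // 192.168.0.1:80
}
```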
- -package procfs - -import ( - "fmt" - "io/ioutil" - "regexp" - "strconv" - "strings" -) - -var ( - statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) - recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`) -) - -// MDStat holds info parsed from /proc/mdstat. -type MDStat struct { - // Name of the device. - Name string - // activity-state of the device. - ActivityState string - // Number of active disks. - DisksActive int64 - // Total number of disks the device requires. - DisksTotal int64 - // Number of failed disks. - DisksFailed int64 - // Spare disks in the device. - DisksSpare int64 - // Number of blocks the device holds. - BlocksTotal int64 - // Number of blocks on the device that are in sync. - BlocksSynced int64 -} - -// MDStat parses an mdstat-file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. More information available here: -// https://raid.wiki.kernel.org/index.php/Mdstat -func (fs FS) MDStat() ([]MDStat, error) { - data, err := ioutil.ReadFile(fs.proc.Path("mdstat")) - if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) - } - mdstat, err := parseMDStat(data) - if err != nil { - return nil, fmt.Errorf("error parsing mdstat %s: %s", fs.proc.Path("mdstat"), err) - } - return mdstat, nil -} - -// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of -// structs containing the relevant info. -func parseMDStat(mdStatData []byte) ([]MDStat, error) { - mdStats := []MDStat{} - lines := strings.Split(string(mdStatData), "\n") - - for i, line := range lines { - if strings.TrimSpace(line) == "" || line[0] == ' ' || - strings.HasPrefix(line, "Personalities") || - strings.HasPrefix(line, "unused") { - continue - } - - deviceFields := strings.Fields(line) - if len(deviceFields) < 3 { - return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line) - } - mdName := deviceFields[0] // mdx - state := deviceFields[2] // active or inactive - - if len(lines) <= i+3 { - return nil, fmt.Errorf( - "error parsing %s: too few lines for md device", - mdName, - ) - } - - // Failed disks have the suffix (F) & Spare disks have the suffix (S). - fail := int64(strings.Count(line, "(F)")) - spare := int64(strings.Count(line, "(S)")) - active, total, size, err := evalStatusLine(lines[i], lines[i+1]) - - if err != nil { - return nil, fmt.Errorf("error parsing md device lines: %s", err) - } - - syncLineIdx := i + 2 - if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line - syncLineIdx++ - } - - // If device is syncing at the moment, get the number of currently - // synced bytes, otherwise that number equals the size of the device. - syncedBlocks := size - recovering := strings.Contains(lines[syncLineIdx], "recovery") - resyncing := strings.Contains(lines[syncLineIdx], "resync") - - // Append recovery and resyncing state info. - if recovering || resyncing { - if recovering { - state = "recovering" - } else { - state = "resyncing" - } - - // Handle case when resync=PENDING or resync=DELAYED. 
- if strings.Contains(lines[syncLineIdx], "PENDING") || - strings.Contains(lines[syncLineIdx], "DELAYED") { - syncedBlocks = 0 - } else { - syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx]) - if err != nil { - return nil, fmt.Errorf("error parsing sync line in md device %s: %s", mdName, err) - } - } - } - - mdStats = append(mdStats, MDStat{ - Name: mdName, - ActivityState: state, - DisksActive: active, - DisksFailed: fail, - DisksSpare: spare, - DisksTotal: total, - BlocksTotal: size, - BlocksSynced: syncedBlocks, - }) - } - - return mdStats, nil -} - -func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) { - - sizeStr := strings.Fields(statusLine)[0] - size, err = strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) - } - - if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") { - // In the device deviceLine, only disks have a number associated with them in []. - total = int64(strings.Count(deviceLine, "[")) - return total, total, size, nil - } - - if strings.Contains(deviceLine, "inactive") { - return 0, 0, size, nil - } - - matches := statusLineRE.FindStringSubmatch(statusLine) - if len(matches) != 4 { - return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine) - } - - total, err = strconv.ParseInt(matches[2], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) - } - - active, err = strconv.ParseInt(matches[3], 10, 64) - if err != nil { - return 0, 0, 0, fmt.Errorf("unexpected statusLine %s: %s", statusLine, err) - } - - return active, total, size, nil -} - -func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) { - matches := recoveryLineRE.FindStringSubmatch(recoveryLine) - if len(matches) != 2 { - return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine) - } - - syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) - if err != nil { - return 0, fmt.Errorf("%s in recoveryLine: %s", err, recoveryLine) - } - - return syncedBlocks, nil -} diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go deleted file mode 100644 index 50dab4b..0000000 --- a/vendor/github.com/prometheus/procfs/meminfo.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Meminfo represents memory statistics. -type Meminfo struct { - // Total usable ram (i.e. physical ram minus a few reserved - // bits and the kernel binary code) - MemTotal uint64 - // The sum of LowFree+HighFree - MemFree uint64 - // An estimate of how much memory is available for starting - // new applications, without swapping. 
Calculated from - // MemFree, SReclaimable, the size of the file LRU lists, and - // the low watermarks in each zone. The estimate takes into - // account that the system needs some page cache to function - // well, and that not all reclaimable slab will be - // reclaimable, due to items being in use. The impact of those - // factors will vary from system to system. - MemAvailable uint64 - // Relatively temporary storage for raw disk blocks shouldn't - // get tremendously large (20MB or so) - Buffers uint64 - Cached uint64 - // Memory that once was swapped out, is swapped back in but - // still also is in the swapfile (if memory is needed it - // doesn't need to be swapped out AGAIN because it is already - // in the swapfile. This saves I/O) - SwapCached uint64 - // Memory that has been used more recently and usually not - // reclaimed unless absolutely necessary. - Active uint64 - // Memory which has been less recently used. It is more - // eligible to be reclaimed for other purposes - Inactive uint64 - ActiveAnon uint64 - InactiveAnon uint64 - ActiveFile uint64 - InactiveFile uint64 - Unevictable uint64 - Mlocked uint64 - // total amount of swap space available - SwapTotal uint64 - // Memory which has been evicted from RAM, and is temporarily - // on the disk - SwapFree uint64 - // Memory which is waiting to get written back to the disk - Dirty uint64 - // Memory which is actively being written back to the disk - Writeback uint64 - // Non-file backed pages mapped into userspace page tables - AnonPages uint64 - // files which have been mapped, such as libraries - Mapped uint64 - Shmem uint64 - // in-kernel data structures cache - Slab uint64 - // Part of Slab, that might be reclaimed, such as caches - SReclaimable uint64 - // Part of Slab, that cannot be reclaimed on memory pressure - SUnreclaim uint64 - KernelStack uint64 - // amount of memory dedicated to the lowest level of page - // tables. - PageTables uint64 - // NFS pages sent to the server, but not yet committed to - // stable storage - NFSUnstable uint64 - // Memory used for block device "bounce buffers" - Bounce uint64 - // Memory used by FUSE for temporary writeback buffers - WritebackTmp uint64 - // Based on the overcommit ratio ('vm.overcommit_ratio'), - // this is the total amount of memory currently available to - // be allocated on the system. This limit is only adhered to - // if strict overcommit accounting is enabled (mode 2 in - // 'vm.overcommit_memory'). - // The CommitLimit is calculated with the following formula: - // CommitLimit = ([total RAM pages] - [total huge TLB pages]) * - // overcommit_ratio / 100 + [total swap pages] - // For example, on a system with 1G of physical RAM and 7G - // of swap with a `vm.overcommit_ratio` of 30 it would - // yield a CommitLimit of 7.3G. - // For more details, see the memory overcommit documentation - // in vm/overcommit-accounting. - CommitLimit uint64 - // The amount of memory presently allocated on the system. - // The committed memory is a sum of all of the memory which - // has been allocated by processes, even if it has not been - // "used" by them as of yet. A process which malloc()'s 1G - // of memory, but only touches 300M of it will show up as - // using 1G. This 1G is memory which has been "committed" to - // by the VM and can be used at any time by the allocating - // application. With strict overcommit enabled on the system - // (mode 2 in 'vm.overcommit_memory'),allocations which would - // exceed the CommitLimit (detailed above) will not be permitted. 
- // This is useful if one needs to guarantee that processes will - // not fail due to lack of memory once that memory has been - // successfully allocated. - CommittedAS uint64 - // total size of vmalloc memory area - VmallocTotal uint64 - // amount of vmalloc area which is used - VmallocUsed uint64 - // largest contiguous block of vmalloc area which is free - VmallocChunk uint64 - HardwareCorrupted uint64 - AnonHugePages uint64 - ShmemHugePages uint64 - ShmemPmdMapped uint64 - CmaTotal uint64 - CmaFree uint64 - HugePagesTotal uint64 - HugePagesFree uint64 - HugePagesRsvd uint64 - HugePagesSurp uint64 - Hugepagesize uint64 - DirectMap4k uint64 - DirectMap2M uint64 - DirectMap1G uint64 -} - -// Meminfo returns an information about current kernel/system memory statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Meminfo() (Meminfo, error) { - b, err := util.ReadFileNoStat(fs.proc.Path("meminfo")) - if err != nil { - return Meminfo{}, err - } - - m, err := parseMemInfo(bytes.NewReader(b)) - if err != nil { - return Meminfo{}, fmt.Errorf("failed to parse meminfo: %v", err) - } - - return *m, nil -} - -func parseMemInfo(r io.Reader) (*Meminfo, error) { - var m Meminfo - s := bufio.NewScanner(r) - for s.Scan() { - // Each line has at least a name and value; we ignore the unit. - fields := strings.Fields(s.Text()) - if len(fields) < 2 { - return nil, fmt.Errorf("malformed meminfo line: %q", s.Text()) - } - - v, err := strconv.ParseUint(fields[1], 0, 64) - if err != nil { - return nil, err - } - - switch fields[0] { - case "MemTotal:": - m.MemTotal = v - case "MemFree:": - m.MemFree = v - case "MemAvailable:": - m.MemAvailable = v - case "Buffers:": - m.Buffers = v - case "Cached:": - m.Cached = v - case "SwapCached:": - m.SwapCached = v - case "Active:": - m.Active = v - case "Inactive:": - m.Inactive = v - case "Active(anon):": - m.ActiveAnon = v - case "Inactive(anon):": - m.InactiveAnon = v - case "Active(file):": - m.ActiveFile = v - case "Inactive(file):": - m.InactiveFile = v - case "Unevictable:": - m.Unevictable = v - case "Mlocked:": - m.Mlocked = v - case "SwapTotal:": - m.SwapTotal = v - case "SwapFree:": - m.SwapFree = v - case "Dirty:": - m.Dirty = v - case "Writeback:": - m.Writeback = v - case "AnonPages:": - m.AnonPages = v - case "Mapped:": - m.Mapped = v - case "Shmem:": - m.Shmem = v - case "Slab:": - m.Slab = v - case "SReclaimable:": - m.SReclaimable = v - case "SUnreclaim:": - m.SUnreclaim = v - case "KernelStack:": - m.KernelStack = v - case "PageTables:": - m.PageTables = v - case "NFS_Unstable:": - m.NFSUnstable = v - case "Bounce:": - m.Bounce = v - case "WritebackTmp:": - m.WritebackTmp = v - case "CommitLimit:": - m.CommitLimit = v - case "Committed_AS:": - m.CommittedAS = v - case "VmallocTotal:": - m.VmallocTotal = v - case "VmallocUsed:": - m.VmallocUsed = v - case "VmallocChunk:": - m.VmallocChunk = v - case "HardwareCorrupted:": - m.HardwareCorrupted = v - case "AnonHugePages:": - m.AnonHugePages = v - case "ShmemHugePages:": - m.ShmemHugePages = v - case "ShmemPmdMapped:": - m.ShmemPmdMapped = v - case "CmaTotal:": - m.CmaTotal = v - case "CmaFree:": - m.CmaFree = v - case "HugePages_Total:": - m.HugePagesTotal = v - case "HugePages_Free:": - m.HugePagesFree = v - case "HugePages_Rsvd:": - m.HugePagesRsvd = v - case "HugePages_Surp:": - m.HugePagesSurp = v - case "Hugepagesize:": - m.Hugepagesize = v - case "DirectMap4k:": - m.DirectMap4k = v - case "DirectMap2M:": - m.DirectMap2M = v - case "DirectMap1G:": - 
m.DirectMap1G = v - } - } - - return &m, nil -} diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go deleted file mode 100644 index bb01bb5..0000000 --- a/vendor/github.com/prometheus/procfs/mountinfo.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A MountInfo is a type that describes the details, options -// for each mount, parsed from /proc/self/mountinfo. -// The fields described in each entry of /proc/self/mountinfo -// is described in the following man page. -// http://man7.org/linux/man-pages/man5/proc.5.html -type MountInfo struct { - // Unique Id for the mount - MountId int - // The Id of the parent mount - ParentId int - // The value of `st_dev` for the files on this FS - MajorMinorVer string - // The pathname of the directory in the FS that forms - // the root for this mount - Root string - // The pathname of the mount point relative to the root - MountPoint string - // Mount options - Options map[string]string - // Zero or more optional fields - OptionalFields map[string]string - // The Filesystem type - FSType string - // FS specific information or "none" - Source string - // Superblock options - SuperOptions map[string]string -} - -// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs. -func parseMountInfo(info []byte) ([]*MountInfo, error) { - mounts := []*MountInfo{} - scanner := bufio.NewScanner(bytes.NewReader(info)) - for scanner.Scan() { - mountString := scanner.Text() - parsedMounts, err := parseMountInfoString(mountString) - if err != nil { - return nil, err - } - mounts = append(mounts, parsedMounts) - } - - err := scanner.Err() - return mounts, err -} - -// Parses a mountinfo file line, and converts it to a MountInfo struct. -// An important check here is to see if the hyphen separator, as if it does not exist, -// it means that the line is malformed. 
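The (verbatim upstream) comment above is the crux of this format: a mountinfo line carries a variable number of optional fields terminated by a lone "-", after which fstype, source, and super options resume at fixed positions — hence the separator check at the top of parseMountInfoString below. A sketch using the example line from man 5 proc:

```go
package main

import (
	"fmt"
	"log"
	"strings"
)

// Example line from man 5 proc; fields 0-5 are fixed, "master:1" is an
// optional field, and everything after the lone "-" is fixed again.
const line = "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"

func main() {
	fields := strings.Split(line, " ")
	sep := -1
	for i, f := range fields {
		if f == "-" { // end of the optional-fields region
			sep = i
			break
		}
	}
	if sep < 0 {
		log.Fatal("malformed mountinfo line: no separator")
	}
	// The vendored parser indexes the same three trailing fields from the
	// end of the slice (len-3, len-2, len-1); scanning forward for "-" is
	// equivalent for well-formed lines.
	fmt.Println("mount point:", fields[4])
	fmt.Println("fstype:     ", fields[sep+1])
	fmt.Println("source:     ", fields[sep+2])
	fmt.Println("super opts: ", fields[sep+3])
}
```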
-func parseMountInfoString(mountString string) (*MountInfo, error) { - var err error - - mountInfo := strings.Split(mountString, " ") - mountInfoLength := len(mountInfo) - if mountInfoLength < 11 { - return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString) - } - - if mountInfo[mountInfoLength-4] != "-" { - return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4]) - } - - mount := &MountInfo{ - MajorMinorVer: mountInfo[2], - Root: mountInfo[3], - MountPoint: mountInfo[4], - Options: mountOptionsParser(mountInfo[5]), - OptionalFields: nil, - FSType: mountInfo[mountInfoLength-3], - Source: mountInfo[mountInfoLength-2], - SuperOptions: mountOptionsParser(mountInfo[mountInfoLength-1]), - } - - mount.MountId, err = strconv.Atoi(mountInfo[0]) - if err != nil { - return nil, fmt.Errorf("failed to parse mount ID") - } - mount.ParentId, err = strconv.Atoi(mountInfo[1]) - if err != nil { - return nil, fmt.Errorf("failed to parse parent ID") - } - // Has optional fields, which is a space separated list of values. - // Example: shared:2 master:7 - if mountInfo[6] != "" { - mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4]) - if err != nil { - return nil, err - } - } - return mount, nil -} - -// mountOptionsIsValidField checks a string against a valid list of optional fields keys. -func mountOptionsIsValidField(s string) bool { - switch s { - case - "shared", - "master", - "propagate_from", - "unbindable": - return true - } - return false -} - -// mountOptionsParseOptionalFields parses a list of optional fields strings into a double map of strings. -func mountOptionsParseOptionalFields(o []string) (map[string]string, error) { - optionalFields := make(map[string]string) - for _, field := range o { - optionSplit := strings.SplitN(field, ":", 2) - value := "" - if len(optionSplit) == 2 { - value = optionSplit[1] - } - if mountOptionsIsValidField(optionSplit[0]) { - optionalFields[optionSplit[0]] = value - } - } - return optionalFields, nil -} - -// Parses the mount options, superblock options. -func mountOptionsParser(mountOptions string) map[string]string { - opts := make(map[string]string) - options := strings.Split(mountOptions, ",") - for _, opt := range options { - splitOption := strings.Split(opt, "=") - if len(splitOption) < 2 { - key := splitOption[0] - opts[key] = "" - } else { - key, value := splitOption[0], splitOption[1] - opts[key] = value - } - } - return opts -} - -// Retrieves mountinfo information from `/proc/self/mountinfo`. -func GetMounts() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat("/proc/self/mountinfo") - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -// Retrieves mountinfo information from a processes' `/proc//mountinfo`. -func GetProcMounts(pid int) ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go deleted file mode 100644 index 35b2ef3..0000000 --- a/vendor/github.com/prometheus/procfs/mountstats.go +++ /dev/null @@ -1,621 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// While implementing parsing of /proc/[pid]/mountstats, this blog was used -// heavily as a reference: -// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex -// -// Special thanks to Chris Siebenmann for all of his posts explaining the -// various statistics available for NFS. - -import ( - "bufio" - "fmt" - "io" - "strconv" - "strings" - "time" -) - -// Constants shared between multiple functions. -const ( - deviceEntryLen = 8 - - fieldBytesLen = 8 - fieldEventsLen = 27 - - statVersion10 = "1.0" - statVersion11 = "1.1" - - fieldTransport10TCPLen = 10 - fieldTransport10UDPLen = 7 - - fieldTransport11TCPLen = 13 - fieldTransport11UDPLen = 10 -) - -// A Mount is a device mount parsed from /proc/[pid]/mountstats. -type Mount struct { - // Name of the device. - Device string - // The mount point of the device. - Mount string - // The filesystem type used by the device. - Type string - // If available additional statistics related to this Mount. - // Use a type assertion to determine if additional statistics are available. - Stats MountStats -} - -// A MountStats is a type which contains detailed statistics for a specific -// type of Mount. -type MountStats interface { - mountStats() -} - -// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. -type MountStatsNFS struct { - // The version of statistics provided. - StatVersion string - // The mount options of the NFS mount. - Opts map[string]string - // The age of the NFS mount. - Age time.Duration - // Statistics related to byte counters for various operations. - Bytes NFSBytesStats - // Statistics related to various NFS event occurrences. - Events NFSEventsStats - // Statistics broken down by filesystem operation. - Operations []NFSOperationStats - // Statistics about the NFS RPC transport. - Transport NFSTransportStats -} - -// mountStats implements MountStats. -func (m MountStatsNFS) mountStats() {} - -// A NFSBytesStats contains statistics about the number of bytes read and written -// by an NFS client to and from an NFS server. -type NFSBytesStats struct { - // Number of bytes read using the read() syscall. - Read uint64 - // Number of bytes written using the write() syscall. - Write uint64 - // Number of bytes read using the read() syscall in O_DIRECT mode. - DirectRead uint64 - // Number of bytes written using the write() syscall in O_DIRECT mode. - DirectWrite uint64 - // Number of bytes read from the NFS server, in total. - ReadTotal uint64 - // Number of bytes written to the NFS server, in total. - WriteTotal uint64 - // Number of pages read directly via mmap()'d files. - ReadPages uint64 - // Number of pages written directly via mmap()'d files. - WritePages uint64 -} - -// A NFSEventsStats contains statistics about NFS event occurrences. -type NFSEventsStats struct { - // Number of times cached inode attributes are re-validated from the server. - InodeRevalidate uint64 - // Number of times cached dentry nodes are re-validated from the server. - DnodeRevalidate uint64 - // Number of times an inode cache is cleared. 
- DataInvalidate uint64 - // Number of times cached inode attributes are invalidated. - AttributeInvalidate uint64 - // Number of times files or directories have been open()'d. - VFSOpen uint64 - // Number of times a directory lookup has occurred. - VFSLookup uint64 - // Number of times permissions have been checked. - VFSAccess uint64 - // Number of updates (and potential writes) to pages. - VFSUpdatePage uint64 - // Number of pages read directly via mmap()'d files. - VFSReadPage uint64 - // Number of times a group of pages have been read. - VFSReadPages uint64 - // Number of pages written directly via mmap()'d files. - VFSWritePage uint64 - // Number of times a group of pages have been written. - VFSWritePages uint64 - // Number of times directory entries have been read with getdents(). - VFSGetdents uint64 - // Number of times attributes have been set on inodes. - VFSSetattr uint64 - // Number of pending writes that have been forcefully flushed to the server. - VFSFlush uint64 - // Number of times fsync() has been called on directories and files. - VFSFsync uint64 - // Number of times locking has been attempted on a file. - VFSLock uint64 - // Number of times files have been closed and released. - VFSFileRelease uint64 - // Unknown. Possibly unused. - CongestionWait uint64 - // Number of times files have been truncated. - Truncation uint64 - // Number of times a file has been grown due to writes beyond its existing end. - WriteExtension uint64 - // Number of times a file was removed while still open by another process. - SillyRename uint64 - // Number of times the NFS server gave less data than expected while reading. - ShortRead uint64 - // Number of times the NFS server wrote less data than expected while writing. - ShortWrite uint64 - // Number of times the NFS server indicated EJUKEBOX; retrieving data from - // offline storage. - JukeboxDelay uint64 - // Number of NFS v4.1+ pNFS reads. - PNFSRead uint64 - // Number of NFS v4.1+ pNFS writes. - PNFSWrite uint64 -} - -// A NFSOperationStats contains statistics for a single operation. -type NFSOperationStats struct { - // The name of the operation. - Operation string - // Number of requests performed for this operation. - Requests uint64 - // Number of times an actual RPC request has been transmitted for this operation. - Transmissions uint64 - // Number of times a request has had a major timeout. - MajorTimeouts uint64 - // Number of bytes sent for this operation, including RPC headers and payload. - BytesSent uint64 - // Number of bytes received for this operation, including RPC headers and payload. - BytesReceived uint64 - // Duration all requests spent queued for transmission before they were sent. - CumulativeQueueMilliseconds uint64 - // Duration it took to get a reply back after the request was transmitted. - CumulativeTotalResponseMilliseconds uint64 - // Duration from when a request was enqueued to when it was completely handled. - CumulativeTotalRequestMilliseconds uint64 -} - -// A NFSTransportStats contains statistics for the NFS mount RPC requests and -// responses. -type NFSTransportStats struct { - // The transport protocol used for the NFS mount. - Protocol string - // The local port used for the NFS mount. - Port uint64 - // Number of times the client has had to establish a connection from scratch - // to the NFS server. - Bind uint64 - // Number of times the client has made a TCP connection to the NFS server. 
- Connect uint64 - // Duration (in jiffies, a kernel internal unit of time) the NFS mount has - // spent waiting for connections to the server to be established. - ConnectIdleTime uint64 - // Duration since the NFS mount last saw any RPC traffic. - IdleTimeSeconds uint64 - // Number of RPC requests for this mount sent to the NFS server. - Sends uint64 - // Number of RPC responses for this mount received from the NFS server. - Receives uint64 - // Number of times the NFS server sent a response with a transaction ID - // unknown to this client. - BadTransactionIDs uint64 - // A running counter, incremented on each request as the current difference - // between sends and receives. - CumulativeActiveRequests uint64 - // A running counter, incremented on each request by the current backlog - // queue size. - CumulativeBacklog uint64 - - // Stats below only available with stat version 1.1. - - // Maximum number of simultaneously active RPC requests ever used. - MaximumRPCSlotsUsed uint64 - // A running counter, incremented on each request as the current size of the - // sending queue. - CumulativeSendingQueue uint64 - // A running counter, incremented on each request as the current size of the - // pending queue. - CumulativePendingQueue uint64 -} - -// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice -// of Mount structures containing detailed information about each mount. -// If available, statistics for each mount are parsed as well. -func parseMountStats(r io.Reader) ([]*Mount, error) { - const ( - device = "device" - statVersionPrefix = "statvers=" - - nfs3Type = "nfs" - nfs4Type = "nfs4" - ) - - var mounts []*Mount - - s := bufio.NewScanner(r) - for s.Scan() { - // Only look for device entries in this function - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 || ss[0] != device { - continue - } - - m, err := parseMount(ss) - if err != nil { - return nil, err - } - - // Does this mount also possess statistics information? - if len(ss) > deviceEntryLen { - // Only NFSv3 and v4 are supported for parsing statistics - if m.Type != nfs3Type && m.Type != nfs4Type { - return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) - } - - statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) - - stats, err := parseMountStatsNFS(s, statVersion) - if err != nil { - return nil, err - } - - m.Stats = stats - } - - mounts = append(mounts, m) - } - - return mounts, s.Err() -} - -// parseMount parses an entry in /proc/[pid]/mountstats in the format: -// device [device] mounted on [mount] with fstype [type] -func parseMount(ss []string) (*Mount, error) { - if len(ss) < deviceEntryLen { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - - // Check for specific words appearing at specific indices to ensure - // the format is consistent with what we expect - format := []struct { - i int - s string - }{ - {i: 0, s: "device"}, - {i: 2, s: "mounted"}, - {i: 3, s: "on"}, - {i: 5, s: "with"}, - {i: 6, s: "fstype"}, - } - - for _, f := range format { - if ss[f.i] != f.s { - return nil, fmt.Errorf("invalid device entry: %v", ss) - } - } - - return &Mount{ - Device: ss[1], - Mount: ss[4], - Type: ss[7], - }, nil -} - -// parseMountStatsNFS parses a MountStatsNFS by scanning additional information -// related to NFS statistics.
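// A standalone sketch of the fixed-word format check that parseMount (above)
// applies to a mountstats "device" line; the NFS export and mount point are
// invented sample data:
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "device fs.example.com:/export mounted on /mnt/nfs with fstype nfs4 statvers=1.1"
	ss := strings.Fields(line)
	// Words at fixed indices anchor the format: device ... mounted on ... with fstype ...
	format := []struct {
		i int
		s string
	}{{0, "device"}, {2, "mounted"}, {3, "on"}, {5, "with"}, {6, "fstype"}}
	for _, f := range format {
		if len(ss) <= f.i || ss[f.i] != f.s {
			fmt.Println("invalid device entry")
			return
		}
	}
	fmt.Printf("device=%s mount=%s type=%s\n", ss[1], ss[4], ss[7])
}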
-func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { - // Field indicators for parsing specific types of data - const ( - fieldOpts = "opts:" - fieldAge = "age:" - fieldBytes = "bytes:" - fieldEvents = "events:" - fieldPerOpStats = "per-op" - fieldTransport = "xprt:" - ) - - stats := &MountStatsNFS{ - StatVersion: statVersion, - } - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - break - } - if len(ss) < 2 { - return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) - } - - switch ss[0] { - case fieldOpts: - if stats.Opts == nil { - stats.Opts = map[string]string{} - } - for _, opt := range strings.Split(ss[1], ",") { - split := strings.Split(opt, "=") - if len(split) == 2 { - stats.Opts[split[0]] = split[1] - } else { - stats.Opts[opt] = "" - } - } - case fieldAge: - // Age integer is in seconds - d, err := time.ParseDuration(ss[1] + "s") - if err != nil { - return nil, err - } - - stats.Age = d - case fieldBytes: - bstats, err := parseNFSBytesStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Bytes = *bstats - case fieldEvents: - estats, err := parseNFSEventsStats(ss[1:]) - if err != nil { - return nil, err - } - - stats.Events = *estats - case fieldTransport: - if len(ss) < 3 { - return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) - } - - tstats, err := parseNFSTransportStats(ss[1:], statVersion) - if err != nil { - return nil, err - } - - stats.Transport = *tstats - } - - // When encountering "per-operation statistics", we must break this - // loop and parse them separately to ensure we can terminate parsing - // before reaching another device entry; hence why this 'if' statement - // is not just another switch case - if ss[0] == fieldPerOpStats { - break - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - // NFS per-operation stats appear last before the next device entry - perOpStats, err := parseNFSOperationStats(s) - if err != nil { - return nil, err - } - - stats.Operations = perOpStats - - return stats, nil -} - -// parseNFSBytesStats parses a NFSBytesStats line using an input set of -// integer fields. -func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { - if len(ss) != fieldBytesLen { - return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) - } - - ns := make([]uint64, 0, fieldBytesLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSBytesStats{ - Read: ns[0], - Write: ns[1], - DirectRead: ns[2], - DirectWrite: ns[3], - ReadTotal: ns[4], - WriteTotal: ns[5], - ReadPages: ns[6], - WritePages: ns[7], - }, nil -} - -// parseNFSEventsStats parses a NFSEventsStats line using an input set of -// integer fields. 
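// A hedged sketch of how the whitespace-separated counter rows handled above
// and below (e.g. the "bytes:" line) are decoded into uint64 values and mapped
// positionally; the numbers are invented for the example:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	raw := "1207 4304 0 0 1210 4304 22 0"
	fields := strings.Fields(raw)
	ns := make([]uint64, 0, len(fields))
	for _, s := range fields {
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil {
			fmt.Println("bad counter:", err)
			return
		}
		ns = append(ns, n)
	}
	// Positional mapping, e.g. Read=ns[0], Write=ns[1], ReadTotal=ns[4].
	fmt.Println("read:", ns[0], "write:", ns[1], "read total:", ns[4])
}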
-func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { - if len(ss) != fieldEventsLen { - return nil, fmt.Errorf("invalid NFS events stats: %v", ss) - } - - ns := make([]uint64, 0, fieldEventsLen) - for _, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - return &NFSEventsStats{ - InodeRevalidate: ns[0], - DnodeRevalidate: ns[1], - DataInvalidate: ns[2], - AttributeInvalidate: ns[3], - VFSOpen: ns[4], - VFSLookup: ns[5], - VFSAccess: ns[6], - VFSUpdatePage: ns[7], - VFSReadPage: ns[8], - VFSReadPages: ns[9], - VFSWritePage: ns[10], - VFSWritePages: ns[11], - VFSGetdents: ns[12], - VFSSetattr: ns[13], - VFSFlush: ns[14], - VFSFsync: ns[15], - VFSLock: ns[16], - VFSFileRelease: ns[17], - CongestionWait: ns[18], - Truncation: ns[19], - WriteExtension: ns[20], - SillyRename: ns[21], - ShortRead: ns[22], - ShortWrite: ns[23], - JukeboxDelay: ns[24], - PNFSRead: ns[25], - PNFSWrite: ns[26], - }, nil -} - -// parseNFSOperationStats parses a slice of NFSOperationStats by scanning -// additional information about per-operation statistics until an empty -// line is reached. -func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { - const ( - // Number of expected fields in each per-operation statistics set - numFields = 9 - ) - - var ops []NFSOperationStats - - for s.Scan() { - ss := strings.Fields(string(s.Bytes())) - if len(ss) == 0 { - // Must break when reading a blank line after per-operation stats to - // enable top-level function to parse the next device entry - break - } - - if len(ss) != numFields { - return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) - } - - // Skip string operation name for integers - ns := make([]uint64, 0, numFields-1) - for _, st := range ss[1:] { - n, err := strconv.ParseUint(st, 10, 64) - if err != nil { - return nil, err - } - - ns = append(ns, n) - } - - ops = append(ops, NFSOperationStats{ - Operation: strings.TrimSuffix(ss[0], ":"), - Requests: ns[0], - Transmissions: ns[1], - MajorTimeouts: ns[2], - BytesSent: ns[3], - BytesReceived: ns[4], - CumulativeQueueMilliseconds: ns[5], - CumulativeTotalResponseMilliseconds: ns[6], - CumulativeTotalRequestMilliseconds: ns[7], - }) - } - - return ops, s.Err() -} - -// parseNFSTransportStats parses a NFSTransportStats line using an input set of -// integer fields matched to a specific stats version. -func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { - // Extract the protocol field. 
It is the only string value in the line - protocol := ss[0] - ss = ss[1:] - - switch statVersion { - case statVersion10: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport10TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport10UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) - } - case statVersion11: - var expectedLength int - if protocol == "tcp" { - expectedLength = fieldTransport11TCPLen - } else if protocol == "udp" { - expectedLength = fieldTransport11UDPLen - } else { - return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss) - } - if len(ss) != expectedLength { - return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) - } - default: - return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) - } - - // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay - // in a v1.0 response. Since the stat length is bigger for TCP stats, we use - // the TCP length here. - // - // Note: slice length must be set to length of v1.1 stats to avoid a panic when - // only v1.0 stats are present. - // See: https://github.com/prometheus/node_exporter/issues/571. - ns := make([]uint64, fieldTransport11TCPLen) - for i, s := range ss { - n, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return nil, err - } - - ns[i] = n - } - - // The fields differ depending on the transport protocol (TCP or UDP) - // From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt - // - // For the udp RPC transport there is no connection count, connect idle time, - // or idle time (fields #3, #4, and #5); all other fields are the same. So - // we set them to 0 here. - if protocol == "udp" { - ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...) - } - - return &NFSTransportStats{ - Protocol: protocol, - Port: ns[0], - Bind: ns[1], - Connect: ns[2], - ConnectIdleTime: ns[3], - IdleTimeSeconds: ns[4], - Sends: ns[5], - Receives: ns[6], - BadTransactionIDs: ns[7], - CumulativeActiveRequests: ns[8], - CumulativeBacklog: ns[9], - MaximumRPCSlotsUsed: ns[10], - CumulativeSendingQueue: ns[11], - CumulativePendingQueue: ns[12], - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go deleted file mode 100644 index 47a710b..0000000 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "sort" - "strconv" - "strings" -) - -// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. -type NetDevLine struct { - Name string `json:"name"` // The name of the interface. 
- RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. - RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. - RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. - RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. - RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. - RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. - RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. - RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. - TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. - TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. - TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. - TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. - TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. - TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. - TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. - TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. -} - -// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys -// are interface names. -type NetDev map[string]NetDevLine - -// NetDev returns kernel/system statistics read from /proc/net/dev. -func (fs FS) NetDev() (NetDev, error) { - return newNetDev(fs.proc.Path("net/dev")) -} - -// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev. -func (p Proc) NetDev() (NetDev, error) { - return newNetDev(p.path("net/dev")) -} - -// newNetDev creates a new NetDev from the contents of the given file. -func newNetDev(file string) (NetDev, error) { - f, err := os.Open(file) - if err != nil { - return NetDev{}, err - } - defer f.Close() - - netDev := NetDev{} - s := bufio.NewScanner(f) - for n := 0; s.Scan(); n++ { - // Skip the 2 header lines. - if n < 2 { - continue - } - - line, err := netDev.parseLine(s.Text()) - if err != nil { - return netDev, err - } - - netDev[line.Name] = *line - } - - return netDev, s.Err() -} - -// parseLine parses a single line from the /proc/net/dev file. Header lines -// must be filtered prior to calling this method. 
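// A self-contained sketch of the colon split and field parsing that parseLine
// performs below; the eth0 counters are fabricated sample data:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	raw := "  eth0: 18234 126 0 0 0 0 0 0 9100 87 0 0 0 0 0 0"
	parts := strings.SplitN(raw, ":", 2)
	if len(parts) != 2 {
		fmt.Println("invalid net/dev line, missing colon")
		return
	}
	name := strings.TrimSpace(parts[0])
	fields := strings.Fields(strings.TrimSpace(parts[1]))
	// The first 8 columns are RX counters, the next 8 are TX counters.
	rxBytes, _ := strconv.ParseUint(fields[0], 10, 64)
	txBytes, _ := strconv.ParseUint(fields[8], 10, 64)
	fmt.Printf("%s: rx=%d tx=%d\n", name, rxBytes, txBytes)
}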
-func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) { - parts := strings.SplitN(rawLine, ":", 2) - if len(parts) != 2 { - return nil, errors.New("invalid net/dev line, missing colon") - } - fields := strings.Fields(strings.TrimSpace(parts[1])) - - var err error - line := &NetDevLine{} - - // Interface Name - line.Name = strings.TrimSpace(parts[0]) - if line.Name == "" { - return nil, errors.New("invalid net/dev line, empty interface name") - } - - // RX - line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return nil, err - } - line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return nil, err - } - line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) - if err != nil { - return nil, err - } - line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) - if err != nil { - return nil, err - } - line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) - if err != nil { - return nil, err - } - line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) - if err != nil { - return nil, err - } - line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) - if err != nil { - return nil, err - } - line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) - if err != nil { - return nil, err - } - - // TX - line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) - if err != nil { - return nil, err - } - line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) - if err != nil { - return nil, err - } - line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) - if err != nil { - return nil, err - } - line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) - if err != nil { - return nil, err - } - line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) - if err != nil { - return nil, err - } - line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) - if err != nil { - return nil, err - } - line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) - if err != nil { - return nil, err - } - line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) - if err != nil { - return nil, err - } - - return line, nil -} - -// Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma separated list of interface names. -func (netDev NetDev) Total() NetDevLine { - total := NetDevLine{} - - names := make([]string, 0, len(netDev)) - for _, ifc := range netDev { - names = append(names, ifc.Name) - total.RxBytes += ifc.RxBytes - total.RxPackets += ifc.RxPackets - total.RxErrors += ifc.RxErrors - total.RxDropped += ifc.RxDropped - total.RxFIFO += ifc.RxFIFO - total.RxFrame += ifc.RxFrame - total.RxCompressed += ifc.RxCompressed - total.RxMulticast += ifc.RxMulticast - total.TxBytes += ifc.TxBytes - total.TxPackets += ifc.TxPackets - total.TxErrors += ifc.TxErrors - total.TxDropped += ifc.TxDropped - total.TxFIFO += ifc.TxFIFO - total.TxCollisions += ifc.TxCollisions - total.TxCarrier += ifc.TxCarrier - total.TxCompressed += ifc.TxCompressed - } - sort.Strings(names) - total.Name = strings.Join(names, ", ") - - return total -} diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go deleted file mode 100644 index f91ef55..0000000 --- a/vendor/github.com/prometheus/procfs/net_sockstat.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6, -// respectively. -type NetSockstat struct { - // Used is non-nil for IPv4 sockstat results, but nil for IPv6. - Used *int - Protocols []NetSockstatProtocol -} - -// A NetSockstatProtocol contains statistics about a given socket protocol. -// Pointer fields indicate that the value may or may not be present on any -// given protocol. -type NetSockstatProtocol struct { - Protocol string - InUse int - Orphan *int - TW *int - Alloc *int - Mem *int - Memory *int -} - -// NetSockstat retrieves IPv4 socket statistics. -func (fs FS) NetSockstat() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat")) -} - -// NetSockstat6 retrieves IPv6 socket statistics. -// -// If IPv6 is disabled on this kernel, the returned error can be checked with -// os.IsNotExist. -func (fs FS) NetSockstat6() (*NetSockstat, error) { - return readSockstat(fs.proc.Path("net", "sockstat6")) -} - -// readSockstat opens and parses a NetSockstat from the input file. -func readSockstat(name string) (*NetSockstat, error) { - // This file is small and can be read with one syscall. - b, err := util.ReadFileNoStat(name) - if err != nil { - // Do not wrap this error so the caller can detect os.IsNotExist and - // similar conditions. - return nil, err - } - - stat, err := parseSockstat(bytes.NewReader(b)) - if err != nil { - return nil, fmt.Errorf("failed to read sockstats from %q: %v", name, err) - } - - return stat, nil -} - -// parseSockstat reads the contents of a sockstat file and parses a NetSockstat. -func parseSockstat(r io.Reader) (*NetSockstat, error) { - var stat NetSockstat - s := bufio.NewScanner(r) - for s.Scan() { - // Expect a minimum of a protocol and one key/value pair. - fields := strings.Split(s.Text(), " ") - if len(fields) < 3 { - return nil, fmt.Errorf("malformed sockstat line: %q", s.Text()) - } - - // The remaining fields are key/value pairs. - kvs, err := parseSockstatKVs(fields[1:]) - if err != nil { - return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %v", s.Text(), err) - } - - // The first field is the protocol. We must trim its colon suffix. - proto := strings.TrimSuffix(fields[0], ":") - switch proto { - case "sockets": - // Special case: IPv4 has a sockets "used" key/value pair that we - // embed at the top level of the structure. - used := kvs["used"] - stat.Used = &used - default: - // Parse all other lines as individual protocols. - nsp := parseSockstatProtocol(kvs) - nsp.Protocol = proto - stat.Protocols = append(stat.Protocols, nsp) - } - } - - if err := s.Err(); err != nil { - return nil, err - } - - return &stat, nil -} - -// parseSockstatKVs parses a string slice into a map of key/value pairs. -func parseSockstatKVs(kvs []string) (map[string]int, error) { - if len(kvs)%2 != 0 { - return nil, errors.New("odd number of fields in key/value pairs") - } - - // Iterate two values at a time to gather key/value pairs. 
- out := make(map[string]int, len(kvs)/2) - for i := 0; i < len(kvs); i += 2 { - vp := util.NewValueParser(kvs[i+1]) - out[kvs[i]] = vp.Int() - - if err := vp.Err(); err != nil { - return nil, err - } - } - - return out, nil -} - -// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map. -func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol { - var nsp NetSockstatProtocol - for k, v := range kvs { - // Capture the range variable to ensure we get unique pointers for - // each of the optional fields. - v := v - switch k { - case "inuse": - nsp.InUse = v - case "orphan": - nsp.Orphan = &v - case "tw": - nsp.TW = &v - case "alloc": - nsp.Alloc = &v - case "mem": - nsp.Mem = &v - case "memory": - nsp.Memory = &v - } - } - - return nsp -} diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go deleted file mode 100644 index 6fcad20..0000000 --- a/vendor/github.com/prometheus/procfs/net_softnet.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "io/ioutil" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162 -// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810. - -// SoftnetEntry contains a single row of data from /proc/net/softnet_stat -type SoftnetEntry struct { - // Number of processed packets - Processed uint - // Number of dropped packets - Dropped uint - // Number of times processing packets ran out of quota - TimeSqueezed uint -} - -// GatherSoftnetStats reads /proc/net/softnet_stat, parses the relevant columns, -// and then returns a slice of SoftnetEntry structs.
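// The softnet_stat columns are hexadecimal; a standalone sketch of decoding
// the three columns the parser below keeps (processed, dropped, squeezed)
// from a made-up row:
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	row := "00008e78 00000002 00000011 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000"
	cols := strings.Fields(row)
	processed, _ := strconv.ParseUint(cols[0], 16, 32)
	dropped, _ := strconv.ParseUint(cols[1], 16, 32)
	squeezed, _ := strconv.ParseUint(cols[2], 16, 32)
	fmt.Println("processed:", processed, "dropped:", dropped, "squeezed:", squeezed)
}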
-func (fs FS) GatherSoftnetStats() ([]SoftnetEntry, error) { - data, err := ioutil.ReadFile(fs.proc.Path("net/softnet_stat")) - if err != nil { - return nil, fmt.Errorf("error reading softnet %s: %s", fs.proc.Path("net/softnet_stat"), err) - } - - return parseSoftnetEntries(data) -} - -func parseSoftnetEntries(data []byte) ([]SoftnetEntry, error) { - lines := strings.Split(string(data), "\n") - entries := make([]SoftnetEntry, 0) - var err error - const ( - expectedColumns = 11 - ) - for _, line := range lines { - columns := strings.Fields(line) - width := len(columns) - if width == 0 { - continue - } - if width != expectedColumns { - return []SoftnetEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedColumns) - } - var entry SoftnetEntry - if entry, err = parseSoftnetEntry(columns); err != nil { - return []SoftnetEntry{}, err - } - entries = append(entries, entry) - } - - return entries, nil -} - -func parseSoftnetEntry(columns []string) (SoftnetEntry, error) { - var err error - var processed, dropped, timeSqueezed uint64 - if processed, err = strconv.ParseUint(columns[0], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 0: %s", err) - } - if dropped, err = strconv.ParseUint(columns[1], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 1: %s", err) - } - if timeSqueezed, err = strconv.ParseUint(columns[2], 16, 32); err != nil { - return SoftnetEntry{}, fmt.Errorf("Unable to parse column 2: %s", err) - } - return SoftnetEntry{ - Processed: uint(processed), - Dropped: uint(dropped), - TimeSqueezed: uint(timeSqueezed), - }, nil -} diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go deleted file mode 100644 index 93bd58f..0000000 --- a/vendor/github.com/prometheus/procfs/net_unix.go +++ /dev/null @@ -1,271 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -// For the proc file format details, -// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815 -// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48. - -const ( - netUnixKernelPtrIdx = iota - netUnixRefCountIdx - _ - netUnixFlagsIdx - netUnixTypeIdx - netUnixStateIdx - netUnixInodeIdx - - // Inode and Path are optional. - netUnixStaticFieldsCnt = 6 -) - -const ( - netUnixTypeStream = 1 - netUnixTypeDgram = 2 - netUnixTypeSeqpacket = 5 - - netUnixFlagListen = 1 << 16 - - netUnixStateUnconnected = 1 - netUnixStateConnecting = 2 - netUnixStateConnected = 3 - netUnixStateDisconnected = 4 -) - -var errInvalidKernelPtrFmt = errors.New("Invalid Num(the kernel table slot number) format") - -// NetUnixType is the type of the type field. -type NetUnixType uint64 - -// NetUnixFlags is the type of the flags field. -type NetUnixFlags uint64 - -// NetUnixState is the type of the state field. 
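// A small sketch of decoding the hexadecimal Flags and St columns of
// /proc/net/unix; the sample values describe a listening (1 << 16) and a
// connected (0x03) socket, mirroring the constants defined above:
package main

import (
	"fmt"
	"strconv"
)

func main() {
	flags, _ := strconv.ParseUint("00010000", 16, 32)
	state, _ := strconv.ParseInt("03", 16, 8)
	fmt.Println("listening:", flags == 1<<16, "connected:", state == 3)
}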
-type NetUnixState uint64 - -// NetUnixLine represents a line of /proc/net/unix. -type NetUnixLine struct { - KernelPtr string - RefCount uint64 - Protocol uint64 - Flags NetUnixFlags - Type NetUnixType - State NetUnixState - Inode uint64 - Path string -} - -// NetUnix holds the data read from /proc/net/unix. -type NetUnix struct { - Rows []*NetUnixLine -} - -// NewNetUnix returns data read from /proc/net/unix. -func NewNetUnix() (*NetUnix, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return nil, err - } - - return fs.NewNetUnix() -} - -// NewNetUnix returns data read from /proc/net/unix. -func (fs FS) NewNetUnix() (*NetUnix, error) { - return NewNetUnixByPath(fs.proc.Path("net/unix")) -} - -// NewNetUnixByPath returns data read from /proc/net/unix by file path. -// It might return an error with partially parsed data if an error occurs after some data has been parsed. -func NewNetUnixByPath(path string) (*NetUnix, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return NewNetUnixByReader(f) -} - -// NewNetUnixByReader returns data read from /proc/net/unix by a reader. -// It might return an error with partially parsed data if an error occurs after some data has been parsed. -func NewNetUnixByReader(reader io.Reader) (*NetUnix, error) { - nu := &NetUnix{ - Rows: make([]*NetUnixLine, 0, 32), - } - scanner := bufio.NewScanner(reader) - // Omit the header line. - scanner.Scan() - header := scanner.Text() - // The proc(5) man page says the header does not contain an Inode field, - // but in practice it does. - // This code works for both cases. - hasInode := strings.Contains(header, "Inode") - - minFieldsCnt := netUnixStaticFieldsCnt - if hasInode { - minFieldsCnt++ - } - for scanner.Scan() { - line := scanner.Text() - item, err := nu.parseLine(line, hasInode, minFieldsCnt) - if err != nil { - return nu, err - } - nu.Rows = append(nu.Rows, item) - } - - return nu, scanner.Err() -} - -func (u *NetUnix) parseLine(line string, hasInode bool, minFieldsCnt int) (*NetUnixLine, error) { - fields := strings.Fields(line) - fieldsLen := len(fields) - if fieldsLen < minFieldsCnt { - return nil, fmt.Errorf( - "Parse Unix domain failed: expect at least %d fields but got %d", - minFieldsCnt, fieldsLen) - } - kernelPtr, err := u.parseKernelPtr(fields[netUnixKernelPtrIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain num(%s) failed: %s", fields[netUnixKernelPtrIdx], err) - } - users, err := u.parseUsers(fields[netUnixRefCountIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain ref count(%s) failed: %s", fields[netUnixRefCountIdx], err) - } - flags, err := u.parseFlags(fields[netUnixFlagsIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain flags(%s) failed: %s", fields[netUnixFlagsIdx], err) - } - typ, err := u.parseType(fields[netUnixTypeIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain type(%s) failed: %s", fields[netUnixTypeIdx], err) - } - state, err := u.parseState(fields[netUnixStateIdx]) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain state(%s) failed: %s", fields[netUnixStateIdx], err) - } - var inode uint64 - if hasInode { - inodeStr := fields[netUnixInodeIdx] - inode, err = u.parseInode(inodeStr) - if err != nil { - return nil, fmt.Errorf("Parse Unix domain inode(%s) failed: %s", inodeStr, err) - } - } - - nuLine := &NetUnixLine{ - KernelPtr: kernelPtr, - RefCount: users, - Type: typ, - Flags: flags, - State: state, - Inode: inode, - } - - // Path field is optional.
- if fieldsLen > minFieldsCnt { - pathIdx := netUnixInodeIdx + 1 - if !hasInode { - pathIdx-- - } - nuLine.Path = fields[pathIdx] - } - - return nuLine, nil -} - -func (u NetUnix) parseKernelPtr(str string) (string, error) { - if !strings.HasSuffix(str, ":") { - return "", errInvalidKernelPtrFmt - } - return str[:len(str)-1], nil -} - -func (u NetUnix) parseUsers(hexStr string) (uint64, error) { - return strconv.ParseUint(hexStr, 16, 32) -} - -func (u NetUnix) parseType(hexStr string) (NetUnixType, error) { - typ, err := strconv.ParseUint(hexStr, 16, 16) - if err != nil { - return 0, err - } - return NetUnixType(typ), nil -} - -func (u NetUnix) parseFlags(hexStr string) (NetUnixFlags, error) { - flags, err := strconv.ParseUint(hexStr, 16, 32) - if err != nil { - return 0, err - } - return NetUnixFlags(flags), nil -} - -func (u NetUnix) parseState(hexStr string) (NetUnixState, error) { - st, err := strconv.ParseInt(hexStr, 16, 8) - if err != nil { - return 0, err - } - return NetUnixState(st), nil -} - -func (u NetUnix) parseInode(inodeStr string) (uint64, error) { - return strconv.ParseUint(inodeStr, 10, 64) -} - -func (t NetUnixType) String() string { - switch t { - case netUnixTypeStream: - return "stream" - case netUnixTypeDgram: - return "dgram" - case netUnixTypeSeqpacket: - return "seqpacket" - } - return "unknown" -} - -func (f NetUnixFlags) String() string { - switch f { - case netUnixFlagListen: - return "listen" - default: - return "default" - } -} - -func (s NetUnixState) String() string { - switch s { - case netUnixStateUnconnected: - return "unconnected" - case netUnixStateConnecting: - return "connecting" - case netUnixStateConnected: - return "connected" - case netUnixStateDisconnected: - return "disconnected" - } - return "unknown" -} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go deleted file mode 100644 index 330e472..0000000 --- a/vendor/github.com/prometheus/procfs/proc.go +++ /dev/null @@ -1,298 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// Proc provides information about a running process. -type Proc struct { - // The process ID. - PID int - - fs fs.FS -} - -// Procs represents a list of Proc structs. -type Procs []Proc - -func (p Procs) Len() int { return len(p) } -func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } - -// Self returns a process for the current process read via /proc/self. -func Self() (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Self() -} - -// NewProc returns a process for the given pid under /proc. 
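// A usage sketch for the Proc API that this commit removes from the vendor
// tree; it assumes a hypothetical consumer importing the upstream module
// github.com/prometheus/procfs directly, on a system with /proc mounted:
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc") // the same value as DefaultMountPoint
	if err != nil {
		log.Fatal(err)
	}
	p, err := fs.Proc(1) // PID 1 exists on any running Linux system
	if err != nil {
		log.Fatal(err)
	}
	comm, err := p.Comm()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pid 1 command:", comm)
}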
-func NewProc(pid int) (Proc, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// AllProcs returns a list of all currently available processes under /proc. -func AllProcs() (Procs, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return Procs{}, err - } - return fs.AllProcs() -} - -// Self returns a process for the current process. -func (fs FS) Self() (Proc, error) { - p, err := os.Readlink(fs.proc.Path("self")) - if err != nil { - return Proc{}, err - } - pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1)) - if err != nil { - return Proc{}, err - } - return fs.Proc(pid) -} - -// NewProc returns a process for the given pid. -// -// Deprecated: use fs.Proc() instead -func (fs FS) NewProc(pid int) (Proc, error) { - return fs.Proc(pid) -} - -// Proc returns a process for the given pid. -func (fs FS) Proc(pid int) (Proc, error) { - if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil { - return Proc{}, err - } - return Proc{PID: pid, fs: fs.proc}, nil -} - -// AllProcs returns a list of all currently available processes. -func (fs FS) AllProcs() (Procs, error) { - d, err := os.Open(fs.proc.Path()) - if err != nil { - return Procs{}, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - p := Procs{} - for _, n := range names { - pid, err := strconv.ParseInt(n, 10, 64) - if err != nil { - continue - } - p = append(p, Proc{PID: int(pid), fs: fs.proc}) - } - - return p, nil -} - -// CmdLine returns the command line of a process. -func (p Proc) CmdLine() ([]string, error) { - data, err := util.ReadFileNoStat(p.path("cmdline")) - if err != nil { - return nil, err - } - - if len(data) < 1 { - return []string{}, nil - } - - return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil -} - -// Comm returns the command name of a process. -func (p Proc) Comm() (string, error) { - data, err := util.ReadFileNoStat(p.path("comm")) - if err != nil { - return "", err - } - - return strings.TrimSpace(string(data)), nil -} - -// Executable returns the absolute path of the executable command of a process. -func (p Proc) Executable() (string, error) { - exe, err := os.Readlink(p.path("exe")) - if os.IsNotExist(err) { - return "", nil - } - - return exe, err -} - -// Cwd returns the absolute path to the current working directory of the process. -func (p Proc) Cwd() (string, error) { - wd, err := os.Readlink(p.path("cwd")) - if os.IsNotExist(err) { - return "", nil - } - - return wd, err -} - -// RootDir returns the absolute path to the process's root directory (as set by chroot) -func (p Proc) RootDir() (string, error) { - rdir, err := os.Readlink(p.path("root")) - if os.IsNotExist(err) { - return "", nil - } - - return rdir, err -} - -// FileDescriptors returns the currently open file descriptors of a process. -func (p Proc) FileDescriptors() ([]uintptr, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - fds := make([]uintptr, len(names)) - for i, n := range names { - fd, err := strconv.ParseInt(n, 10, 32) - if err != nil { - return nil, fmt.Errorf("could not parse fd %s: %s", n, err) - } - fds[i] = uintptr(fd) - } - - return fds, nil -} - -// FileDescriptorTargets returns the targets of all file descriptors of a process. -// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. 
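// A self-contained sketch of the readlink walk that FileDescriptorTargets
// performs below, pointed at /proc/self instead of an arbitrary PID; Linux only:
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

func main() {
	d, err := os.Open("/proc/self/fd")
	if err != nil {
		log.Fatal(err)
	}
	defer d.Close()
	names, err := d.Readdirnames(-1)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range names {
		// Sockets and pipes resolve to pseudo-targets such as "socket:[12345]".
		target, err := os.Readlink(filepath.Join("/proc/self/fd", n))
		if err != nil {
			continue
		}
		fmt.Println(n, "->", target)
	}
}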
-func (p Proc) FileDescriptorTargets() ([]string, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - targets := make([]string, len(names)) - - for i, name := range names { - target, err := os.Readlink(p.path("fd", name)) - if err == nil { - targets[i] = target - } - } - - return targets, nil -} - -// FileDescriptorsLen returns the number of currently open file descriptors of -// a process. -func (p Proc) FileDescriptorsLen() (int, error) { - fds, err := p.fileDescriptors() - if err != nil { - return 0, err - } - - return len(fds), nil -} - -// MountStats retrieves statistics and configuration for mount points in a -// process's namespace. -func (p Proc) MountStats() ([]*Mount, error) { - f, err := os.Open(p.path("mountstats")) - if err != nil { - return nil, err - } - defer f.Close() - - return parseMountStats(f) -} - -// MountInfo retrieves mount information for mount points in a -// process's namespace. -// It supplies information missing in `/proc/self/mounts` and -// fixes various other problems with that file too. -func (p Proc) MountInfo() ([]*MountInfo, error) { - data, err := util.ReadFileNoStat(p.path("mountinfo")) - if err != nil { - return nil, err - } - return parseMountInfo(data) -} - -func (p Proc) fileDescriptors() ([]string, error) { - d, err := os.Open(p.path("fd")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) - } - - return names, nil -} - -func (p Proc) path(pa ...string) string { - return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) -} - -// FileDescriptorsInfo retrieves information about all file descriptors of -// the process. -func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) { - names, err := p.fileDescriptors() - if err != nil { - return nil, err - } - - var fdinfos ProcFDInfos - - for _, n := range names { - fdinfo, err := p.FDInfo(n) - if err != nil { - continue - } - fdinfos = append(fdinfos, *fdinfo) - } - - return fdinfos, nil -} - -// Schedstat returns task scheduling information for the process. -func (p Proc) Schedstat() (ProcSchedstat, error) { - contents, err := ioutil.ReadFile(p.path("schedstat")) - if err != nil { - return ProcSchedstat{}, err - } - return parseProcSchedstat(string(contents)) -} diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go deleted file mode 100644 index 6134b35..0000000 --- a/vendor/github.com/prometheus/procfs/proc_environ.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package procfs - -import ( - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Environ reads process environments from /proc/<pid>/environ -func (p Proc) Environ() ([]string, error) { - environments := make([]string, 0) - - data, err := util.ReadFileNoStat(p.path("environ")) - if err != nil { - return environments, err - } - - environments = strings.Split(string(data), "\000") - if len(environments) > 0 { - environments = environments[:len(environments)-1] - } - - return environments, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go deleted file mode 100644 index 4e7597f..0000000 --- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "regexp" - - "github.com/prometheus/procfs/internal/util" -) - -// Regexp variables -var ( - rPos = regexp.MustCompile(`^pos:\s+(\d+)$`) - rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`) - rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`) - rInotify = regexp.MustCompile(`^inotify`) -) - -// ProcFDInfo represents file descriptor information. -type ProcFDInfo struct { - // File descriptor - FD string - // File offset - Pos string - // File access mode and status flags - Flags string - // Mount point ID - MntID string - // List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only) - InotifyInfos []InotifyInfo -} - -// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty. -func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) { - data, err := util.ReadFileNoStat(p.path("fdinfo", fd)) - if err != nil { - return nil, err - } - - var text, pos, flags, mntid string - var inotify []InotifyInfo - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - text = scanner.Text() - if rPos.MatchString(text) { - pos = rPos.FindStringSubmatch(text)[1] - } else if rFlags.MatchString(text) { - flags = rFlags.FindStringSubmatch(text)[1] - } else if rMntID.MatchString(text) { - mntid = rMntID.FindStringSubmatch(text)[1] - } else if rInotify.MatchString(text) { - newInotify, err := parseInotifyInfo(text) - if err != nil { - return nil, err - } - inotify = append(inotify, *newInotify) - } - } - - i := &ProcFDInfo{ - FD: fd, - Pos: pos, - Flags: flags, - MntID: mntid, - InotifyInfos: inotify, - } - - return i, nil -} - -// InotifyInfo represents a single inotify line in the fdinfo file. -type InotifyInfo struct { - // Watch descriptor number - WD string - // Inode number - Ino string - // Device ID - Sdev string - // Mask of events being monitored - Mask string -} - -// InotifyInfo constructor. Only available on kernel 3.8+.
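// A standalone check of the inotify regexp used below against one sample
// fdinfo line (the hex values are illustrative):
package main

import (
	"fmt"
	"regexp"
)

func main() {
	r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`)
	m := r.FindStringSubmatch("inotify wd:3 ino:9e7e sdev:800013 mask:800afce")
	if m == nil {
		fmt.Println("no match")
		return
	}
	fmt.Println("wd:", m[1], "ino:", m[2], "sdev:", m[3], "mask:", m[4])
}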
-func parseInotifyInfo(line string) (*InotifyInfo, error) { - r := regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)\s+mask:([0-9a-f]+)`) - m := r.FindStringSubmatch(line) - i := &InotifyInfo{ - WD: m[1], - Ino: m[2], - Sdev: m[3], - Mask: m[4], - } - return i, nil -} - -// ProcFDInfos represents a list of ProcFDInfo structs. -type ProcFDInfos []ProcFDInfo - -func (p ProcFDInfos) Len() int { return len(p) } -func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD } - -// InotifyWatchLen returns the total number of inotify watches -func (p ProcFDInfos) InotifyWatchLen() (int, error) { - length := 0 - for _, f := range p { - length += len(f.InotifyInfos) - } - - return length, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go deleted file mode 100644 index 776f349..0000000 --- a/vendor/github.com/prometheus/procfs/proc_io.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcIO models the content of /proc/<pid>/io. -type ProcIO struct { - // Chars read. - RChar uint64 - // Chars written. - WChar uint64 - // Read syscalls. - SyscR uint64 - // Write syscalls. - SyscW uint64 - // Bytes read. - ReadBytes uint64 - // Bytes written. - WriteBytes uint64 - // Bytes written, but taking into account truncation. See - // Documentation/filesystems/proc.txt in the kernel sources for - // detailed explanation. - CancelledWriteBytes int64 -} - -// IO creates a new ProcIO instance from a given Proc instance. -func (p Proc) IO() (ProcIO, error) { - pio := ProcIO{} - - data, err := util.ReadFileNoStat(p.path("io")) - if err != nil { - return pio, err - } - - ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + - "read_bytes: %d\nwrite_bytes: %d\n" + - "cancelled_write_bytes: %d\n" - - _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, - &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) - - return pio, err -} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go deleted file mode 100644 index 91ee24d..0000000 --- a/vendor/github.com/prometheus/procfs/proc_limits.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strconv" -) - -// ProcLimits represents the soft limits for each of the process's resource -// limits. For more information see getrlimit(2): -// http://man7.org/linux/man-pages/man2/getrlimit.2.html. -type ProcLimits struct { - // CPU time limit in seconds. - CPUTime int64 - // Maximum size of files that the process may create. - FileSize int64 - // Maximum size of the process's data segment (initialized data, - // uninitialized data, and heap). - DataSize int64 - // Maximum size of the process stack in bytes. - StackSize int64 - // Maximum size of a core file. - CoreFileSize int64 - // Limit of the process's resident set in pages. - ResidentSet int64 - // Maximum number of processes that can be created for the real user ID of - // the calling process. - Processes int64 - // Value one greater than the maximum file descriptor number that can be - // opened by this process. - OpenFiles int64 - // Maximum number of bytes of memory that may be locked into RAM. - LockedMemory int64 - // Maximum size of the process's virtual memory address space in bytes. - AddressSpace int64 - // Limit on the combined number of flock(2) locks and fcntl(2) leases that - // this process may establish. - FileLocks int64 - // Limit of signals that may be queued for the real user ID of the calling - // process. - PendingSignals int64 - // Limit on the number of bytes that can be allocated for POSIX message - // queues for the real user ID of the calling process. - MsqqueueSize int64 - // Limit of the nice priority set using setpriority(2) or nice(2). - NicePriority int64 - // Limit of the real-time priority set using sched_setscheduler(2) or - // sched_setparam(2). - RealtimePriority int64 - // Limit (in microseconds) on the amount of CPU time that a process - // scheduled under a real-time scheduling policy may consume without making - // a blocking system call. - RealtimeTimeout int64 -} - -const ( - limitsFields = 3 - limitsUnlimited = "unlimited" -) - -var ( - limitsDelimiter = regexp.MustCompile("  +") -) - -// NewLimits returns the current soft limits of the process. -// -// Deprecated: use p.Limits() instead -func (p Proc) NewLimits() (ProcLimits, error) { - return p.Limits() -} - -// Limits returns the current soft limits of the process.
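// A sketch of splitting one /proc/<pid>/limits row the way Limits does below:
// columns are separated by runs of two or more spaces, so a name such as
// "Max open files" survives as a single field. Sample values are invented:
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

func main() {
	limitsDelimiter := regexp.MustCompile("  +")
	line := "Max open files            1024                 4096                 files"
	fields := limitsDelimiter.Split(line, 3)
	// fields[0] is the limit name, fields[1] the soft limit; "unlimited" maps to -1.
	soft := int64(-1)
	if fields[1] != "unlimited" {
		soft, _ = strconv.ParseInt(fields[1], 10, 64)
	}
	fmt.Printf("%s: soft=%d\n", fields[0], soft)
}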
-func (p Proc) Limits() (ProcLimits, error) { - f, err := os.Open(p.path("limits")) - if err != nil { - return ProcLimits{}, err - } - defer f.Close() - - var ( - l = ProcLimits{} - s = bufio.NewScanner(f) - ) - for s.Scan() { - fields := limitsDelimiter.Split(s.Text(), limitsFields) - if len(fields) != limitsFields { - return ProcLimits{}, fmt.Errorf( - "couldn't parse %s line %s", f.Name(), s.Text()) - } - - switch fields[0] { - case "Max cpu time": - l.CPUTime, err = parseInt(fields[1]) - case "Max file size": - l.FileSize, err = parseInt(fields[1]) - case "Max data size": - l.DataSize, err = parseInt(fields[1]) - case "Max stack size": - l.StackSize, err = parseInt(fields[1]) - case "Max core file size": - l.CoreFileSize, err = parseInt(fields[1]) - case "Max resident set": - l.ResidentSet, err = parseInt(fields[1]) - case "Max processes": - l.Processes, err = parseInt(fields[1]) - case "Max open files": - l.OpenFiles, err = parseInt(fields[1]) - case "Max locked memory": - l.LockedMemory, err = parseInt(fields[1]) - case "Max address space": - l.AddressSpace, err = parseInt(fields[1]) - case "Max file locks": - l.FileLocks, err = parseInt(fields[1]) - case "Max pending signals": - l.PendingSignals, err = parseInt(fields[1]) - case "Max msgqueue size": - l.MsqqueueSize, err = parseInt(fields[1]) - case "Max nice priority": - l.NicePriority, err = parseInt(fields[1]) - case "Max realtime priority": - l.RealtimePriority, err = parseInt(fields[1]) - case "Max realtime timeout": - l.RealtimeTimeout, err = parseInt(fields[1]) - } - if err != nil { - return ProcLimits{}, err - } - } - - return l, s.Err() -} - -func parseInt(s string) (int64, error) { - if s == limitsUnlimited { - return -1, nil - } - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) - } - return i, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go deleted file mode 100644 index c66740f..0000000 --- a/vendor/github.com/prometheus/procfs/proc_ns.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "fmt" - "os" - "strconv" - "strings" -) - -// Namespace represents a single namespace of a process. -type Namespace struct { - Type string // Namespace type. - Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. -} - -// Namespaces contains all of the namespaces that the process is contained in. -type Namespaces map[string]Namespace - -// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the -// process is a member.
-func (p Proc) Namespaces() (Namespaces, error) { - d, err := os.Open(p.path("ns")) - if err != nil { - return nil, err - } - defer d.Close() - - names, err := d.Readdirnames(-1) - if err != nil { - return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) - } - - ns := make(Namespaces, len(names)) - for _, name := range names { - target, err := os.Readlink(p.path("ns", name)) - if err != nil { - return nil, err - } - - fields := strings.SplitN(target, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) - } - - typ := fields[0] - inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) - if err != nil { - return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) - } - - ns[name] = Namespace{typ, uint32(inode)} - } - - return ns, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go deleted file mode 100644 index 0d7bee5..0000000 --- a/vendor/github.com/prometheus/procfs/proc_psi.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -// The PSI / pressure interface is described at -// https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt -// Each resource (cpu, io, memory, ...) is exposed as a single file. -// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure. -// Each line contains several averages (over n seconds) and a total in µs. -// -// Example io pressure file: -// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362 -// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134 - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d" - -// PSILine is a single line of values as returned by /proc/pressure/* -// The Avg entries are averages over n seconds, as a percentage -// The Total line is in microseconds -type PSILine struct { - Avg10 float64 - Avg60 float64 - Avg300 float64 - Total uint64 -} - -// PSIStats represent pressure stall information from /proc/pressure/* -// Some indicates the share of time in which at least some tasks are stalled -// Full indicates the share of time in which all non-idle tasks are stalled simultaneously -type PSIStats struct { - Some *PSILine - Full *PSILine -} - -// PSIStatsForResource reads pressure stall information for the specified -// resource from /proc/pressure/<resource>. At time of writing this can be -// either "cpu", "memory" or "io".
-func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) { - data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource))) - if err != nil { - return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %s", resource) - } - - return parsePSIStats(resource, bytes.NewReader(data)) -} - -// parsePSIStats parses the specified file for pressure stall information -func parsePSIStats(resource string, r io.Reader) (PSIStats, error) { - psiStats := PSIStats{} - - scanner := bufio.NewScanner(r) - for scanner.Scan() { - l := scanner.Text() - prefix := strings.Split(l, " ")[0] - switch prefix { - case "some": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Some = &psi - case "full": - psi := PSILine{} - _, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total) - if err != nil { - return PSIStats{}, err - } - psiStats.Full = &psi - default: - // If we encounter a line with an unknown prefix, ignore it and move on - // Should new measurement types be added in the future we'll simply ignore them instead - // of erroring on retrieval - continue - } - } - - return psiStats, nil -} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go deleted file mode 100644 index 4517d2e..0000000 --- a/vendor/github.com/prometheus/procfs/proc_stat.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "fmt" - "os" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// Originally, this USER_HZ value was dynamically retrieved via a sysconf call -// which required cgo. However, that caused a lot of problems regarding -// cross-compilation. Alternatives such as running a binary to determine the -// value, or trying to derive it in some other way were all problematic. After -// much research it was determined that USER_HZ is actually hardcoded to 100 on -// all Go-supported platforms as of the time of this writing. This is why we -// decided to hardcode it here as well. It is not impossible that there could -// be systems with exceptions, but they should be very exotic edge cases, and -// in that case, the worst outcome will be two misreported metrics. -// -// See also the following discussions: -// -// - https://github.com/prometheus/node_exporter/issues/52 -// - https://github.com/prometheus/procfs/pull/2 -// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue -const userHZ = 100 - -// ProcStat provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStat struct { - // The process ID. - PID int - // The filename of the executable. - Comm string - // The process state. 
- State string - // The PID of the parent of this process. - PPID int - // The process group ID of the process. - PGRP int - // The session ID of the process. - Session int - // The controlling terminal of the process. - TTY int - // The ID of the foreground process group of the controlling terminal of - // the process. - TPGID int - // The kernel flags word of the process. - Flags uint - // The number of minor faults the process has made which have not required - // loading a memory page from disk. - MinFlt uint - // The number of minor faults that the process's waited-for children have - // made. - CMinFlt uint - // The number of major faults the process has made which have required - // loading a memory page from disk. - MajFlt uint - // The number of major faults that the process's waited-for children have - // made. - CMajFlt uint - // Amount of time that this process has been scheduled in user mode, - // measured in clock ticks. - UTime uint - // Amount of time that this process has been scheduled in kernel mode, - // measured in clock ticks. - STime uint - // Amount of time that this process's waited-for children have been - // scheduled in user mode, measured in clock ticks. - CUTime uint - // Amount of time that this process's waited-for children have been - // scheduled in kernel mode, measured in clock ticks. - CSTime uint - // For processes running a real-time scheduling policy, this is the negated - // scheduling priority, minus one. - Priority int - // The nice value, a value in the range 19 (low priority) to -20 (high - // priority). - Nice int - // Number of threads in this process. - NumThreads int - // The time the process started after system boot, the value is expressed - // in clock ticks. - Starttime uint64 - // Virtual memory size in bytes. - VSize uint - // Resident set size in pages. - RSS int - - proc fs.FS -} - -// NewStat returns the current status information of the process. -// -// Deprecated: use p.Stat() instead -func (p Proc) NewStat() (ProcStat, error) { - return p.Stat() -} - -// Stat returns the current status information of the process. -func (p Proc) Stat() (ProcStat, error) { - data, err := util.ReadFileNoStat(p.path("stat")) - if err != nil { - return ProcStat{}, err - } - - var ( - ignore int - - s = ProcStat{PID: p.PID, proc: p.fs} - l = bytes.Index(data, []byte("(")) - r = bytes.LastIndex(data, []byte(")")) - ) - - if l < 0 || r < 0 { - return ProcStat{}, fmt.Errorf( - "unexpected format, couldn't extract comm: %s", - data, - ) - } - - s.Comm = string(data[l+1 : r]) - _, err = fmt.Fscan( - bytes.NewBuffer(data[r+2:]), - &s.State, - &s.PPID, - &s.PGRP, - &s.Session, - &s.TTY, - &s.TPGID, - &s.Flags, - &s.MinFlt, - &s.CMinFlt, - &s.MajFlt, - &s.CMajFlt, - &s.UTime, - &s.STime, - &s.CUTime, - &s.CSTime, - &s.Priority, - &s.Nice, - &s.NumThreads, - &ignore, - &s.Starttime, - &s.VSize, - &s.RSS, - ) - if err != nil { - return ProcStat{}, err - } - - return s, nil -} - -// VirtualMemory returns the virtual memory size in bytes. -func (s ProcStat) VirtualMemory() uint { - return s.VSize -} - -// ResidentMemory returns the resident memory size in bytes. -func (s ProcStat) ResidentMemory() int { - return s.RSS * os.Getpagesize() -} - -// StartTime returns the unix timestamp of the process in seconds. 
-func (s ProcStat) StartTime() (float64, error) { - fs := FS{proc: s.proc} - stat, err := fs.Stat() - if err != nil { - return 0, err - } - return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil -} - -// CPUTime returns the total CPU user and system time in seconds. -func (s ProcStat) CPUTime() float64 { - return float64(s.UTime+s.STime) / userHZ -} diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go deleted file mode 100644 index e30c2b8..0000000 --- a/vendor/github.com/prometheus/procfs/proc_status.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bytes" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// ProcStatus provides status information about the process, -// read from /proc/[pid]/stat. -type ProcStatus struct { - // The process ID. - PID int - // The process name. - Name string - - // Thread group ID. - TGID int - - // Peak virtual memory size. - VmPeak uint64 - // Virtual memory size. - VmSize uint64 - // Locked memory size. - VmLck uint64 - // Pinned memory size. - VmPin uint64 - // Peak resident set size. - VmHWM uint64 - // Resident set size (sum of RssAnnon RssFile and RssShmem). - VmRSS uint64 - // Size of resident anonymous memory. - RssAnon uint64 - // Size of resident file mappings. - RssFile uint64 - // Size of resident shared memory. - RssShmem uint64 - // Size of data segments. - VmData uint64 - // Size of stack segments. - VmStk uint64 - // Size of text segments. - VmExe uint64 - // Shared library code size. - VmLib uint64 - // Page table entries size. - VmPTE uint64 - // Size of second-level page tables. - VmPMD uint64 - // Swapped-out virtual memory size by anonymous private. - VmSwap uint64 - // Size of hugetlb memory portions - HugetlbPages uint64 - - // Number of voluntary context switches. - VoluntaryCtxtSwitches uint64 - // Number of involuntary context switches. - NonVoluntaryCtxtSwitches uint64 -} - -// NewStatus returns the current status information of the process. 
-func (p Proc) NewStatus() (ProcStatus, error) { - data, err := util.ReadFileNoStat(p.path("status")) - if err != nil { - return ProcStatus{}, err - } - - s := ProcStatus{PID: p.PID} - - lines := strings.Split(string(data), "\n") - for _, line := range lines { - if !bytes.Contains([]byte(line), []byte(":")) { - continue - } - - kv := strings.SplitN(line, ":", 2) - - // removes spaces - k := string(strings.TrimSpace(kv[0])) - v := string(strings.TrimSpace(kv[1])) - // removes "kB" - v = string(bytes.Trim([]byte(v), " kB")) - - // value to int when possible - // we can skip error check here, 'cause vKBytes is not used when value is a string - vKBytes, _ := strconv.ParseUint(v, 10, 64) - // convert kB to B - vBytes := vKBytes * 1024 - - s.fillStatus(k, v, vKBytes, vBytes) - } - - return s, nil -} - -func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) { - switch k { - case "Tgid": - s.TGID = int(vUint) - case "Name": - s.Name = vString - case "VmPeak": - s.VmPeak = vUintBytes - case "VmSize": - s.VmSize = vUintBytes - case "VmLck": - s.VmLck = vUintBytes - case "VmPin": - s.VmPin = vUintBytes - case "VmHWM": - s.VmHWM = vUintBytes - case "VmRSS": - s.VmRSS = vUintBytes - case "RssAnon": - s.RssAnon = vUintBytes - case "RssFile": - s.RssFile = vUintBytes - case "RssShmem": - s.RssShmem = vUintBytes - case "VmData": - s.VmData = vUintBytes - case "VmStk": - s.VmStk = vUintBytes - case "VmExe": - s.VmExe = vUintBytes - case "VmLib": - s.VmLib = vUintBytes - case "VmPTE": - s.VmPTE = vUintBytes - case "VmPMD": - s.VmPMD = vUintBytes - case "VmSwap": - s.VmSwap = vUintBytes - case "HugetlbPages": - s.HugetlbPages = vUintBytes - case "voluntary_ctxt_switches": - s.VoluntaryCtxtSwitches = vUint - case "nonvoluntary_ctxt_switches": - s.NonVoluntaryCtxtSwitches = vUint - } -} - -// TotalCtxtSwitches returns the total context switch. -func (s ProcStatus) TotalCtxtSwitches() uint64 { - return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches -} diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go deleted file mode 100644 index a4c4089..0000000 --- a/vendor/github.com/prometheus/procfs/schedstat.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "errors" - "os" - "regexp" - "strconv" -) - -var ( - cpuLineRE = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`) - procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`) -) - -// Schedstat contains scheduler statistics from /proc/schedstat -// -// See -// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt -// for a detailed description of what these numbers mean. -// -// Note the current kernel documentation claims some of the time units are in -// jiffies when they are actually in nanoseconds since 2.6.23 with the -// introduction of CFS. A fix to the documentation is pending. 
See -// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473 -type Schedstat struct { - CPUs []*SchedstatCPU -} - -// SchedstatCPU contains the values from one "cpu" line -type SchedstatCPU struct { - CPUNum string - - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// ProcSchedstat contains the values from /proc/<pid>/schedstat -type ProcSchedstat struct { - RunningNanoseconds uint64 - WaitingNanoseconds uint64 - RunTimeslices uint64 -} - -// Schedstat reads data from /proc/schedstat -func (fs FS) Schedstat() (*Schedstat, error) { - file, err := os.Open(fs.proc.Path("schedstat")) - if err != nil { - return nil, err - } - defer file.Close() - - stats := &Schedstat{} - scanner := bufio.NewScanner(file) - - for scanner.Scan() { - match := cpuLineRE.FindStringSubmatch(scanner.Text()) - if match != nil { - cpu := &SchedstatCPU{} - cpu.CPUNum = match[1] - - cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64) - if err != nil { - continue - } - - cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64) - if err != nil { - continue - } - - cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64) - if err != nil { - continue - } - - stats.CPUs = append(stats.CPUs, cpu) - } - } - - return stats, nil -} - -func parseProcSchedstat(contents string) (stats ProcSchedstat, err error) { - match := procLineRE.FindStringSubmatch(contents) - - if match != nil { - stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64) - if err != nil { - return - } - - stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64) - if err != nil { - return - } - - stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64) - return - } - - err = errors.New("could not parse schedstat") - return -} diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go deleted file mode 100644 index b2a6fc9..0000000 --- a/vendor/github.com/prometheus/procfs/stat.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - - "github.com/prometheus/procfs/internal/fs" - "github.com/prometheus/procfs/internal/util" -) - -// CPUStat shows how much time the cpu spend in various stages. -type CPUStat struct { - User float64 - Nice float64 - System float64 - Idle float64 - Iowait float64 - IRQ float64 - SoftIRQ float64 - Steal float64 - Guest float64 - GuestNice float64 -} - -// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
-// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html -// It is possible to get per-cpu stats by reading /proc/softirqs -type SoftIRQStat struct { - Hi uint64 - Timer uint64 - NetTx uint64 - NetRx uint64 - Block uint64 - BlockIoPoll uint64 - Tasklet uint64 - Sched uint64 - Hrtimer uint64 - Rcu uint64 -} - -// Stat represents kernel/system statistics. -type Stat struct { - // Boot time in seconds since the Epoch. - BootTime uint64 - // Summed up cpu statistics. - CPUTotal CPUStat - // Per-CPU statistics. - CPU []CPUStat - // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. - IRQTotal uint64 - // Number of times a numbered IRQ was triggered. - IRQ []uint64 - // Number of times a context switch happened. - ContextSwitches uint64 - // Number of times a process was created. - ProcessCreated uint64 - // Number of processes currently running. - ProcessesRunning uint64 - // Number of processes currently blocked (waiting for IO). - ProcessesBlocked uint64 - // Number of times a softirq was scheduled. - SoftIRQTotal uint64 - // Detailed softirq statistics. - SoftIRQ SoftIRQStat -} - -// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). -func parseCPUStat(line string) (CPUStat, int64, error) { - cpuStat := CPUStat{} - var cpu string - - count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", - &cpu, - &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, - &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, - &cpuStat.Guest, &cpuStat.GuestNice) - - if err != nil && err != io.EOF { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) - } - if count == 0 { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) - } - - cpuStat.User /= userHZ - cpuStat.Nice /= userHZ - cpuStat.System /= userHZ - cpuStat.Idle /= userHZ - cpuStat.Iowait /= userHZ - cpuStat.IRQ /= userHZ - cpuStat.SoftIRQ /= userHZ - cpuStat.Steal /= userHZ - cpuStat.Guest /= userHZ - cpuStat.GuestNice /= userHZ - - if cpu == "cpu" { - return cpuStat, -1, nil - } - - cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) - if err != nil { - return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) - } - - return cpuStat, cpuID, nil -} - -// Parse a softirq line. -func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { - softIRQStat := SoftIRQStat{} - var total uint64 - var prefix string - - _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", - &prefix, &total, - &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, - &softIRQStat.Block, &softIRQStat.BlockIoPoll, - &softIRQStat.Tasklet, &softIRQStat.Sched, - &softIRQStat.Hrtimer, &softIRQStat.Rcu) - - if err != nil { - return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) - } - - return softIRQStat, total, nil -} - -// NewStat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead -func NewStat() (Stat, error) { - fs, err := NewFS(fs.DefaultProcMountPoint) - if err != nil { - return Stat{}, err - } - return fs.Stat() -} - -// NewStat returns information about current cpu/process statistics. 
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -// -// Deprecated: use fs.Stat() instead -func (fs FS) NewStat() (Stat, error) { - return fs.Stat() -} - -// Stat returns information about current cpu/process statistics. -// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt -func (fs FS) Stat() (Stat, error) { - fileName := fs.proc.Path("stat") - data, err := util.ReadFileNoStat(fileName) - if err != nil { - return Stat{}, err - } - - stat := Stat{} - - scanner := bufio.NewScanner(bytes.NewReader(data)) - for scanner.Scan() { - line := scanner.Text() - parts := strings.Fields(scanner.Text()) - // require at least <key> <value> - if len(parts) < 2 { - continue - } - switch { - case parts[0] == "btime": - if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) - } - case parts[0] == "intr": - if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) - } - numberedIRQs := parts[2:] - stat.IRQ = make([]uint64, len(numberedIRQs)) - for i, count := range numberedIRQs { - if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) - } - } - case parts[0] == "ctxt": - if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) - } - case parts[0] == "processes": - if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) - } - case parts[0] == "procs_running": - if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) - } - case parts[0] == "procs_blocked": - if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) - } - case parts[0] == "softirq": - softIRQStats, total, err := parseSoftIRQStat(line) - if err != nil { - return Stat{}, err - } - stat.SoftIRQTotal = total - stat.SoftIRQ = softIRQStats - case strings.HasPrefix(parts[0], "cpu"): - cpuStat, cpuID, err := parseCPUStat(line) - if err != nil { - return Stat{}, err - } - if cpuID == -1 { - stat.CPUTotal = cpuStat - } else { - for int64(len(stat.CPU)) <= cpuID { - stat.CPU = append(stat.CPU, CPUStat{}) - } - stat.CPU[cpuID] = cpuStat - } - } - } - - if err := scanner.Err(); err != nil { - return Stat{}, fmt.Errorf("couldn't parse %s: %s", fileName, err) - } - - return stat, nil -} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar deleted file mode 100644 index 19ef02b..0000000 --- a/vendor/github.com/prometheus/procfs/ttar +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env bash - -# Purpose: plain text tar format -# Limitations: - only suitable for text files, directories, and symlinks -# - stores only filename, content, and mode -# - not designed for untrusted input -# -# Note: must work with bash version 3.2 (macOS) - -# Copyright 2017 Roger Luethi -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -set -o errexit -o nounset - -# Sanitize environment (for instance, standard sorting of glob matches) -export LC_ALL=C - -path="" -CMD="" -ARG_STRING="$*" - -#------------------------------------------------------------------------------ -# Not all sed implementations can work on null bytes. In order to make ttar -# work out of the box on macOS, use Python as a stream editor. - -USE_PYTHON=0 - -PYTHON_CREATE_FILTER=$(cat << 'PCF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'EOF', r'\EOF', line) - line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) - line = re.sub('\x00', r'NULLBYTE', line) - sys.stdout.write(line) -PCF -) - -PYTHON_EXTRACT_FILTER=$(cat << 'PEF' -#!/usr/bin/env python - -import re -import sys - -for line in sys.stdin: - line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line) - line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line) - line = re.sub(r'(?<!\\)EOF', '', line) - line = re.sub(r'\\EOF', 'EOF', line) - sys.stdout.write(line) -PEF -) - -function test_environment { - if [ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]; then - echo "WARNING sed unable to handle null bytes, using Python (slow)." - if ! which python >/dev/null; then - echo "ERROR Python not found. Aborting." - exit 2 - fi - USE_PYTHON=1 - fi -} - -#------------------------------------------------------------------------------ - -function usage { - bname=$(basename "$0") - cat << USAGE -Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive) - $bname -t -f <ARCHIVE> (list archive contents) - $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive) - -Options: - -C <DIR> (change directory) - -v (verbose) - --recursive-unlink (recursively delete existing directory if path - collides with file or directory to extract) - -Example: Change to sysfs directory, create ttar file from fixtures directory - $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ -USAGE -exit "$1" -} - -function vecho { - if [ "${VERBOSE:-}" == "yes" ]; then - echo >&7 "$@" - fi -} - -function set_cmd { - if [ -n "$CMD" ]; then - echo "ERROR: more than one command given" - echo - usage 2 - fi - CMD=$1 -} - -unset VERBOSE -unset RECURSIVE_UNLINK - -while getopts :cf:-:htxvC: opt; do - case $opt in - c) - set_cmd "create" - ;; - f) - ARCHIVE=$OPTARG - ;; - h) - usage 0 - ;; - t) - set_cmd "list" - ;; - x) - set_cmd "extract" - ;; - v) - VERBOSE=yes - exec 7>&1 - ;; - C) - CDIR=$OPTARG - ;; - -) - case $OPTARG in - recursive-unlink) - RECURSIVE_UNLINK="yes" - ;; - *) - echo -e "Error: invalid option -$OPTARG" - echo - usage 1 - ;; - esac - ;; - *) - echo >&2 "ERROR: invalid option -$OPTARG" - echo - usage 1 - ;; - esac -done - -# Remove processed options from arguments -shift $(( OPTIND - 1 )); - -if [ "${CMD:-}" == "" ]; then - echo >&2 "ERROR: no command given" - echo - usage 1 -elif [ "${ARCHIVE:-}" == "" ]; then - echo >&2 "ERROR: no archive name given" - echo - usage 1 -fi - -function list { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ !
-e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while read -r line; do - line_no=$(( line_no + 1 )) - if [ $size -gt 0 ]; then - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - echo "$path" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - echo "$path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - echo "$path -> ${BASH_REMATCH[1]}" - fi - done < "$ttar_file" -} - -function extract { - local path="" - local size=0 - local line_no=0 - local ttar_file=$1 - if [ -n "${2:-}" ]; then - echo >&2 "ERROR: too many arguments." - echo - usage 1 - fi - if [ ! -e "$ttar_file" ]; then - echo >&2 "ERROR: file not found ($ttar_file)" - echo - usage 1 - fi - while IFS= read -r line; do - line_no=$(( line_no + 1 )) - local eof_without_newline - if [ "$size" -gt 0 ]; then - if [[ "$line" =~ [^\\]EOF ]]; then - # An EOF not preceded by a backslash indicates that the line - # does not end with a newline - eof_without_newline=1 - else - eof_without_newline=0 - fi - # Replace NULLBYTE with null byte if at beginning of line - # Replace NULLBYTE with null byte unless preceded by backslash - # Remove one backslash in front of NULLBYTE (if any) - # Remove EOF unless preceded by backslash - # Remove one backslash in front of EOF - if [ $USE_PYTHON -eq 1 ]; then - echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" - else - # The repeated pattern makes up for sed's lack of negative - # lookbehind assertions (for consecutive null bytes). - echo -n "$line" | \ - sed -e 's/^NULLBYTE/\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\([^\\]\)NULLBYTE/\1\x0/g; - s/\\NULLBYTE/NULLBYTE/g; - s/\([^\\]\)EOF/\1/g; - s/\\EOF/EOF/g; - ' >> "$path" - fi - if [[ "$eof_without_newline" -eq 0 ]]; then - echo >> "$path" - fi - size=$(( size - 1 )) - continue - fi - if [[ $line =~ ^Path:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - if [ -L "$path" ]; then - rm "$path" - elif [ -d "$path" ]; then - if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then - rm -r "$path" - else - # Safe because symlinks to directories are dealt with above - rmdir "$path" - fi - elif [ -e "$path" ]; then - rm "$path" - fi - elif [[ $line =~ ^Lines:\ (.*)$ ]]; then - size=${BASH_REMATCH[1]} - # Create file even if it is zero-length. 
- touch "$path" - vecho " $path" - elif [[ $line =~ ^Mode:\ (.*)$ ]]; then - mode=${BASH_REMATCH[1]} - chmod "$mode" "$path" - vecho "$mode" - elif [[ $line =~ ^Directory:\ (.*)$ ]]; then - path=${BASH_REMATCH[1]} - mkdir -p "$path" - vecho " $path/" - elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then - ln -s "${BASH_REMATCH[1]}" "$path" - vecho " $path -> ${BASH_REMATCH[1]}" - elif [[ $line =~ ^# ]]; then - # Ignore comments between files - continue - else - echo >&2 "ERROR: Unknown keyword on line $line_no: $line" - exit 1 - fi - done < "$ttar_file" -} - -function div { - echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ - "- - - - - -" -} - -function get_mode { - local mfile=$1 - if [ -z "${STAT_OPTION:-}" ]; then - if stat -c '%a' "$mfile" >/dev/null 2>&1; then - # GNU stat - STAT_OPTION='-c' - STAT_FORMAT='%a' - else - # BSD stat - STAT_OPTION='-f' - # Octal output, user/group/other (omit file type, sticky bit) - STAT_FORMAT='%OLp' - fi - fi - stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" -} - -function _create { - shopt -s nullglob - local mode - local eof_without_newline - while (( "$#" )); do - file=$1 - if [ -L "$file" ]; then - echo "Path: $file" - symlinkTo=$(readlink "$file") - echo "SymlinkTo: $symlinkTo" - vecho " $file -> $symlinkTo" - div - elif [ -d "$file" ]; then - # Strip trailing slash (if there is one) - file=${file%/} - echo "Directory: $file" - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file/" - div - # Find all files and dirs, including hidden/dot files - for x in "$file/"{*,.[^.]*}; do - _create "$x" - done - elif [ -f "$file" ]; then - echo "Path: $file" - lines=$(wc -l "$file"|awk '{print $1}') - eof_without_newline=0 - if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ - [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then - eof_without_newline=1 - lines=$((lines+1)) - fi - echo "Lines: $lines" - # Add backslash in front of EOF - # Add backslash in front of NULLBYTE - # Replace null byte with NULLBYTE - if [ $USE_PYTHON -eq 1 ]; then - < "$file" python -c "$PYTHON_CREATE_FILTER" - else - < "$file" \ - sed 's/EOF/\\EOF/g; - s/NULLBYTE/\\NULLBYTE/g; - s/\x0/NULLBYTE/g; - ' - fi - if [[ "$eof_without_newline" -eq 1 ]]; then - # Finish line with EOF to indicate that the original line did - # not end with a linefeed - echo "EOF" - fi - mode=$(get_mode "$file") - echo "Mode: $mode" - vecho "$mode $file" - div - else - echo >&2 "ERROR: file not found ($file in $(pwd))" - exit 2 - fi - shift - done -} - -function create { - ttar_file=$1 - shift - if [ -z "${1:-}" ]; then - echo >&2 "ERROR: missing arguments." - echo - usage 1 - fi - if [ -e "$ttar_file" ]; then - rm "$ttar_file" - fi - exec > "$ttar_file" - echo "# Archive created by ttar $ARG_STRING" - _create "$@" -} - -test_environment - -if [ -n "${CDIR:-}" ]; then - if [[ "$ARCHIVE" != /* ]]; then - # Relative path: preserve the archive's location before changing - # directory - ARCHIVE="$(pwd)/$ARCHIVE" - fi - cd "$CDIR" -fi - -"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go deleted file mode 100644 index cb13891..0000000 --- a/vendor/github.com/prometheus/procfs/vm.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// The VM interface is described at -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -// Each setting is exposed as a single file. -// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array -// and numa_zonelist_order (deprecated) which is a string -type VM struct { - AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes - BlockDump *int64 // /proc/sys/vm/block_dump - CompactUnevictableAllowed *int64 // /proc/sys/vm/compact_unevictable_allowed - DirtyBackgroundBytes *int64 // /proc/sys/vm/dirty_background_bytes - DirtyBackgroundRatio *int64 // /proc/sys/vm/dirty_background_ratio - DirtyBytes *int64 // /proc/sys/vm/dirty_bytes - DirtyExpireCentisecs *int64 // /proc/sys/vm/dirty_expire_centisecs - DirtyRatio *int64 // /proc/sys/vm/dirty_ratio - DirtytimeExpireSeconds *int64 // /proc/sys/vm/dirtytime_expire_seconds - DirtyWritebackCentisecs *int64 // /proc/sys/vm/dirty_writeback_centisecs - DropCaches *int64 // /proc/sys/vm/drop_caches - ExtfragThreshold *int64 // /proc/sys/vm/extfrag_threshold - HugetlbShmGroup *int64 // /proc/sys/vm/hugetlb_shm_group - LaptopMode *int64 // /proc/sys/vm/laptop_mode - LegacyVaLayout *int64 // /proc/sys/vm/legacy_va_layout - LowmemReserveRatio []*int64 // /proc/sys/vm/lowmem_reserve_ratio - MaxMapCount *int64 // /proc/sys/vm/max_map_count - MemoryFailureEarlyKill *int64 // /proc/sys/vm/memory_failure_early_kill - MemoryFailureRecovery *int64 // /proc/sys/vm/memory_failure_recovery - MinFreeKbytes *int64 // /proc/sys/vm/min_free_kbytes - MinSlabRatio *int64 // /proc/sys/vm/min_slab_ratio - MinUnmappedRatio *int64 // /proc/sys/vm/min_unmapped_ratio - MmapMinAddr *int64 // /proc/sys/vm/mmap_min_addr - NrHugepages *int64 // /proc/sys/vm/nr_hugepages - NrHugepagesMempolicy *int64 // /proc/sys/vm/nr_hugepages_mempolicy - NrOvercommitHugepages *int64 // /proc/sys/vm/nr_overcommit_hugepages - NumaStat *int64 // /proc/sys/vm/numa_stat - NumaZonelistOrder string // /proc/sys/vm/numa_zonelist_order - OomDumpTasks *int64 // /proc/sys/vm/oom_dump_tasks - OomKillAllocatingTask *int64 // /proc/sys/vm/oom_kill_allocating_task - OvercommitKbytes *int64 // /proc/sys/vm/overcommit_kbytes - OvercommitMemory *int64 // /proc/sys/vm/overcommit_memory - OvercommitRatio *int64 // /proc/sys/vm/overcommit_ratio - PageCluster *int64 // /proc/sys/vm/page-cluster - PanicOnOom *int64 // /proc/sys/vm/panic_on_oom - PercpuPagelistFraction *int64 // /proc/sys/vm/percpu_pagelist_fraction - StatInterval *int64 // /proc/sys/vm/stat_interval - Swappiness *int64 // /proc/sys/vm/swappiness - UserReserveKbytes *int64 // /proc/sys/vm/user_reserve_kbytes - VfsCachePressure *int64 // /proc/sys/vm/vfs_cache_pressure - WatermarkBoostFactor *int64 // /proc/sys/vm/watermark_boost_factor - WatermarkScaleFactor *int64 // /proc/sys/vm/watermark_scale_factor - ZoneReclaimMode *int64 // /proc/sys/vm/zone_reclaim_mode -} - -// VM reads the VM statistics from the 
specified `proc` filesystem. -func (fs FS) VM() (*VM, error) { - path := fs.proc.Path("sys/vm") - file, err := os.Stat(path) - if err != nil { - return nil, err - } - if !file.Mode().IsDir() { - return nil, fmt.Errorf("%s is not a directory", path) - } - - files, err := ioutil.ReadDir(path) - if err != nil { - return nil, err - } - - var vm VM - for _, f := range files { - if f.IsDir() { - continue - } - - name := filepath.Join(path, f.Name()) - // ignore errors on read, as there are some write only - // in /proc/sys/vm - value, err := util.SysReadFile(name) - if err != nil { - continue - } - vp := util.NewValueParser(value) - - switch f.Name() { - case "admin_reserve_kbytes": - vm.AdminReserveKbytes = vp.PInt64() - case "block_dump": - vm.BlockDump = vp.PInt64() - case "compact_unevictable_allowed": - vm.CompactUnevictableAllowed = vp.PInt64() - case "dirty_background_bytes": - vm.DirtyBackgroundBytes = vp.PInt64() - case "dirty_background_ratio": - vm.DirtyBackgroundRatio = vp.PInt64() - case "dirty_bytes": - vm.DirtyBytes = vp.PInt64() - case "dirty_expire_centisecs": - vm.DirtyExpireCentisecs = vp.PInt64() - case "dirty_ratio": - vm.DirtyRatio = vp.PInt64() - case "dirtytime_expire_seconds": - vm.DirtytimeExpireSeconds = vp.PInt64() - case "dirty_writeback_centisecs": - vm.DirtyWritebackCentisecs = vp.PInt64() - case "drop_caches": - vm.DropCaches = vp.PInt64() - case "extfrag_threshold": - vm.ExtfragThreshold = vp.PInt64() - case "hugetlb_shm_group": - vm.HugetlbShmGroup = vp.PInt64() - case "laptop_mode": - vm.LaptopMode = vp.PInt64() - case "legacy_va_layout": - vm.LegacyVaLayout = vp.PInt64() - case "lowmem_reserve_ratio": - stringSlice := strings.Fields(value) - pint64Slice := make([]*int64, 0, len(stringSlice)) - for _, value := range stringSlice { - vp := util.NewValueParser(value) - pint64Slice = append(pint64Slice, vp.PInt64()) - } - vm.LowmemReserveRatio = pint64Slice - case "max_map_count": - vm.MaxMapCount = vp.PInt64() - case "memory_failure_early_kill": - vm.MemoryFailureEarlyKill = vp.PInt64() - case "memory_failure_recovery": - vm.MemoryFailureRecovery = vp.PInt64() - case "min_free_kbytes": - vm.MinFreeKbytes = vp.PInt64() - case "min_slab_ratio": - vm.MinSlabRatio = vp.PInt64() - case "min_unmapped_ratio": - vm.MinUnmappedRatio = vp.PInt64() - case "mmap_min_addr": - vm.MmapMinAddr = vp.PInt64() - case "nr_hugepages": - vm.NrHugepages = vp.PInt64() - case "nr_hugepages_mempolicy": - vm.NrHugepagesMempolicy = vp.PInt64() - case "nr_overcommit_hugepages": - vm.NrOvercommitHugepages = vp.PInt64() - case "numa_stat": - vm.NumaStat = vp.PInt64() - case "numa_zonelist_order": - vm.NumaZonelistOrder = value - case "oom_dump_tasks": - vm.OomDumpTasks = vp.PInt64() - case "oom_kill_allocating_task": - vm.OomKillAllocatingTask = vp.PInt64() - case "overcommit_kbytes": - vm.OvercommitKbytes = vp.PInt64() - case "overcommit_memory": - vm.OvercommitMemory = vp.PInt64() - case "overcommit_ratio": - vm.OvercommitRatio = vp.PInt64() - case "page-cluster": - vm.PageCluster = vp.PInt64() - case "panic_on_oom": - vm.PanicOnOom = vp.PInt64() - case "percpu_pagelist_fraction": - vm.PercpuPagelistFraction = vp.PInt64() - case "stat_interval": - vm.StatInterval = vp.PInt64() - case "swappiness": - vm.Swappiness = vp.PInt64() - case "user_reserve_kbytes": - vm.UserReserveKbytes = vp.PInt64() - case "vfs_cache_pressure": - vm.VfsCachePressure = vp.PInt64() - case "watermark_boost_factor": - vm.WatermarkBoostFactor = vp.PInt64() - case "watermark_scale_factor": - vm.WatermarkScaleFactor = 
vp.PInt64() - case "zone_reclaim_mode": - vm.ZoneReclaimMode = vp.PInt64() - } - if err := vp.Err(); err != nil { - return nil, err - } - } - - return &vm, nil -} diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go deleted file mode 100644 index 30aa417..0000000 --- a/vendor/github.com/prometheus/procfs/xfrm.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2017 Prometheus Team -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package procfs - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" -) - -// XfrmStat models the contents of /proc/net/xfrm_stat. -type XfrmStat struct { - // All errors which are not matched by other - XfrmInError int - // No buffer is left - XfrmInBufferError int - // Header Error - XfrmInHdrError int - // No state found - // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong - XfrmInNoStates int - // Transformation protocol specific error - // e.g. SA Key is wrong - XfrmInStateProtoError int - // Transformation mode specific error - XfrmInStateModeError int - // Sequence error - // e.g. sequence number is out of window - XfrmInStateSeqError int - // State is expired - XfrmInStateExpired int - // State has mismatch option - // e.g. UDP encapsulation type is mismatched - XfrmInStateMismatch int - // State is invalid - XfrmInStateInvalid int - // No matching template for states - // e.g. Inbound SAs are correct but SP rule is wrong - XfrmInTmplMismatch int - // No policy is found for states - // e.g. Inbound SAs are correct but no SP is found - XfrmInNoPols int - // Policy discards - XfrmInPolBlock int - // Policy error - XfrmInPolError int - // All errors which are not matched by others - XfrmOutError int - // Bundle generation error - XfrmOutBundleGenError int - // Bundle check error - XfrmOutBundleCheckError int - // No state was found - XfrmOutNoStates int - // Transformation protocol specific error - XfrmOutStateProtoError int - // Transportation mode specific error - XfrmOutStateModeError int - // Sequence error - // i.e sequence number overflow - XfrmOutStateSeqError int - // State is expired - XfrmOutStateExpired int - // Policy discads - XfrmOutPolBlock int - // Policy is dead - XfrmOutPolDead int - // Policy Error - XfrmOutPolError int - XfrmFwdHdrError int - XfrmOutStateInvalid int - XfrmAcquireError int -} - -// NewXfrmStat reads the xfrm_stat statistics. -func NewXfrmStat() (XfrmStat, error) { - fs, err := NewFS(DefaultMountPoint) - if err != nil { - return XfrmStat{}, err - } - - return fs.NewXfrmStat() -} - -// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
-func (fs FS) NewXfrmStat() (XfrmStat, error) { - file, err := os.Open(fs.proc.Path("net/xfrm_stat")) - if err != nil { - return XfrmStat{}, err - } - defer file.Close() - - var ( - x = XfrmStat{} - s = bufio.NewScanner(file) - ) - - for s.Scan() { - fields := strings.Fields(s.Text()) - - if len(fields) != 2 { - return XfrmStat{}, fmt.Errorf( - "couldn't parse %s line %s", file.Name(), s.Text()) - } - - name := fields[0] - value, err := strconv.Atoi(fields[1]) - if err != nil { - return XfrmStat{}, err - } - - switch name { - case "XfrmInError": - x.XfrmInError = value - case "XfrmInBufferError": - x.XfrmInBufferError = value - case "XfrmInHdrError": - x.XfrmInHdrError = value - case "XfrmInNoStates": - x.XfrmInNoStates = value - case "XfrmInStateProtoError": - x.XfrmInStateProtoError = value - case "XfrmInStateModeError": - x.XfrmInStateModeError = value - case "XfrmInStateSeqError": - x.XfrmInStateSeqError = value - case "XfrmInStateExpired": - x.XfrmInStateExpired = value - case "XfrmInStateInvalid": - x.XfrmInStateInvalid = value - case "XfrmInTmplMismatch": - x.XfrmInTmplMismatch = value - case "XfrmInNoPols": - x.XfrmInNoPols = value - case "XfrmInPolBlock": - x.XfrmInPolBlock = value - case "XfrmInPolError": - x.XfrmInPolError = value - case "XfrmOutError": - x.XfrmOutError = value - case "XfrmInStateMismatch": - x.XfrmInStateMismatch = value - case "XfrmOutBundleGenError": - x.XfrmOutBundleGenError = value - case "XfrmOutBundleCheckError": - x.XfrmOutBundleCheckError = value - case "XfrmOutNoStates": - x.XfrmOutNoStates = value - case "XfrmOutStateProtoError": - x.XfrmOutStateProtoError = value - case "XfrmOutStateModeError": - x.XfrmOutStateModeError = value - case "XfrmOutStateSeqError": - x.XfrmOutStateSeqError = value - case "XfrmOutStateExpired": - x.XfrmOutStateExpired = value - case "XfrmOutPolBlock": - x.XfrmOutPolBlock = value - case "XfrmOutPolDead": - x.XfrmOutPolDead = value - case "XfrmOutPolError": - x.XfrmOutPolError = value - case "XfrmFwdHdrError": - x.XfrmFwdHdrError = value - case "XfrmOutStateInvalid": - x.XfrmOutStateInvalid = value - case "XfrmAcquireError": - x.XfrmAcquireError = value - } - - } - - return x, s.Err() -} diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go deleted file mode 100644 index e941503..0000000 --- a/vendor/github.com/prometheus/procfs/zoneinfo.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows - -package procfs - -import ( - "bytes" - "fmt" - "io/ioutil" - "regexp" - "strings" - - "github.com/prometheus/procfs/internal/util" -) - -// Zoneinfo holds info parsed from /proc/zoneinfo. 
-type Zoneinfo struct { - Node string - Zone string - NrFreePages *int64 - Min *int64 - Low *int64 - High *int64 - Scanned *int64 - Spanned *int64 - Present *int64 - Managed *int64 - NrActiveAnon *int64 - NrInactiveAnon *int64 - NrIsolatedAnon *int64 - NrAnonPages *int64 - NrAnonTransparentHugepages *int64 - NrActiveFile *int64 - NrInactiveFile *int64 - NrIsolatedFile *int64 - NrFilePages *int64 - NrSlabReclaimable *int64 - NrSlabUnreclaimable *int64 - NrMlockStack *int64 - NrKernelStack *int64 - NrMapped *int64 - NrDirty *int64 - NrWriteback *int64 - NrUnevictable *int64 - NrShmem *int64 - NrDirtied *int64 - NrWritten *int64 - NumaHit *int64 - NumaMiss *int64 - NumaForeign *int64 - NumaInterleave *int64 - NumaLocal *int64 - NumaOther *int64 - Protection []*int64 -} - -var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`) - -// Zoneinfo parses an zoneinfo-file (/proc/zoneinfo) and returns a slice of -// structs containing the relevant info. More information available here: -// https://www.kernel.org/doc/Documentation/sysctl/vm.txt -func (fs FS) Zoneinfo() ([]Zoneinfo, error) { - data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo")) - if err != nil { - return nil, fmt.Errorf("error reading zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) - } - zoneinfo, err := parseZoneinfo(data) - if err != nil { - return nil, fmt.Errorf("error parsing zoneinfo %s: %s", fs.proc.Path("zoneinfo"), err) - } - return zoneinfo, nil -} - -func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) { - - zoneinfo := []Zoneinfo{} - - zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode")) - for _, block := range zoneinfoBlocks { - var zoneinfoElement Zoneinfo - lines := strings.Split(string(block), "\n") - for _, line := range lines { - - if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil { - zoneinfoElement.Node = nodeZone[1] - zoneinfoElement.Zone = nodeZone[2] - continue - } - if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") { - zoneinfoElement.Zone = "" - continue - } - parts := strings.Fields(strings.TrimSpace(line)) - if len(parts) < 2 { - continue - } - vp := util.NewValueParser(parts[1]) - switch parts[0] { - case "nr_free_pages": - zoneinfoElement.NrFreePages = vp.PInt64() - case "min": - zoneinfoElement.Min = vp.PInt64() - case "low": - zoneinfoElement.Low = vp.PInt64() - case "high": - zoneinfoElement.High = vp.PInt64() - case "scanned": - zoneinfoElement.Scanned = vp.PInt64() - case "spanned": - zoneinfoElement.Spanned = vp.PInt64() - case "present": - zoneinfoElement.Present = vp.PInt64() - case "managed": - zoneinfoElement.Managed = vp.PInt64() - case "nr_active_anon": - zoneinfoElement.NrActiveAnon = vp.PInt64() - case "nr_inactive_anon": - zoneinfoElement.NrInactiveAnon = vp.PInt64() - case "nr_isolated_anon": - zoneinfoElement.NrIsolatedAnon = vp.PInt64() - case "nr_anon_pages": - zoneinfoElement.NrAnonPages = vp.PInt64() - case "nr_anon_transparent_hugepages": - zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64() - case "nr_active_file": - zoneinfoElement.NrActiveFile = vp.PInt64() - case "nr_inactive_file": - zoneinfoElement.NrInactiveFile = vp.PInt64() - case "nr_isolated_file": - zoneinfoElement.NrIsolatedFile = vp.PInt64() - case "nr_file_pages": - zoneinfoElement.NrFilePages = vp.PInt64() - case "nr_slab_reclaimable": - zoneinfoElement.NrSlabReclaimable = vp.PInt64() - case "nr_slab_unreclaimable": - zoneinfoElement.NrSlabUnreclaimable = vp.PInt64() - case "nr_mlock_stack": - zoneinfoElement.NrMlockStack = vp.PInt64() - case "nr_kernel_stack": 
- zoneinfoElement.NrKernelStack = vp.PInt64() - case "nr_mapped": - zoneinfoElement.NrMapped = vp.PInt64() - case "nr_dirty": - zoneinfoElement.NrDirty = vp.PInt64() - case "nr_writeback": - zoneinfoElement.NrWriteback = vp.PInt64() - case "nr_unevictable": - zoneinfoElement.NrUnevictable = vp.PInt64() - case "nr_shmem": - zoneinfoElement.NrShmem = vp.PInt64() - case "nr_dirtied": - zoneinfoElement.NrDirtied = vp.PInt64() - case "nr_written": - zoneinfoElement.NrWritten = vp.PInt64() - case "numa_hit": - zoneinfoElement.NumaHit = vp.PInt64() - case "numa_miss": - zoneinfoElement.NumaMiss = vp.PInt64() - case "numa_foreign": - zoneinfoElement.NumaForeign = vp.PInt64() - case "numa_interleave": - zoneinfoElement.NumaInterleave = vp.PInt64() - case "numa_local": - zoneinfoElement.NumaLocal = vp.PInt64() - case "numa_other": - zoneinfoElement.NumaOther = vp.PInt64() - case "protection:": - protectionParts := strings.Split(line, ":") - protectionValues := strings.Replace(protectionParts[1], "(", "", 1) - protectionValues = strings.Replace(protectionValues, ")", "", 1) - protectionValues = strings.TrimSpace(protectionValues) - protectionStringMap := strings.Split(protectionValues, ", ") - val, err := util.ParsePInt64s(protectionStringMap) - if err == nil { - zoneinfoElement.Protection = val - } - } - - } - - zoneinfo = append(zoneinfo, zoneinfoElement) - } - return zoneinfo, nil -} diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS deleted file mode 100644 index 15167cd..0000000 --- a/vendor/golang.org/x/net/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS deleted file mode 100644 index 1c4577e..0000000 --- a/vendor/golang.org/x/net/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/LICENSE b/vendor/golang.org/x/net/LICENSE deleted file mode 100644 index 6a66aea..0000000 --- a/vendor/golang.org/x/net/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/net/PATENTS b/vendor/golang.org/x/net/PATENTS deleted file mode 100644 index 7330990..0000000 --- a/vendor/golang.org/x/net/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/net/publicsuffix/list.go b/vendor/golang.org/x/net/publicsuffix/list.go deleted file mode 100644 index 200617e..0000000 --- a/vendor/golang.org/x/net/publicsuffix/list.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run gen.go - -// Package publicsuffix provides a public suffix list based on data from -// https://publicsuffix.org/ -// -// A public suffix is one under which Internet users can directly register -// names. It is related to, but different from, a TLD (top level domain). -// -// "com" is a TLD (top level domain). Top level means it has no dots. -// -// "com" is also a public suffix. Amazon and Google have registered different -// siblings under that domain: "amazon.com" and "google.com". -// -// "au" is another TLD, again because it has no dots. But it's not "amazon.au". -// Instead, it's "amazon.com.au". -// -// "com.au" isn't an actual TLD, because it's not at the top level (it has -// dots). But it is an eTLD (effective TLD), because that's the branching point -// for domain name registrars. -// -// Another name for "an eTLD" is "a public suffix". Often, what's more of -// interest is the eTLD+1, or one more label than the public suffix. For -// example, browsers partition read/write access to HTTP cookies according to -// the eTLD+1. 
Web pages served from "amazon.com.au" can't read cookies from -// "google.com.au", but web pages served from "maps.google.com" can share -// cookies from "www.google.com", so you don't have to sign into Google Maps -// separately from signing into Google Web Search. Note that all four of those -// domains have 3 labels and 2 dots. The first two domains are each an eTLD+1, -// the last two are not (but share the same eTLD+1: "google.com"). -// -// All of these domains have the same eTLD+1: -// - "www.books.amazon.co.uk" -// - "books.amazon.co.uk" -// - "amazon.co.uk" -// Specifically, the eTLD+1 is "amazon.co.uk", because the eTLD is "co.uk". -// -// There is no closed form algorithm to calculate the eTLD of a domain. -// Instead, the calculation is data driven. This package provides a -// pre-compiled snapshot of Mozilla's PSL (Public Suffix List) data at -// https://publicsuffix.org/ -package publicsuffix // import "golang.org/x/net/publicsuffix" - -// TODO: specify case sensitivity and leading/trailing dot behavior for -// func PublicSuffix and func EffectiveTLDPlusOne. - -import ( - "fmt" - "net/http/cookiejar" - "strings" -) - -// List implements the cookiejar.PublicSuffixList interface by calling the -// PublicSuffix function. -var List cookiejar.PublicSuffixList = list{} - -type list struct{} - -func (list) PublicSuffix(domain string) string { - ps, _ := PublicSuffix(domain) - return ps -} - -func (list) String() string { - return version -} - -// PublicSuffix returns the public suffix of the domain using a copy of the -// publicsuffix.org database compiled into the library. -// -// icann is whether the public suffix is managed by the Internet Corporation -// for Assigned Names and Numbers. If not, the public suffix is either a -// privately managed domain (and in practice, not a top level domain) or an -// unmanaged top level domain (and not explicitly mentioned in the -// publicsuffix.org list). For example, "foo.org" and "foo.co.uk" are ICANN -// domains, "foo.dyndns.org" and "foo.blogspot.co.uk" are private domains and -// "cromulent" is an unmanaged top level domain. -// -// Use cases for distinguishing ICANN domains like "foo.com" from private -// domains like "foo.appspot.com" can be found at -// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases -func PublicSuffix(domain string) (publicSuffix string, icann bool) { - lo, hi := uint32(0), uint32(numTLD) - s, suffix, icannNode, wildcard := domain, len(domain), false, false -loop: - for { - dot := strings.LastIndex(s, ".") - if wildcard { - icann = icannNode - suffix = 1 + dot - } - if lo == hi { - break - } - f := find(s[1+dot:], lo, hi) - if f == notFound { - break - } - - u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength) - icannNode = u&(1<<nodesBitsICANN-1) != 0 - u >>= nodesBitsICANN - u = children[u&(1<<nodesBitsChildren-1)] - lo = u & (1<<childrenBitsLo - 1) - u >>= childrenBitsLo - hi = u & (1<<childrenBitsHi - 1) - u >>= childrenBitsHi - switch u & (1<<childrenBitsNodeType - 1) { - case nodeTypeNormal: - suffix = 1 + dot - case nodeTypeException: - suffix = 1 + len(s) - break loop - } - u >>= childrenBitsNodeType - wildcard = u&(1<<childrenBitsWildcard-1) != 0 - if !wildcard { - icann = icannNode - } - - if dot == -1 { - break - } - s = s[:dot] - } - if suffix == len(domain) { - // If no rules match, the prevailing rule is "*". - return domain[1+strings.LastIndex(domain, "."):], icann - } - return domain[suffix:], icann -} - [...] -func nodeLabel(i uint32) string { - x := nodes[i] - length := x & (1<<nodesBitsTextLength - 1) - x >>= nodesBitsTextLength - offset := x & (1<<nodesBitsTextOffset - 1) - return text[offset : offset+length] -} - [...] -// - finally, s is wrapped with double quotes (arg -> "arg"), -// but only if there is space or tab inside s.
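For reference, a usage sketch of the publicsuffix API removed from vendor here (it remains available upstream as golang.org/x/net/publicsuffix); the expected results in the comments follow from the package documentation above, and the EscapeArg implementation documented by the last comment block continues below:

package main

import (
	"fmt"
	"net/http/cookiejar"

	"golang.org/x/net/publicsuffix"
)

func main() {
	ps, icann := publicsuffix.PublicSuffix("books.amazon.co.uk")
	fmt.Println(ps, icann) // "co.uk" true: an ICANN-managed public suffix

	// The eTLD+1 is the public suffix plus one more label.
	etldPlusOne, err := publicsuffix.EffectiveTLDPlusOne("books.amazon.co.uk")
	if err != nil {
		panic(err)
	}
	fmt.Println(etldPlusOne) // "amazon.co.uk"

	// List plugs into net/http/cookiejar so cookies are partitioned by
	// eTLD+1, exactly the browser behavior described above.
	jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
	if err != nil {
		panic(err)
	}
	_ = jar
}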
-func EscapeArg(s string) string { - if len(s) == 0 { - return "\"\"" - } - n := len(s) - hasSpace := false - for i := 0; i < len(s); i++ { - switch s[i] { - case '"', '\\': - n++ - case ' ', '\t': - hasSpace = true - } - } - if hasSpace { - n += 2 - } - if n == len(s) { - return s - } - - qs := make([]byte, n) - j := 0 - if hasSpace { - qs[j] = '"' - j++ - } - slashes := 0 - for i := 0; i < len(s); i++ { - switch s[i] { - default: - slashes = 0 - qs[j] = s[i] - case '\\': - slashes++ - qs[j] = s[i] - case '"': - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '\\' - j++ - qs[j] = s[i] - } - j++ - } - if hasSpace { - for ; slashes > 0; slashes-- { - qs[j] = '\\' - j++ - } - qs[j] = '"' - j++ - } - return string(qs[:j]) -} - -func CloseOnExec(fd Handle) { - SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) -} - -// FullPath retrieves the full path of the specified file. -func FullPath(name string) (path string, err error) { - p, err := UTF16PtrFromString(name) - if err != nil { - return "", err - } - n := uint32(100) - for { - buf := make([]uint16, n) - n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) - if err != nil { - return "", err - } - if n <= uint32(len(buf)) { - return UTF16ToString(buf[:n]), nil - } - } -} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go deleted file mode 100644 index f80a420..0000000 --- a/vendor/golang.org/x/sys/windows/memory_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -const ( - MEM_COMMIT = 0x00001000 - MEM_RESERVE = 0x00002000 - MEM_DECOMMIT = 0x00004000 - MEM_RELEASE = 0x00008000 - MEM_RESET = 0x00080000 - MEM_TOP_DOWN = 0x00100000 - MEM_WRITE_WATCH = 0x00200000 - MEM_PHYSICAL = 0x00400000 - MEM_RESET_UNDO = 0x01000000 - MEM_LARGE_PAGES = 0x20000000 - - PAGE_NOACCESS = 0x01 - PAGE_READONLY = 0x02 - PAGE_READWRITE = 0x04 - PAGE_WRITECOPY = 0x08 - PAGE_EXECUTE_READ = 0x20 - PAGE_EXECUTE_READWRITE = 0x40 - PAGE_EXECUTE_WRITECOPY = 0x80 -) diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash deleted file mode 100644 index 2163843..0000000 --- a/vendor/golang.org/x/sys/windows/mkerrors.bash +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)" -[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; } - -declare -A errors - -{ - echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "import \"syscall\"" - echo "const (" - - while read -r line; do - unset vtype - if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? 
]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then - key="${BASH_REMATCH[1]}" - value="${BASH_REMATCH[3]}" - vtype="${BASH_REMATCH[2]}" - else - continue - fi - [[ -n $key && -n $value ]] || continue - [[ -z ${errors["$key"]} ]] || continue - errors["$key"]="$value" - if [[ -v vtype ]]; then - if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then - vtype="" - elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then - vtype="Handle" - else - vtype="syscall.Errno" - fi - last_vtype="$vtype" - else - vtype="" - if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then - value="S_OK" - elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then - value="ERROR_SUCCESS" - fi - fi - - echo "$key $vtype = $value" - done < "$winerror" - - echo ")" -} | gofmt > "zerrors_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash deleted file mode 100644 index ab8924e..0000000 --- a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Copyright 2019 The Go Authors. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -set -e -shopt -s nullglob - -knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)" -[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; } - -{ - echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT." - echo - echo "package windows" - echo "type KNOWNFOLDERID GUID" - echo "var (" - while read -r line; do - [[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue - printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \ - "${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \ - $(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \ - $(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" )) - done < "$knownfolders" - echo ")" -} | gofmt > "zknownfolderids_windows.go" diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go deleted file mode 100644 index 328e3b2..0000000 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ /dev/null @@ -1,9 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build generate - -package windows - -//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go deleted file mode 100644 index a74e3e2..0000000 --- a/vendor/golang.org/x/sys/windows/race.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,race - -package windows - -import ( - "runtime" - "unsafe" -) - -const raceenabled = true - -func raceAcquire(addr unsafe.Pointer) { - runtime.RaceAcquire(addr) -} - -func raceReleaseMerge(addr unsafe.Pointer) { - runtime.RaceReleaseMerge(addr) -} - -func raceReadRange(addr unsafe.Pointer, len int) { - runtime.RaceReadRange(addr, len) -} - -func raceWriteRange(addr unsafe.Pointer, len int) { - runtime.RaceWriteRange(addr, len) -} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go deleted file mode 100644 index e44a3cb..0000000 --- a/vendor/golang.org/x/sys/windows/race0.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,!race - -package windows - -import ( - "unsafe" -) - -const raceenabled = false - -func raceAcquire(addr unsafe.Pointer) { -} - -func raceReleaseMerge(addr unsafe.Pointer) { -} - -func raceReadRange(addr unsafe.Pointer, len int) { -} - -func raceWriteRange(addr unsafe.Pointer, len int) { -} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go deleted file mode 100644 index 4b6eff1..0000000 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ /dev/null @@ -1,1396 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "syscall" - "unsafe" -) - -const ( - NameUnknown = 0 - NameFullyQualifiedDN = 1 - NameSamCompatible = 2 - NameDisplay = 3 - NameUniqueId = 6 - NameCanonical = 7 - NameUserPrincipal = 8 - NameCanonicalEx = 9 - NameServicePrincipal = 10 - NameDnsDomain = 12 -) - -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. -// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx -//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW -//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW - -// TranslateAccountName converts a directory service -// object name from one format to another. 
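A minimal sketch of TranslateAccountName, whose implementation follows below. DOMAIN\user is a placeholder and the call only succeeds in a domain environment; note that the trailing initSize argument is accepted but unused by the implementation shown here, which always starts with a 50-character buffer:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// SamCompatible (DOMAIN\user) to UserPrincipal (user@domain) form.
	upn, err := windows.TranslateAccountName(`DOMAIN\user`,
		windows.NameSamCompatible, windows.NameUserPrincipal, 50)
	if err != nil {
		panic(err)
	}
	fmt.Println(upn)
}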
-func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { - u, e := UTF16PtrFromString(username) - if e != nil { - return "", e - } - n := uint32(50) - for { - b := make([]uint16, n) - e = TranslateName(u, from, to, &b[0], &n) - if e == nil { - return UTF16ToString(b[:n]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -const ( - // do not reorder - NetSetupUnknownStatus = iota - NetSetupUnjoined - NetSetupWorkgroupName - NetSetupDomainName -) - -type UserInfo10 struct { - Name *uint16 - Comment *uint16 - UsrComment *uint16 - FullName *uint16 -} - -//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo -//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation -//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree - -const ( - // do not reorder - SidTypeUser = 1 + iota - SidTypeGroup - SidTypeDomain - SidTypeAlias - SidTypeWellKnownGroup - SidTypeDeletedAccount - SidTypeInvalid - SidTypeUnknown - SidTypeComputer - SidTypeLabel -) - -type SidIdentifierAuthority struct { - Value [6]byte -} - -var ( - SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} - SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} - SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} - SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} - SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} - SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} - SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} -) - -const ( - SECURITY_NULL_RID = 0 - SECURITY_WORLD_RID = 0 - SECURITY_LOCAL_RID = 0 - SECURITY_CREATOR_OWNER_RID = 0 - SECURITY_CREATOR_GROUP_RID = 1 - SECURITY_DIALUP_RID = 1 - SECURITY_NETWORK_RID = 2 - SECURITY_BATCH_RID = 3 - SECURITY_INTERACTIVE_RID = 4 - SECURITY_LOGON_IDS_RID = 5 - SECURITY_SERVICE_RID = 6 - SECURITY_LOCAL_SYSTEM_RID = 18 - SECURITY_BUILTIN_DOMAIN_RID = 32 - SECURITY_PRINCIPAL_SELF_RID = 10 - SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 - SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 - SECURITY_LOGON_IDS_RID_COUNT = 0x3 - SECURITY_ANONYMOUS_LOGON_RID = 0x7 - SECURITY_PROXY_RID = 0x8 - SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9 - SECURITY_SERVER_LOGON_RID = SECURITY_ENTERPRISE_CONTROLLERS_RID - SECURITY_AUTHENTICATED_USER_RID = 0xb - SECURITY_RESTRICTED_CODE_RID = 0xc - SECURITY_NT_NON_UNIQUE_RID = 0x15 -) - -// Predefined domain-relative RIDs for local groups. 
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx -const ( - DOMAIN_ALIAS_RID_ADMINS = 0x220 - DOMAIN_ALIAS_RID_USERS = 0x221 - DOMAIN_ALIAS_RID_GUESTS = 0x222 - DOMAIN_ALIAS_RID_POWER_USERS = 0x223 - DOMAIN_ALIAS_RID_ACCOUNT_OPS = 0x224 - DOMAIN_ALIAS_RID_SYSTEM_OPS = 0x225 - DOMAIN_ALIAS_RID_PRINT_OPS = 0x226 - DOMAIN_ALIAS_RID_BACKUP_OPS = 0x227 - DOMAIN_ALIAS_RID_REPLICATOR = 0x228 - DOMAIN_ALIAS_RID_RAS_SERVERS = 0x229 - DOMAIN_ALIAS_RID_PREW2KCOMPACCESS = 0x22a - DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS = 0x22b - DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS = 0x22c - DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d - DOMAIN_ALIAS_RID_MONITORING_USERS = 0x22e - DOMAIN_ALIAS_RID_LOGGING_USERS = 0x22f - DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS = 0x230 - DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS = 0x231 - DOMAIN_ALIAS_RID_DCOM_USERS = 0x232 - DOMAIN_ALIAS_RID_IUSERS = 0x238 - DOMAIN_ALIAS_RID_CRYPTO_OPERATORS = 0x239 - DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP = 0x23b - DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c - DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP = 0x23d - DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP = 0x23e -) - -//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW -//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW -//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW -//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid -//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid -//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid -//sys createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid -//sys isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid -//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid -//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid -//sys getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority -//sys getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount -//sys getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority -//sys isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid - -// The security identifier (SID) structure is a variable-length -// structure used to uniquely identify users or groups. -type SID struct{} - -// StringToSid converts a string-format security identifier -// SID into a valid, functional SID. 
-func StringToSid(s string) (*SID, error) { - var sid *SID - p, e := UTF16PtrFromString(s) - if e != nil { - return nil, e - } - e = ConvertStringSidToSid(p, &sid) - if e != nil { - return nil, e - } - defer LocalFree((Handle)(unsafe.Pointer(sid))) - return sid.Copy() -} - -// LookupSID retrieves a security identifier SID for the account -// and the name of the domain on which the account was found. -// System specifies the target computer to search. -func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) { - if len(account) == 0 { - return nil, "", 0, syscall.EINVAL - } - acc, e := UTF16PtrFromString(account) - if e != nil { - return nil, "", 0, e - } - var sys *uint16 - if len(system) > 0 { - sys, e = UTF16PtrFromString(system) - if e != nil { - return nil, "", 0, e - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]byte, n) - db := make([]uint16, dn) - sid = (*SID)(unsafe.Pointer(&b[0])) - e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType) - if e == nil { - return sid, UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, "", 0, e - } - if n <= uint32(len(b)) { - return nil, "", 0, e - } - } -} - -// String converts SID to a string format suitable for display, storage, or transmission. -func (sid *SID) String() string { - var s *uint16 - e := ConvertSidToStringSid(sid, &s) - if e != nil { - return "" - } - defer LocalFree((Handle)(unsafe.Pointer(s))) - return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]) -} - -// Len returns the length, in bytes, of a valid security identifier SID. -func (sid *SID) Len() int { - return int(GetLengthSid(sid)) -} - -// Copy creates a duplicate of security identifier SID. -func (sid *SID) Copy() (*SID, error) { - b := make([]byte, sid.Len()) - sid2 := (*SID)(unsafe.Pointer(&b[0])) - e := CopySid(uint32(len(b)), sid2, sid) - if e != nil { - return nil, e - } - return sid2, nil -} - -// IdentifierAuthority returns the identifier authority of the SID. -func (sid *SID) IdentifierAuthority() SidIdentifierAuthority { - return *getSidIdentifierAuthority(sid) -} - -// SubAuthorityCount returns the number of sub-authorities in the SID. -func (sid *SID) SubAuthorityCount() uint8 { - return *getSidSubAuthorityCount(sid) -} - -// SubAuthority returns the sub-authority of the SID as specified by -// the index, which must be less than sid.SubAuthorityCount(). -func (sid *SID) SubAuthority(idx uint32) uint32 { - if idx >= uint32(sid.SubAuthorityCount()) { - panic("sub-authority index out of range") - } - return *getSidSubAuthority(sid, idx) -} - -// IsValid returns whether the SID has a valid revision and length. -func (sid *SID) IsValid() bool { - return isValidSid(sid) -} - -// Equals compares two SIDs for equality. -func (sid *SID) Equals(sid2 *SID) bool { - return EqualSid(sid, sid2) -} - -// IsWellKnown determines whether the SID matches the well-known sidType. -func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool { - return isWellKnownSid(sid, sidType) -} - -// LookupAccount retrieves the name of the account for this SID -// and the name of the first domain on which this SID is found. -// System specifies the target computer to search.
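A usage sketch tying together StringToSid, String, and LookupAccount (whose implementation follows below). S-1-5-32-544 is the stable well-known SID of the BUILTIN\Administrators alias, so this runs on any Windows machine:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	sid, err := windows.StringToSid("S-1-5-32-544")
	if err != nil {
		panic(err)
	}
	fmt.Println(sid.String()) // round-trips to "S-1-5-32-544"

	// An empty system name means: resolve on the local computer.
	account, domain, accType, err := sid.LookupAccount("")
	if err != nil {
		panic(err)
	}
	fmt.Println(domain, account, accType) // e.g. BUILTIN Administrators 4 (SidTypeAlias)
}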
-func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) { - var sys *uint16 - if len(system) > 0 { - sys, err = UTF16PtrFromString(system) - if err != nil { - return "", "", 0, err - } - } - n := uint32(50) - dn := uint32(50) - for { - b := make([]uint16, n) - db := make([]uint16, dn) - e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType) - if e == nil { - return UTF16ToString(b), UTF16ToString(db), accType, nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", "", 0, e - } - if n <= uint32(len(b)) { - return "", "", 0, e - } - } -} - -// Various types of pre-specified SIDs that can be synthesized and compared at runtime. -type WELL_KNOWN_SID_TYPE uint32 - -const ( - WinNullSid = 0 - WinWorldSid = 1 - WinLocalSid = 2 - WinCreatorOwnerSid = 3 - WinCreatorGroupSid = 4 - WinCreatorOwnerServerSid = 5 - WinCreatorGroupServerSid = 6 - WinNtAuthoritySid = 7 - WinDialupSid = 8 - WinNetworkSid = 9 - WinBatchSid = 10 - WinInteractiveSid = 11 - WinServiceSid = 12 - WinAnonymousSid = 13 - WinProxySid = 14 - WinEnterpriseControllersSid = 15 - WinSelfSid = 16 - WinAuthenticatedUserSid = 17 - WinRestrictedCodeSid = 18 - WinTerminalServerSid = 19 - WinRemoteLogonIdSid = 20 - WinLogonIdsSid = 21 - WinLocalSystemSid = 22 - WinLocalServiceSid = 23 - WinNetworkServiceSid = 24 - WinBuiltinDomainSid = 25 - WinBuiltinAdministratorsSid = 26 - WinBuiltinUsersSid = 27 - WinBuiltinGuestsSid = 28 - WinBuiltinPowerUsersSid = 29 - WinBuiltinAccountOperatorsSid = 30 - WinBuiltinSystemOperatorsSid = 31 - WinBuiltinPrintOperatorsSid = 32 - WinBuiltinBackupOperatorsSid = 33 - WinBuiltinReplicatorSid = 34 - WinBuiltinPreWindows2000CompatibleAccessSid = 35 - WinBuiltinRemoteDesktopUsersSid = 36 - WinBuiltinNetworkConfigurationOperatorsSid = 37 - WinAccountAdministratorSid = 38 - WinAccountGuestSid = 39 - WinAccountKrbtgtSid = 40 - WinAccountDomainAdminsSid = 41 - WinAccountDomainUsersSid = 42 - WinAccountDomainGuestsSid = 43 - WinAccountComputersSid = 44 - WinAccountControllersSid = 45 - WinAccountCertAdminsSid = 46 - WinAccountSchemaAdminsSid = 47 - WinAccountEnterpriseAdminsSid = 48 - WinAccountPolicyAdminsSid = 49 - WinAccountRasAndIasServersSid = 50 - WinNTLMAuthenticationSid = 51 - WinDigestAuthenticationSid = 52 - WinSChannelAuthenticationSid = 53 - WinThisOrganizationSid = 54 - WinOtherOrganizationSid = 55 - WinBuiltinIncomingForestTrustBuildersSid = 56 - WinBuiltinPerfMonitoringUsersSid = 57 - WinBuiltinPerfLoggingUsersSid = 58 - WinBuiltinAuthorizationAccessSid = 59 - WinBuiltinTerminalServerLicenseServersSid = 60 - WinBuiltinDCOMUsersSid = 61 - WinBuiltinIUsersSid = 62 - WinIUserSid = 63 - WinBuiltinCryptoOperatorsSid = 64 - WinUntrustedLabelSid = 65 - WinLowLabelSid = 66 - WinMediumLabelSid = 67 - WinHighLabelSid = 68 - WinSystemLabelSid = 69 - WinWriteRestrictedCodeSid = 70 - WinCreatorOwnerRightsSid = 71 - WinCacheablePrincipalsGroupSid = 72 - WinNonCacheablePrincipalsGroupSid = 73 - WinEnterpriseReadonlyControllersSid = 74 - WinAccountReadonlyControllersSid = 75 - WinBuiltinEventLogReadersGroup = 76 - WinNewEnterpriseReadonlyControllersSid = 77 - WinBuiltinCertSvcDComAccessGroup = 78 - WinMediumPlusLabelSid = 79 - WinLocalLogonSid = 80 - WinConsoleLogonSid = 81 - WinThisOrganizationCertificateSid = 82 - WinApplicationPackageAuthoritySid = 83 - WinBuiltinAnyPackageSid = 84 - WinCapabilityInternetClientSid = 85 - WinCapabilityInternetClientServerSid = 86 - WinCapabilityPrivateNetworkClientServerSid = 87 - WinCapabilityPicturesLibrarySid = 88 - 
WinCapabilityVideosLibrarySid = 89 - WinCapabilityMusicLibrarySid = 90 - WinCapabilityDocumentsLibrarySid = 91 - WinCapabilitySharedUserCertificatesSid = 92 - WinCapabilityEnterpriseAuthenticationSid = 93 - WinCapabilityRemovableStorageSid = 94 - WinBuiltinRDSRemoteAccessServersSid = 95 - WinBuiltinRDSEndpointServersSid = 96 - WinBuiltinRDSManagementServersSid = 97 - WinUserModeDriversSid = 98 - WinBuiltinHyperVAdminsSid = 99 - WinAccountCloneableControllersSid = 100 - WinBuiltinAccessControlAssistanceOperatorsSid = 101 - WinBuiltinRemoteManagementUsersSid = 102 - WinAuthenticationAuthorityAssertedSid = 103 - WinAuthenticationServiceAssertedSid = 104 - WinLocalAccountSid = 105 - WinLocalAccountAndAdministratorSid = 106 - WinAccountProtectedUsersSid = 107 - WinCapabilityAppointmentsSid = 108 - WinCapabilityContactsSid = 109 - WinAccountDefaultSystemManagedSid = 110 - WinBuiltinDefaultSystemManagedGroupSid = 111 - WinBuiltinStorageReplicaAdminsSid = 112 - WinAccountKeyAdminsSid = 113 - WinAccountEnterpriseKeyAdminsSid = 114 - WinAuthenticationKeyTrustSid = 115 - WinAuthenticationKeyPropertyMFASid = 116 - WinAuthenticationKeyPropertyAttestationSid = 117 - WinAuthenticationFreshKeyAuthSid = 118 - WinBuiltinDeviceOwnersSid = 119 -) - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the local machine. -func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) { - return CreateWellKnownDomainSid(sidType, nil) -} - -// Creates a SID for a well-known predefined alias, generally using the constants of the form -// Win*Sid, for the domain specified by the domainSid parameter. -func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) { - n := uint32(50) - for { - b := make([]byte, n) - sid := (*SID)(unsafe.Pointer(&b[0])) - err := createWellKnownSid(sidType, domainSid, sid, &n) - if err == nil { - return sid, nil - } - if err != ERROR_INSUFFICIENT_BUFFER { - return nil, err - } - if n <= uint32(len(b)) { - return nil, err - } - } -} - -const ( - // do not reorder - TOKEN_ASSIGN_PRIMARY = 1 << iota - TOKEN_DUPLICATE - TOKEN_IMPERSONATE - TOKEN_QUERY - TOKEN_QUERY_SOURCE - TOKEN_ADJUST_PRIVILEGES - TOKEN_ADJUST_GROUPS - TOKEN_ADJUST_DEFAULT - TOKEN_ADJUST_SESSIONID - - TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | - TOKEN_ASSIGN_PRIMARY | - TOKEN_DUPLICATE | - TOKEN_IMPERSONATE | - TOKEN_QUERY | - TOKEN_QUERY_SOURCE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT | - TOKEN_ADJUST_SESSIONID - TOKEN_READ = STANDARD_RIGHTS_READ | TOKEN_QUERY - TOKEN_WRITE = STANDARD_RIGHTS_WRITE | - TOKEN_ADJUST_PRIVILEGES | - TOKEN_ADJUST_GROUPS | - TOKEN_ADJUST_DEFAULT - TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE -) - -const ( - // do not reorder - TokenUser = 1 + iota - TokenGroups - TokenPrivileges - TokenOwner - TokenPrimaryGroup - TokenDefaultDacl - TokenSource - TokenType - TokenImpersonationLevel - TokenStatistics - TokenRestrictedSids - TokenSessionId - TokenGroupsAndPrivileges - TokenSessionReference - TokenSandBoxInert - TokenAuditPolicy - TokenOrigin - TokenElevationType - TokenLinkedToken - TokenElevation - TokenHasRestrictions - TokenAccessInformation - TokenVirtualizationAllowed - TokenVirtualizationEnabled - TokenIntegrityLevel - TokenUIAccess - TokenMandatoryPolicy - TokenLogonSid - MaxTokenInfoClass -) - -// Group attributes inside of Tokengroups.Groups[i].Attributes -const ( - SE_GROUP_MANDATORY = 0x00000001 - SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002 - SE_GROUP_ENABLED = 
0x00000004 - SE_GROUP_OWNER = 0x00000008 - SE_GROUP_USE_FOR_DENY_ONLY = 0x00000010 - SE_GROUP_INTEGRITY = 0x00000020 - SE_GROUP_INTEGRITY_ENABLED = 0x00000040 - SE_GROUP_LOGON_ID = 0xC0000000 - SE_GROUP_RESOURCE = 0x20000000 - SE_GROUP_VALID_ATTRIBUTES = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED -) - -// Privilege attributes -const ( - SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001 - SE_PRIVILEGE_ENABLED = 0x00000002 - SE_PRIVILEGE_REMOVED = 0x00000004 - SE_PRIVILEGE_USED_FOR_ACCESS = 0x80000000 - SE_PRIVILEGE_VALID_ATTRIBUTES = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS -) - -// Token types -const ( - TokenPrimary = 1 - TokenImpersonation = 2 -) - -// Impersonation levels -const ( - SecurityAnonymous = 0 - SecurityIdentification = 1 - SecurityImpersonation = 2 - SecurityDelegation = 3 -) - -type LUID struct { - LowPart uint32 - HighPart int32 -} - -type LUIDAndAttributes struct { - Luid LUID - Attributes uint32 -} - -type SIDAndAttributes struct { - Sid *SID - Attributes uint32 -} - -type Tokenuser struct { - User SIDAndAttributes -} - -type Tokenprimarygroup struct { - PrimaryGroup *SID -} - -type Tokengroups struct { - GroupCount uint32 - Groups [1]SIDAndAttributes // Use AllGroups() for iterating. -} - -// AllGroups returns a slice that can be used to iterate over the groups in g. -func (g *Tokengroups) AllGroups() []SIDAndAttributes { - return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount] -} - -type Tokenprivileges struct { - PrivilegeCount uint32 - Privileges [1]LUIDAndAttributes // Use AllPrivileges() for iterating. -} - -// AllPrivileges returns a slice that can be used to iterate over the privileges in p. 
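The Groups [1]SIDAndAttributes field above is a variable-length trailing array in disguise; AllGroups (and AllPrivileges, whose body follows) reslice it to the real element count so it can be ranged over safely. A sketch, using OpenProcessToken and GetTokenGroups declared further down in this file and CurrentProcess from the same package:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	var token windows.Token
	if err := windows.OpenProcessToken(windows.CurrentProcess(), windows.TOKEN_QUERY, &token); err != nil {
		panic(err)
	}
	defer token.Close()

	groups, err := token.GetTokenGroups()
	if err != nil {
		panic(err)
	}
	// AllGroups caps the slice at GroupCount, so range never walks past
	// the memory actually returned by GetTokenInformation.
	for _, g := range groups.AllGroups() {
		fmt.Printf("%s (attributes %#x)\n", g.Sid.String(), g.Attributes)
	}
}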
-func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes { - return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount] -} - -type Tokenmandatorylabel struct { - Label SIDAndAttributes -} - -func (tml *Tokenmandatorylabel) Size() uint32 { - return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid) -} - -// Authorization Functions -//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership -//sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken -//sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken -//sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf -//sys RevertToSelf() (err error) = advapi32.RevertToSelf -//sys SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken -//sys LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW -//sys AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges -//sys AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups -//sys GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation -//sys SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation -//sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx -//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW -//sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW -//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW -//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW - -// An access token contains the security information for a logon session. -// The system creates an access token when a user logs on, and every -// process executed on behalf of the user has a copy of the token. -// The token identifies the user, the user's groups, and the user's -// privileges. The system uses the token to control access to securable -// objects and to control the ability of the user to perform various -// system-related operations on the local computer. -type Token Handle - -// OpenCurrentProcessToken opens an access token associated with current -// process with TOKEN_QUERY access. It is a real token that needs to be closed. -// -// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) -// with the desired access instead, or use GetCurrentProcessToken for a -// TOKEN_QUERY token. -func OpenCurrentProcessToken() (Token, error) { - var token Token - err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) - return token, err -} - -// GetCurrentProcessToken returns the access token associated with -// the current process. 
It is a pseudo token that does not need -// to be closed. -func GetCurrentProcessToken() Token { - return Token(^uintptr(4 - 1)) -} - -// GetCurrentThreadToken returns the access token associated with -// the current thread. It is a pseudo token that does not need -// to be closed. -func GetCurrentThreadToken() Token { - return Token(^uintptr(5 - 1)) -} - -// GetCurrentThreadEffectiveToken returns the effective access token -// associated with the current thread. It is a pseudo token that does -// not need to be closed. -func GetCurrentThreadEffectiveToken() Token { - return Token(^uintptr(6 - 1)) -} - -// Close releases access to the access token. -func (t Token) Close() error { - return CloseHandle(Handle(t)) -} - -// getInfo retrieves a specified type of information about an access token. -func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) { - n := uint32(initSize) - for { - b := make([]byte, n) - e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n) - if e == nil { - return unsafe.Pointer(&b[0]), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return nil, e - } - if n <= uint32(len(b)) { - return nil, e - } - } -} - -// GetTokenUser retrieves access token t user account information. -func (t Token) GetTokenUser() (*Tokenuser, error) { - i, e := t.getInfo(TokenUser, 50) - if e != nil { - return nil, e - } - return (*Tokenuser)(i), nil -} - -// GetTokenGroups retrieves group accounts associated with access token t. -func (t Token) GetTokenGroups() (*Tokengroups, error) { - i, e := t.getInfo(TokenGroups, 50) - if e != nil { - return nil, e - } - return (*Tokengroups)(i), nil -} - -// GetTokenPrimaryGroup retrieves access token t primary group information. -// A pointer to a SID structure representing a group that will become -// the primary group of any objects created by a process using this access token. -func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) { - i, e := t.getInfo(TokenPrimaryGroup, 50) - if e != nil { - return nil, e - } - return (*Tokenprimarygroup)(i), nil -} - -// GetUserProfileDirectory retrieves the path to the -// root directory of the access token t user's profile. -func (t Token) GetUserProfileDirectory() (string, error) { - n := uint32(100) - for { - b := make([]uint16, n) - e := GetUserProfileDirectory(t, &b[0], &n) - if e == nil { - return UTF16ToString(b), nil - } - if e != ERROR_INSUFFICIENT_BUFFER { - return "", e - } - if n <= uint32(len(b)) { - return "", e - } - } -} - -// IsElevated returns whether the current token is elevated from a UAC perspective. -func (token Token) IsElevated() bool { - var isElevated uint32 - var outLen uint32 - err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen) - if err != nil { - return false - } - return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0 -} - -// GetLinkedToken returns the linked token, which may be an elevated UAC token. -func (token Token) GetLinkedToken() (Token, error) { - var linkedToken Token - var outLen uint32 - err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen) - if err != nil { - return Token(0), err - } - return linkedToken, nil -} - -// GetSystemDirectory retrieves the path to current location of the system -// directory, which is typically, though not always, `C:\Windows\System32`.
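A sketch of the UAC helpers above, using the pseudo token so no handle management is needed for the process token itself; the linked token, if any, is a real token and does need closing:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	token := windows.GetCurrentProcessToken() // pseudo token, no Close needed
	fmt.Println("elevated:", token.IsElevated())

	// Under UAC, a filtered token links to its elevated counterpart.
	if linked, err := token.GetLinkedToken(); err == nil {
		fmt.Println("linked token elevated:", linked.IsElevated())
		linked.Close()
	}
}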
-func GetSystemDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetWindowsDirectory retrieves the path to current location of the Windows -// directory, which is typically, though not always, `C:\Windows`. This may -// be a private user directory in the case that the application is running -// under a terminal server. -func GetWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// GetSystemWindowsDirectory retrieves the path to current location of the -// Windows directory, which is typically, though not always, `C:\Windows`. -func GetSystemWindowsDirectory() (string, error) { - n := uint32(MAX_PATH) - for { - b := make([]uint16, n) - l, e := getSystemWindowsDirectory(&b[0], n) - if e != nil { - return "", e - } - if l <= n { - return UTF16ToString(b[:l]), nil - } - n = l - } -} - -// IsMember reports whether the access token t is a member of the provided SID. -func (t Token) IsMember(sid *SID) (bool, error) { - var b int32 - if e := checkTokenMembership(t, sid, &b); e != nil { - return false, e - } - return b != 0, nil -} - -const ( - WTS_CONSOLE_CONNECT = 0x1 - WTS_CONSOLE_DISCONNECT = 0x2 - WTS_REMOTE_CONNECT = 0x3 - WTS_REMOTE_DISCONNECT = 0x4 - WTS_SESSION_LOGON = 0x5 - WTS_SESSION_LOGOFF = 0x6 - WTS_SESSION_LOCK = 0x7 - WTS_SESSION_UNLOCK = 0x8 - WTS_SESSION_REMOTE_CONTROL = 0x9 - WTS_SESSION_CREATE = 0xa - WTS_SESSION_TERMINATE = 0xb -) - -const ( - WTSActive = 0 - WTSConnected = 1 - WTSConnectQuery = 2 - WTSShadow = 3 - WTSDisconnected = 4 - WTSIdle = 5 - WTSListen = 6 - WTSReset = 7 - WTSDown = 8 - WTSInit = 9 -) - -type WTSSESSION_NOTIFICATION struct { - Size uint32 - SessionID uint32 -} - -type WTS_SESSION_INFO struct { - SessionID uint32 - WindowStationName *uint16 - State uint32 -} - -//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken -//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW -//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory - -type ACL struct { - aclRevision byte - sbz1 byte - aclSize uint16 - aceCount uint16 - sbz2 uint16 -} - -type SECURITY_DESCRIPTOR struct { - revision byte - sbz1 byte - control SECURITY_DESCRIPTOR_CONTROL - owner *SID - group *SID - sacl *ACL - dacl *ACL -} - -type SecurityAttributes struct { - Length uint32 - SecurityDescriptor *SECURITY_DESCRIPTOR - InheritHandle uint32 -} - -type SE_OBJECT_TYPE uint32 - -// Constants for type SE_OBJECT_TYPE -const ( - SE_UNKNOWN_OBJECT_TYPE = 0 - SE_FILE_OBJECT = 1 - SE_SERVICE = 2 - SE_PRINTER = 3 - SE_REGISTRY_KEY = 4 - SE_LMSHARE = 5 - SE_KERNEL_OBJECT = 6 - SE_WINDOW_OBJECT = 7 - SE_DS_OBJECT = 8 - SE_DS_OBJECT_ALL = 9 - SE_PROVIDER_DEFINED_OBJECT = 10 - SE_WMIGUID_OBJECT = 11 - SE_REGISTRY_WOW64_32KEY = 12 - SE_REGISTRY_WOW64_64KEY = 13 -) - -type SECURITY_INFORMATION uint32 - -// Constants for type SECURITY_INFORMATION -const ( - OWNER_SECURITY_INFORMATION = 0x00000001 - GROUP_SECURITY_INFORMATION = 0x00000002 - DACL_SECURITY_INFORMATION = 0x00000004 - SACL_SECURITY_INFORMATION = 0x00000008 - LABEL_SECURITY_INFORMATION = 0x00000010 - ATTRIBUTE_SECURITY_INFORMATION = 
0x00000020 - SCOPE_SECURITY_INFORMATION = 0x00000040 - BACKUP_SECURITY_INFORMATION = 0x00010000 - PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 - PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 - UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 - UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 -) - -type SECURITY_DESCRIPTOR_CONTROL uint16 - -// Constants for type SECURITY_DESCRIPTOR_CONTROL -const ( - SE_OWNER_DEFAULTED = 0x0001 - SE_GROUP_DEFAULTED = 0x0002 - SE_DACL_PRESENT = 0x0004 - SE_DACL_DEFAULTED = 0x0008 - SE_SACL_PRESENT = 0x0010 - SE_SACL_DEFAULTED = 0x0020 - SE_DACL_AUTO_INHERIT_REQ = 0x0100 - SE_SACL_AUTO_INHERIT_REQ = 0x0200 - SE_DACL_AUTO_INHERITED = 0x0400 - SE_SACL_AUTO_INHERITED = 0x0800 - SE_DACL_PROTECTED = 0x1000 - SE_SACL_PROTECTED = 0x2000 - SE_RM_CONTROL_VALID = 0x4000 - SE_SELF_RELATIVE = 0x8000 -) - -type ACCESS_MASK uint32 - -// Constants for type ACCESS_MASK -const ( - DELETE = 0x00010000 - READ_CONTROL = 0x00020000 - WRITE_DAC = 0x00040000 - WRITE_OWNER = 0x00080000 - SYNCHRONIZE = 0x00100000 - STANDARD_RIGHTS_REQUIRED = 0x000F0000 - STANDARD_RIGHTS_READ = READ_CONTROL - STANDARD_RIGHTS_WRITE = READ_CONTROL - STANDARD_RIGHTS_EXECUTE = READ_CONTROL - STANDARD_RIGHTS_ALL = 0x001F0000 - SPECIFIC_RIGHTS_ALL = 0x0000FFFF - ACCESS_SYSTEM_SECURITY = 0x01000000 - MAXIMUM_ALLOWED = 0x02000000 - GENERIC_READ = 0x80000000 - GENERIC_WRITE = 0x40000000 - GENERIC_EXECUTE = 0x20000000 - GENERIC_ALL = 0x10000000 -) - -type ACCESS_MODE uint32 - -// Constants for type ACCESS_MODE -const ( - NOT_USED_ACCESS = 0 - GRANT_ACCESS = 1 - SET_ACCESS = 2 - DENY_ACCESS = 3 - REVOKE_ACCESS = 4 - SET_AUDIT_SUCCESS = 5 - SET_AUDIT_FAILURE = 6 -) - -// Constants for AceFlags and Inheritance fields -const ( - NO_INHERITANCE = 0x0 - SUB_OBJECTS_ONLY_INHERIT = 0x1 - SUB_CONTAINERS_ONLY_INHERIT = 0x2 - SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 - INHERIT_NO_PROPAGATE = 0x4 - INHERIT_ONLY = 0x8 - INHERITED_ACCESS_ENTRY = 0x10 - INHERITED_PARENT = 0x10000000 - INHERITED_GRANDPARENT = 0x20000000 - OBJECT_INHERIT_ACE = 0x1 - CONTAINER_INHERIT_ACE = 0x2 - NO_PROPAGATE_INHERIT_ACE = 0x4 - INHERIT_ONLY_ACE = 0x8 - INHERITED_ACE = 0x10 - VALID_INHERIT_FLAGS = 0x1F -) - -type MULTIPLE_TRUSTEE_OPERATION uint32 - -// Constants for MULTIPLE_TRUSTEE_OPERATION -const ( - NO_MULTIPLE_TRUSTEE = 0 - TRUSTEE_IS_IMPERSONATE = 1 -) - -type TRUSTEE_FORM uint32 - -// Constants for TRUSTEE_FORM -const ( - TRUSTEE_IS_SID = 0 - TRUSTEE_IS_NAME = 1 - TRUSTEE_BAD_FORM = 2 - TRUSTEE_IS_OBJECTS_AND_SID = 3 - TRUSTEE_IS_OBJECTS_AND_NAME = 4 -) - -type TRUSTEE_TYPE uint32 - -// Constants for TRUSTEE_TYPE -const ( - TRUSTEE_IS_UNKNOWN = 0 - TRUSTEE_IS_USER = 1 - TRUSTEE_IS_GROUP = 2 - TRUSTEE_IS_DOMAIN = 3 - TRUSTEE_IS_ALIAS = 4 - TRUSTEE_IS_WELL_KNOWN_GROUP = 5 - TRUSTEE_IS_DELETED = 6 - TRUSTEE_IS_INVALID = 7 - TRUSTEE_IS_COMPUTER = 8 -) - -// Constants for ObjectsPresent field -const ( - ACE_OBJECT_TYPE_PRESENT = 0x1 - ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 -) - -type EXPLICIT_ACCESS struct { - AccessPermissions ACCESS_MASK - AccessMode ACCESS_MODE - Inheritance uint32 - Trustee TRUSTEE -} - -// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. 
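Because TrusteeValue is a raw union (the type is defined just below), it must be populated through one of the TrusteeValueFrom* helpers rather than by direct assignment. A small sketch; the account name is a placeholder:

// +build windows

package main

import "golang.org/x/sys/windows"

// trusteeForName builds a TRUSTEE that names an account by string.
func trusteeForName(account string) windows.TRUSTEE {
	return windows.TRUSTEE{
		TrusteeForm:  windows.TRUSTEE_IS_NAME,
		TrusteeType:  windows.TRUSTEE_IS_USER,
		TrusteeValue: windows.TrusteeValueFromString(account),
	}
}

func main() {
	_ = trusteeForName(`DOMAIN\user`) // placeholder account
}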
-type TrusteeValue uintptr - -func TrusteeValueFromString(str string) TrusteeValue { - return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) -} -func TrusteeValueFromSID(sid *SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(sid)) -} -func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndSid)) -} -func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { - return TrusteeValue(unsafe.Pointer(objectsAndName)) -} - -type TRUSTEE struct { - MultipleTrustee *TRUSTEE - MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION - TrusteeForm TRUSTEE_FORM - TrusteeType TRUSTEE_TYPE - TrusteeValue TrusteeValue -} - -type OBJECTS_AND_SID struct { - ObjectsPresent uint32 - ObjectTypeGuid GUID - InheritedObjectTypeGuid GUID - Sid *SID -} - -type OBJECTS_AND_NAME struct { - ObjectsPresent uint32 - ObjectType SE_OBJECT_TYPE - ObjectTypeName *uint16 - InheritedObjectTypeName *uint16 - Name *uint16 -} - -//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo -//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo -//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW -//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW - -//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW -//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor - -//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl -//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl -//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl -//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner -//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup -//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength -//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl -//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor - -//sys setSecurityDescriptorControl(sd 
*SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl -//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl -//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl -//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner -//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup -//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl - -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW - -//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD -//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD - -//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW - -// Control returns the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { - err = getSecurityDescriptorControl(sd, &control, &revision) - return -} - -// SetControl sets the security descriptor control bits. -func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { - return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) -} - -// RMControl returns the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { - err = getSecurityDescriptorRMControl(sd, &control) - return -} - -// SetRMControl sets the security descriptor resource manager control bits. -func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { - setSecurityDescriptorRMControl(sd, &rmControl) -} - -// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil -// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. -func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetDACL sets the absolute security descriptor DACL. 
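A sketch of the DACL accessor above, which distinguishes "no DACL at all" (ERROR_OBJECT_NOT_FOUND) from a present-but-nil DACL (what the comment above calls an "empty DACL", i.e. fully permissive). It builds the descriptor with SecurityDescriptorFromString, defined further down; the deleted code continues below:

// +build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// SDDL: a DACL with a single ACE granting Everyone (WD) generic-all.
	sd, err := windows.SecurityDescriptorFromString("D:(A;;GA;;;WD)")
	if err != nil {
		panic(err)
	}
	dacl, defaulted, err := sd.DACL()
	if err == windows.ERROR_OBJECT_NOT_FOUND {
		fmt.Println("no DACL present")
		return
	}
	if err != nil {
		panic(err)
	}
	fmt.Println("DACL present:", dacl != nil, "defaulted:", defaulted)
}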
-func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) -} - -// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil -// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns -// ERROR_OBJECT_NOT_FOUND. -func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { - var present bool - err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) - if !present { - err = ERROR_OBJECT_NOT_FOUND - } - return -} - -// SetSACL sets the absolute security descriptor SACL. -func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { - return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) -} - -// Owner returns the security descriptor owner and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { - err = getSecurityDescriptorOwner(sd, &owner, &defaulted) - return -} - -// SetOwner sets the absolute security descriptor owner. -func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { - return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) -} - -// Group returns the security descriptor group and whether it was defaulted. -func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { - err = getSecurityDescriptorGroup(sd, &group, &defaulted) - return -} - -// SetGroup sets the absolute security descriptor group. -func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { - return setSecurityDescriptorGroup(absoluteSD, group, defaulted) -} - -// Length returns the length of the security descriptor. -func (sd *SECURITY_DESCRIPTOR) Length() uint32 { - return getSecurityDescriptorLength(sd) -} - -// IsValid returns whether the security descriptor is valid. -func (sd *SECURITY_DESCRIPTOR) IsValid() bool { - return isValidSecurityDescriptor(sd) -} - -// String returns the SDDL form of the security descriptor, with a function signature that can be -// used with %v formatting directives. -func (sd *SECURITY_DESCRIPTOR) String() string { - var sddl *uint16 - err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) - if err != nil { - return "" - } - defer LocalFree(Handle(unsafe.Pointer(sddl))) - return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:]) -} - -// ToAbsolute converts a self-relative security descriptor into an absolute one. -func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := selfRelativeSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE == 0 { - err = ERROR_INVALID_PARAMETER - return - } - var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 - err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, - nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeAbsoluteSD is expected to fail, but it succeeds.
- return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) - } - var ( - dacl *ACL - sacl *ACL - owner *SID - group *SID - ) - if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) - } - if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) - } - if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) - } - if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) - } - err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, - dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) - return -} - -// ToSelfRelative converts an absolute security descriptor into a self-relative one. -func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { - control, _, err := absoluteSD.Control() - if err != nil { - return - } - if control&SE_SELF_RELATIVE != 0 { - err = ERROR_INVALID_PARAMETER - return - } - var selfRelativeSDSize uint32 - err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) - switch err { - case ERROR_INSUFFICIENT_BUFFER: - case nil: - // makeSelfRelativeSD is expected to fail, but it succeeds. - return nil, ERROR_INTERNAL_ERROR - default: - return nil, err - } - if selfRelativeSDSize > 0 { - selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) - } - err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) - return -} - -func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { - sdBytes := make([]byte, selfRelativeSD.Length()) - copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)]) - return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0])) -} - -// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a -// self-relative security descriptor object allocated on the Go heap. -func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetSecurityInfo queries the security information for a given handle and returns the self-relative security -// descriptor result on the Go heap. -func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security -// descriptor result on the Go heap. 
-func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and -// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor -// result on the Go heap. -func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { - var winHeapSD *SECURITY_DESCRIPTOR - var winHeapSDSize uint32 - var firstAccessEntry *EXPLICIT_ACCESS - if len(accessEntries) > 0 { - firstAccessEntry = &accessEntries[0] - } - var firstAuditEntry *EXPLICIT_ACCESS - if len(auditEntries) > 0 { - firstAuditEntry = &auditEntries[0] - } - err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) - return winHeapSD.copySelfRelativeSecurityDescriptor(), nil -} - -// NewSecurityDescriptor creates and initializes a new absolute security descriptor. -func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { - absoluteSD = &SECURITY_DESCRIPTOR{} - err = initializeSecurityDescriptor(absoluteSD, 1) - return -} - -// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. -// Both explicitEntries and mergedACL are optional and can be nil. -func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { - var firstExplicitEntry *EXPLICIT_ACCESS - if len(explicitEntries) > 0 { - firstExplicitEntry = &explicitEntries[0] - } - var winHeapACL *ACL - err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) - if err != nil { - return - } - defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) - aclBytes := make([]byte, winHeapACL.aclSize) - copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)]) - return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil -} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go deleted file mode 100644 index 847e00b..0000000 --- a/vendor/golang.org/x/sys/windows/service.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
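
For context, the SECURITY_DESCRIPTOR helpers removed here compose as follows: parse SDDL, inspect the parts, re-emit SDDL. A minimal sketch against golang.org/x/sys/windows (the SDDL string and the checks are illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Owner/group Builtin Administrators, one ACE granting GENERIC_ALL
	// to Local System. Values are illustrative.
	sd, err := windows.SecurityDescriptorFromString("O:BAG:BAD:(A;;GA;;;SY)")
	if err != nil {
		log.Fatal(err)
	}
	owner, defaulted, err := sd.Owner()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("owner set:", owner != nil, "defaulted:", defaulted)

	// DACL returns ERROR_OBJECT_NOT_FOUND when no DACL is present at all.
	if dacl, _, err := sd.DACL(); err == nil {
		fmt.Println("have DACL:", dacl != nil)
	}

	// String re-emits SDDL, so descriptors round-trip through these helpers.
	fmt.Println("sddl:", sd.String())
}
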
- -// +build windows - -package windows - -const ( - SC_MANAGER_CONNECT = 1 - SC_MANAGER_CREATE_SERVICE = 2 - SC_MANAGER_ENUMERATE_SERVICE = 4 - SC_MANAGER_LOCK = 8 - SC_MANAGER_QUERY_LOCK_STATUS = 16 - SC_MANAGER_MODIFY_BOOT_CONFIG = 32 - SC_MANAGER_ALL_ACCESS = 0xf003f -) - -//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW - -const ( - SERVICE_KERNEL_DRIVER = 1 - SERVICE_FILE_SYSTEM_DRIVER = 2 - SERVICE_ADAPTER = 4 - SERVICE_RECOGNIZER_DRIVER = 8 - SERVICE_WIN32_OWN_PROCESS = 16 - SERVICE_WIN32_SHARE_PROCESS = 32 - SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS - SERVICE_INTERACTIVE_PROCESS = 256 - SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER - SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS - - SERVICE_BOOT_START = 0 - SERVICE_SYSTEM_START = 1 - SERVICE_AUTO_START = 2 - SERVICE_DEMAND_START = 3 - SERVICE_DISABLED = 4 - - SERVICE_ERROR_IGNORE = 0 - SERVICE_ERROR_NORMAL = 1 - SERVICE_ERROR_SEVERE = 2 - SERVICE_ERROR_CRITICAL = 3 - - SC_STATUS_PROCESS_INFO = 0 - - SC_ACTION_NONE = 0 - SC_ACTION_RESTART = 1 - SC_ACTION_REBOOT = 2 - SC_ACTION_RUN_COMMAND = 3 - - SERVICE_STOPPED = 1 - SERVICE_START_PENDING = 2 - SERVICE_STOP_PENDING = 3 - SERVICE_RUNNING = 4 - SERVICE_CONTINUE_PENDING = 5 - SERVICE_PAUSE_PENDING = 6 - SERVICE_PAUSED = 7 - SERVICE_NO_CHANGE = 0xffffffff - - SERVICE_ACCEPT_STOP = 1 - SERVICE_ACCEPT_PAUSE_CONTINUE = 2 - SERVICE_ACCEPT_SHUTDOWN = 4 - SERVICE_ACCEPT_PARAMCHANGE = 8 - SERVICE_ACCEPT_NETBINDCHANGE = 16 - SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 - SERVICE_ACCEPT_POWEREVENT = 64 - SERVICE_ACCEPT_SESSIONCHANGE = 128 - - SERVICE_CONTROL_STOP = 1 - SERVICE_CONTROL_PAUSE = 2 - SERVICE_CONTROL_CONTINUE = 3 - SERVICE_CONTROL_INTERROGATE = 4 - SERVICE_CONTROL_SHUTDOWN = 5 - SERVICE_CONTROL_PARAMCHANGE = 6 - SERVICE_CONTROL_NETBINDADD = 7 - SERVICE_CONTROL_NETBINDREMOVE = 8 - SERVICE_CONTROL_NETBINDENABLE = 9 - SERVICE_CONTROL_NETBINDDISABLE = 10 - SERVICE_CONTROL_DEVICEEVENT = 11 - SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 - SERVICE_CONTROL_POWEREVENT = 13 - SERVICE_CONTROL_SESSIONCHANGE = 14 - - SERVICE_ACTIVE = 1 - SERVICE_INACTIVE = 2 - SERVICE_STATE_ALL = 3 - - SERVICE_QUERY_CONFIG = 1 - SERVICE_CHANGE_CONFIG = 2 - SERVICE_QUERY_STATUS = 4 - SERVICE_ENUMERATE_DEPENDENTS = 8 - SERVICE_START = 16 - SERVICE_STOP = 32 - SERVICE_PAUSE_CONTINUE = 64 - SERVICE_INTERROGATE = 128 - SERVICE_USER_DEFINED_CONTROL = 256 - SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL - - SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 - - SERVICE_CONFIG_DESCRIPTION = 1 - SERVICE_CONFIG_FAILURE_ACTIONS = 2 - SERVICE_CONFIG_DELAYED_AUTO_START_INFO = 3 - SERVICE_CONFIG_FAILURE_ACTIONS_FLAG = 4 - SERVICE_CONFIG_SERVICE_SID_INFO = 5 - SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6 - SERVICE_CONFIG_PRESHUTDOWN_INFO = 7 - SERVICE_CONFIG_TRIGGER_INFO = 8 - SERVICE_CONFIG_PREFERRED_NODE = 9 - SERVICE_CONFIG_LAUNCH_PROTECTED = 12 - - SERVICE_SID_TYPE_NONE = 0 - SERVICE_SID_TYPE_UNRESTRICTED = 1 - SERVICE_SID_TYPE_RESTRICTED = 2 | SERVICE_SID_TYPE_UNRESTRICTED - - SC_ENUM_PROCESS_INFO = 0 - - SERVICE_NOTIFY_STATUS_CHANGE = 2 - SERVICE_NOTIFY_STOPPED = 0x00000001 - SERVICE_NOTIFY_START_PENDING = 
0x00000002 - SERVICE_NOTIFY_STOP_PENDING = 0x00000004 - SERVICE_NOTIFY_RUNNING = 0x00000008 - SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010 - SERVICE_NOTIFY_PAUSE_PENDING = 0x00000020 - SERVICE_NOTIFY_PAUSED = 0x00000040 - SERVICE_NOTIFY_CREATED = 0x00000080 - SERVICE_NOTIFY_DELETED = 0x00000100 - SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 -) - -type SERVICE_STATUS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 -} - -type SERVICE_TABLE_ENTRY struct { - ServiceName *uint16 - ServiceProc uintptr -} - -type QUERY_SERVICE_CONFIG struct { - ServiceType uint32 - StartType uint32 - ErrorControl uint32 - BinaryPathName *uint16 - LoadOrderGroup *uint16 - TagId uint32 - Dependencies *uint16 - ServiceStartName *uint16 - DisplayName *uint16 -} - -type SERVICE_DESCRIPTION struct { - Description *uint16 -} - -type SERVICE_DELAYED_AUTO_START_INFO struct { - IsDelayedAutoStartUp uint32 -} - -type SERVICE_STATUS_PROCESS struct { - ServiceType uint32 - CurrentState uint32 - ControlsAccepted uint32 - Win32ExitCode uint32 - ServiceSpecificExitCode uint32 - CheckPoint uint32 - WaitHint uint32 - ProcessId uint32 - ServiceFlags uint32 -} - -type ENUM_SERVICE_STATUS_PROCESS struct { - ServiceName *uint16 - DisplayName *uint16 - ServiceStatusProcess SERVICE_STATUS_PROCESS -} - -type SERVICE_NOTIFY struct { - Version uint32 - NotifyCallback uintptr - Context uintptr - NotificationStatus uint32 - ServiceStatus SERVICE_STATUS_PROCESS - NotificationTriggered uint32 - ServiceNames *uint16 -} - -type SERVICE_FAILURE_ACTIONS struct { - ResetPeriod uint32 - RebootMsg *uint16 - Command *uint16 - ActionsCount uint32 - Actions *SC_ACTION -} - -type SC_ACTION struct { - Type uint32 - Delay uint32 -} - -type QUERY_SERVICE_LOCK_STATUS struct { - IsLocked uint32 - LockOwner *uint16 - LockDuration uint32 -} - -//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle -//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW -//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW -//sys DeleteService(service Handle) (err error) = advapi32.DeleteService -//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW -//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus -//sys QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW -//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService -//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW -//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus -//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = 
advapi32.ChangeServiceConfigW -//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW -//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W -//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W -//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW -//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx -//sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go deleted file mode 100644 index 917cc2a..0000000 --- a/vendor/golang.org/x/sys/windows/str.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package windows - -func itoa(val int) string { // do it here rather than with fmt to avoid dependency - if val < 0 { - return "-" + itoa(-val) - } - var buf [32]byte // big enough for int64 - i := len(buf) - 1 - for val >= 10 { - buf[i] = byte(val%10 + '0') - i-- - val /= 10 - } - buf[i] = byte(val + '0') - return string(buf[i:]) -} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go deleted file mode 100644 index af828a9..0000000 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -// Package windows contains an interface to the low-level operating system -// primitives. OS details vary depending on the underlying system, and -// by default, godoc will display the OS-specific documentation for the current -// system. If you want godoc to display syscall documentation for another -// system, set $GOOS and $GOARCH to the desired system. For example, if -// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS -// to freebsd and $GOARCH to arm. -// -// The primary use of this package is inside other packages that provide a more -// portable interface to the system, such as "os", "time" and "net". Use -// those packages rather than this one if you can. -// -// For details of the functions and data types in this package consult -// the manuals for the appropriate operating system. -// -// These calls return err == nil to indicate success; otherwise -// err represents an operating system error describing the failure and -// holds a value of type syscall.Errno. -package windows // import "golang.org/x/sys/windows" - -import ( - "syscall" -) - -// ByteSliceFromString returns a NUL-terminated slice of bytes -// containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). 
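
The service API declared in the removed service.go is typically used as sketched below: connect to the service control manager, open a service by name, and read its status. This assumes golang.org/x/sys/windows; "Spooler" is just an example service name, and the higher-level golang.org/x/sys/windows/svc/mgr package wraps these same calls:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	mgr, err := windows.OpenSCManager(nil, nil, windows.SC_MANAGER_CONNECT)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseServiceHandle(mgr)

	name, _ := windows.UTF16PtrFromString("Spooler") // example service name
	svc, err := windows.OpenService(mgr, name, windows.SERVICE_QUERY_STATUS)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseServiceHandle(svc)

	var status windows.SERVICE_STATUS
	if err := windows.QueryServiceStatus(svc, &status); err != nil {
		log.Fatal(err)
	}
	fmt.Println("running:", status.CurrentState == windows.SERVICE_RUNNING)
}
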
-func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } - } - a := make([]byte, len(s)+1) - copy(a, s) - return a, nil -} - -// BytePtrFromString returns a pointer to a NUL-terminated array of -// bytes containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func BytePtrFromString(s string) (*byte, error) { - a, err := ByteSliceFromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -// Single-word zero for use when we need a valid pointer to 0 bytes. -// See mksyscall.pl. -var _zero uintptr - -func (ts *Timespec) Unix() (sec int64, nsec int64) { - return int64(ts.Sec), int64(ts.Nsec) -} - -func (tv *Timeval) Unix() (sec int64, nsec int64) { - return int64(tv.Sec), int64(tv.Usec) * 1000 -} - -func (ts *Timespec) Nano() int64 { - return int64(ts.Sec)*1e9 + int64(ts.Nsec) -} - -func (tv *Timeval) Nano() int64 { - return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 -} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go deleted file mode 100644 index 053d664..0000000 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ /dev/null @@ -1,1454 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Windows system calls. - -package windows - -import ( - errorspkg "errors" - "sync" - "syscall" - "time" - "unicode/utf16" - "unsafe" -) - -type Handle uintptr - -const ( - InvalidHandle = ^Handle(0) - - // Flags for DefineDosDevice. - DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 - DDD_NO_BROADCAST_SYSTEM = 0x00000008 - DDD_RAW_TARGET_PATH = 0x00000001 - DDD_REMOVE_DEFINITION = 0x00000002 - - // Return values for GetDriveType. - DRIVE_UNKNOWN = 0 - DRIVE_NO_ROOT_DIR = 1 - DRIVE_REMOVABLE = 2 - DRIVE_FIXED = 3 - DRIVE_REMOTE = 4 - DRIVE_CDROM = 5 - DRIVE_RAMDISK = 6 - - // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. - FILE_CASE_SENSITIVE_SEARCH = 0x00000001 - FILE_CASE_PRESERVED_NAMES = 0x00000002 - FILE_FILE_COMPRESSION = 0x00000010 - FILE_DAX_VOLUME = 0x20000000 - FILE_NAMED_STREAMS = 0x00040000 - FILE_PERSISTENT_ACLS = 0x00000008 - FILE_READ_ONLY_VOLUME = 0x00080000 - FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 - FILE_SUPPORTS_ENCRYPTION = 0x00020000 - FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 - FILE_SUPPORTS_HARD_LINKS = 0x00400000 - FILE_SUPPORTS_OBJECT_IDS = 0x00010000 - FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 - FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 - FILE_SUPPORTS_SPARSE_FILES = 0x00000040 - FILE_SUPPORTS_TRANSACTIONS = 0x00200000 - FILE_SUPPORTS_USN_JOURNAL = 0x02000000 - FILE_UNICODE_ON_DISK = 0x00000004 - FILE_VOLUME_IS_COMPRESSED = 0x00008000 - FILE_VOLUME_QUOTAS = 0x00000020 - - // Flags for LockFileEx. - LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 - LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 - - // Return values of SleepEx and other APC functions - STATUS_USER_APC = 0x000000C0 - WAIT_IO_COMPLETION = STATUS_USER_APC -) - -// StringToUTF16 is deprecated. Use UTF16FromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. 
-func StringToUTF16(s string) []uint16 { - a, err := UTF16FromString(s) - if err != nil { - panic("windows: string with NUL passed to StringToUTF16") - } - return a -} - -// UTF16FromString returns the UTF-16 encoding of the UTF-8 string -// s, with a terminating NUL added. If s contains a NUL byte at any -// location, it returns (nil, syscall.EINVAL). -func UTF16FromString(s string) ([]uint16, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, syscall.EINVAL - } - } - return utf16.Encode([]rune(s + "\x00")), nil -} - -// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, -// with a terminating NUL removed. -func UTF16ToString(s []uint16) string { - for i, v := range s { - if v == 0 { - s = s[0:i] - break - } - } - return string(utf16.Decode(s)) -} - -// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. -// If s contains a NUL byte this function panics instead of -// returning an error. -func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } - -// UTF16PtrFromString returns a pointer to the UTF-16 encoding of -// the UTF-8 string s, with a terminating NUL added. If s -// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). -func UTF16PtrFromString(s string) (*uint16, error) { - a, err := UTF16FromString(s) - if err != nil { - return nil, err - } - return &a[0], nil -} - -func Getpagesize() int { return 4096 } - -// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr. -func NewCallback(fn interface{}) uintptr { - return syscall.NewCallback(fn) -} - -// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. -// This is useful when interoperating with Windows code requiring callbacks. -// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
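
These UTF16 helpers are the usual bridge to W-suffixed APIs: encode Go strings before the call, decode NUL-terminated buffers after. A short sketch assuming golang.org/x/sys/windows; the file path is illustrative:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// UTF16PtrFromString rejects interior NULs, so user input is safe to pass.
	path, err := windows.UTF16PtrFromString(`C:\Windows\notepad.exe`) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	attrs, err := windows.GetFileAttributes(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("attributes: %#x\n", attrs)

	// Decoding goes the other way: UTF16ToString stops at the first NUL.
	buf := make([]uint16, windows.MAX_PATH)
	n, err := windows.GetModuleFileName(0, &buf[0], uint32(len(buf)))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("module:", windows.UTF16ToString(buf[:n]))
}
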
-func NewCallbackCDecl(fn interface{}) uintptr { - return syscall.NewCallbackCDecl(fn) -} - -// windows api calls - -//sys GetLastError() (lasterr error) -//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW -//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW -//sys FreeLibrary(handle Handle) (err error) -//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) -//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW -//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW -//sys GetVersion() (ver uint32, err error) -//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW -//sys ExitProcess(exitcode uint32) -//sys IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process -//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW -//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) -//sys GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) -//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] -//sys CloseHandle(handle Handle) (err error) -//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] -//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) -//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW -//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW -//sys FindClose(handle Handle) (err error) -//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) -//sys GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) -//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW -//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW -//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW -//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW -//sys DeleteFile(path *uint16) (err error) = DeleteFileW -//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW -//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW -//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) -//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW -//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW -//sys SetEndOfFile(handle Handle) (err error) -//sys GetSystemTimeAsFileTime(time *Filetime) -//sys GetSystemTimePreciseAsFileTime(time *Filetime) -//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) 
[failretval==0xffffffff] -//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) -//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) -//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) -//sys CancelIo(s Handle) (err error) -//sys CancelIoEx(s Handle, o *Overlapped) (err error) -//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW -//sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW -//sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath -//sys TerminateProcess(handle Handle, exitcode uint32) (err error) -//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) -//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW -//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) -//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) -//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] -//sys waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects -//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW -//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) -//sys GetFileType(filehandle Handle) (n uint32, err error) -//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW -//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext -//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom -//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW -//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW -//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW -//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW -//sys CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock -//sys DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock -//sys getTickCount64() (ms uint64) = kernel32.GetTickCount64 -//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) -//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = 
kernel32.GetFileAttributesW -//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW -//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW -//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW -//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW -//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] -//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) -//sys FlushFileBuffers(handle Handle) (err error) -//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW -//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW -//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW -//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW -//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) -//sys UnmapViewOfFile(addr uintptr) (err error) -//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) -//sys VirtualLock(addr uintptr, length uintptr) (err error) -//sys VirtualUnlock(addr uintptr, length uintptr) (err error) -//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc -//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree -//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect -//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile -//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW -//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW -//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore -//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore -//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore -//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore -//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain -//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain -//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err 
error) [failretval==nil] = crypt32.CertCreateCertificateContext -//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext -//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy -//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW -//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey -//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW -//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW -//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW -//sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId -//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode -//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode -//sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo -//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW -//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW -//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot -//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW -//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW -//sys Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) -//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) -// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. 
-//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW -//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW -//sys GetCurrentThreadId() (id uint32) -//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW -//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW -//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW -//sys SetEvent(event Handle) (err error) = kernel32.SetEvent -//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent -//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent -//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW -//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW -//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW -//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex -//sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx -//sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW -//sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject -//sys TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject -//sys SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode -//sys ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread -//sys SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass -//sys GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass -//sys SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) -//sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) -//sys GetProcessId(process Handle) (id uint32, err error) -//sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) -//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost - -// Volume Management Functions -//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW -//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW -//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW -//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW -//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW -//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW 
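
As a quick illustration of the event declarations above (CreateEvent, SetEvent, WaitForSingleObject), a sketch assuming golang.org/x/sys/windows; a zero wait status means the object was signaled:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Manual-reset event (1), initially unsignaled (0), unnamed (nil).
	ev, err := windows.CreateEvent(nil, 1, 0, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseHandle(ev)

	if err := windows.SetEvent(ev); err != nil {
		log.Fatal(err)
	}
	// The event is already signaled, so this returns without waiting.
	status, err := windows.WaitForSingleObject(ev, 1000) // timeout in ms
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("wait status: %#x (0 = signaled)\n", status)
}
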
-//sys FindVolumeClose(findVolume Handle) (err error) -//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) -//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW -//sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW -//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] -//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW -//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW -//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW -//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW -//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW -//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW -//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW -//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW -//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW -//sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW -//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx -//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW -//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters -//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters -//sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString -//sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 -//sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid -//sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree -//sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion -//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers -//sys getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages -//sys getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages -//sys getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = 
kernel32.GetUserPreferredUILanguages -//sys getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages - -// Process Status API (PSAPI) -//sys EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses - -// syscall interface implementation for other packages - -// GetCurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentProcess for the same Handle without the nil -// error. -func GetCurrentProcess() (Handle, error) { - return CurrentProcess(), nil -} - -// CurrentProcess returns the handle for the current process. -// It is a pseudo handle that does not need to be closed. -func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } - -// GetCurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -// The returned error is always nil. -// -// Deprecated: use CurrentThread for the same Handle without the nil -// error. -func GetCurrentThread() (Handle, error) { - return CurrentThread(), nil -} - -// CurrentThread returns the handle for the current thread. -// It is a pseudo handle that does not need to be closed. -func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } - -// GetProcAddressByOrdinal retrieves the address of the exported -// function from module by ordinal. -func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Exit(code int) { ExitProcess(uint32(code)) } - -func makeInheritSa() *SecurityAttributes { - var sa SecurityAttributes - sa.Length = uint32(unsafe.Sizeof(sa)) - sa.InheritHandle = 1 - return &sa -} - -func Open(path string, mode int, perm uint32) (fd Handle, err error) { - if len(path) == 0 { - return InvalidHandle, ERROR_FILE_NOT_FOUND - } - pathp, err := UTF16PtrFromString(path) - if err != nil { - return InvalidHandle, err - } - var access uint32 - switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { - case O_RDONLY: - access = GENERIC_READ - case O_WRONLY: - access = GENERIC_WRITE - case O_RDWR: - access = GENERIC_READ | GENERIC_WRITE - } - if mode&O_CREAT != 0 { - access |= GENERIC_WRITE - } - if mode&O_APPEND != 0 { - access &^= GENERIC_WRITE - access |= FILE_APPEND_DATA - } - sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) - var sa *SecurityAttributes - if mode&O_CLOEXEC == 0 { - sa = makeInheritSa() - } - var createmode uint32 - switch { - case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): - createmode = CREATE_NEW - case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): - createmode = CREATE_ALWAYS - case mode&O_CREAT == O_CREAT: - createmode = OPEN_ALWAYS - case mode&O_TRUNC == O_TRUNC: - createmode = TRUNCATE_EXISTING - default: - createmode = OPEN_EXISTING - } - var attrs uint32 = FILE_ATTRIBUTE_NORMAL - if perm&S_IWRITE == 0 { - attrs = FILE_ATTRIBUTE_READONLY - } - h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) - return h, e -} - -func Read(fd Handle, p []byte) (n int, err error) { - var done uint32 - e := ReadFile(fd, p, &done, nil) - if e != nil { - if e == ERROR_BROKEN_PIPE { - // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF 
from stdin - return 0, nil - } - return 0, e - } - if raceenabled { - if done > 0 { - raceWriteRange(unsafe.Pointer(&p[0]), int(done)) - } - raceAcquire(unsafe.Pointer(&ioSync)) - } - return int(done), nil -} - -func Write(fd Handle, p []byte) (n int, err error) { - if raceenabled { - raceReleaseMerge(unsafe.Pointer(&ioSync)) - } - var done uint32 - e := WriteFile(fd, p, &done, nil) - if e != nil { - return 0, e - } - if raceenabled && done > 0 { - raceReadRange(unsafe.Pointer(&p[0]), int(done)) - } - return int(done), nil -} - -var ioSync int64 - -func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { - var w uint32 - switch whence { - case 0: - w = FILE_BEGIN - case 1: - w = FILE_CURRENT - case 2: - w = FILE_END - } - hi := int32(offset >> 32) - lo := int32(offset) - // use GetFileType to check pipe, pipe can't do seek - ft, _ := GetFileType(fd) - if ft == FILE_TYPE_PIPE { - return 0, syscall.EPIPE - } - rlo, e := SetFilePointer(fd, lo, &hi, w) - if e != nil { - return 0, e - } - return int64(hi)<<32 + int64(rlo), nil -} - -func Close(fd Handle) (err error) { - return CloseHandle(fd) -} - -var ( - Stdin = getStdHandle(STD_INPUT_HANDLE) - Stdout = getStdHandle(STD_OUTPUT_HANDLE) - Stderr = getStdHandle(STD_ERROR_HANDLE) -) - -func getStdHandle(stdhandle uint32) (fd Handle) { - r, _ := GetStdHandle(stdhandle) - CloseOnExec(r) - return r -} - -const ImplementsGetwd = true - -func Getwd() (wd string, err error) { - b := make([]uint16, 300) - n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func Chdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return SetCurrentDirectory(pathp) -} - -func Mkdir(path string, mode uint32) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return CreateDirectory(pathp, nil) -} - -func Rmdir(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return RemoveDirectory(pathp) -} - -func Unlink(path string) (err error) { - pathp, err := UTF16PtrFromString(path) - if err != nil { - return err - } - return DeleteFile(pathp) -} - -func Rename(oldpath, newpath string) (err error) { - from, err := UTF16PtrFromString(oldpath) - if err != nil { - return err - } - to, err := UTF16PtrFromString(newpath) - if err != nil { - return err - } - return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) -} - -func ComputerName() (name string, err error) { - var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 - b := make([]uint16, n) - e := GetComputerName(&b[0], &n) - if e != nil { - return "", e - } - return string(utf16.Decode(b[0:n])), nil -} - -func DurationSinceBoot() time.Duration { - return time.Duration(getTickCount64()) * time.Millisecond -} - -func Ftruncate(fd Handle, length int64) (err error) { - curoffset, e := Seek(fd, 0, 1) - if e != nil { - return e - } - defer Seek(fd, curoffset, 0) - _, e = Seek(fd, length, 0) - if e != nil { - return e - } - e = SetEndOfFile(fd) - if e != nil { - return e - } - return nil -} - -func Gettimeofday(tv *Timeval) (err error) { - var ft Filetime - GetSystemTimeAsFileTime(&ft) - *tv = NsecToTimeval(ft.Nanoseconds()) - return nil -} - -func Pipe(p []Handle) (err error) { - if len(p) != 2 { - return syscall.EINVAL - } - var r, w Handle - e := CreatePipe(&r, &w, makeInheritSa(), 0) - if e != nil { - return e - } - p[0] = r - p[1] = w - return nil -} - -func Utimes(path string, tv []Timeval) 
(err error) { - if len(tv) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer Close(h) - a := NsecToFiletime(tv[0].Nanoseconds()) - w := NsecToFiletime(tv[1].Nanoseconds()) - return SetFileTime(h, nil, &a, &w) -} - -func UtimesNano(path string, ts []Timespec) (err error) { - if len(ts) != 2 { - return syscall.EINVAL - } - pathp, e := UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := CreateFile(pathp, - FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, - OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer Close(h) - a := NsecToFiletime(TimespecToNsec(ts[0])) - w := NsecToFiletime(TimespecToNsec(ts[1])) - return SetFileTime(h, nil, &a, &w) -} - -func Fsync(fd Handle) (err error) { - return FlushFileBuffers(fd) -} - -func Chmod(path string, mode uint32) (err error) { - p, e := UTF16PtrFromString(path) - if e != nil { - return e - } - attrs, e := GetFileAttributes(p) - if e != nil { - return e - } - if mode&S_IWRITE != 0 { - attrs &^= FILE_ATTRIBUTE_READONLY - } else { - attrs |= FILE_ATTRIBUTE_READONLY - } - return SetFileAttributes(p, attrs) -} - -func LoadGetSystemTimePreciseAsFileTime() error { - return procGetSystemTimePreciseAsFileTime.Find() -} - -func LoadCancelIoEx() error { - return procCancelIoEx.Find() -} - -func LoadSetFileCompletionNotificationModes() error { - return procSetFileCompletionNotificationModes.Find() -} - -func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { - // Every other win32 array API takes arguments as "pointer, count", except for this function. So we - // can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore - // trivially stub this ourselves. 
- - var handlePtr *Handle - if len(handles) > 0 { - handlePtr = &handles[0] - } - return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds) -} - -// net api calls - -const socket_error = uintptr(^uint32(0)) - -//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup -//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup -//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl -//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket -//sys sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto -//sys recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom -//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt -//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt -//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind -//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect -//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname -//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername -//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen -//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown -//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket -//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx -//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs -//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv -//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend -//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom -//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo -//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname -//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname -//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs -//sys GetProtoByName(name 
string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname -//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W -//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree -//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W -//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW -//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW -//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry -//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo -//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes -//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW -//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses -//sys GetACP() (acp uint32) = kernel32.GetACP -//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar - -// For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. -var SocketDisableIPv6 bool - -type RawSockaddrInet4 struct { - Family uint16 - Port uint16 - Addr [4]byte /* in_addr */ - Zero [8]uint8 -} - -type RawSockaddrInet6 struct { - Family uint16 - Port uint16 - Flowinfo uint32 - Addr [16]byte /* in6_addr */ - Scope_id uint32 -} - -type RawSockaddr struct { - Family uint16 - Data [14]int8 -} - -type RawSockaddrAny struct { - Addr RawSockaddr - Pad [100]int8 -} - -type Sockaddr interface { - sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs -} - -type SockaddrInet4 struct { - Port int - Addr [4]byte - raw RawSockaddrInet4 -} - -func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type SockaddrInet6 struct { - Port int - ZoneId uint32 - Addr [16]byte - raw RawSockaddrInet6 -} - -func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { - if sa.Port < 0 || sa.Port > 0xFFFF { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = AF_INET6 - p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) - p[0] = byte(sa.Port >> 8) - p[1] = byte(sa.Port) - sa.raw.Scope_id = sa.ZoneId - for i := 0; i < len(sa.Addr); i++ { - sa.raw.Addr[i] = sa.Addr[i] - } - return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil -} - -type RawSockaddrUnix struct { - Family uint16 - Path [UNIX_PATH_MAX]int8 -} - -type SockaddrUnix struct { - Name string - raw RawSockaddrUnix -} - -func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { - name := sa.Name - n := len(name) - if n > len(sa.raw.Path) { - return nil, 0, syscall.EINVAL - } - if n == len(sa.raw.Path) && name[0] != '@' { - return nil, 0, syscall.EINVAL - } - sa.raw.Family = 
AF_UNIX - for i := 0; i < n; i++ { - sa.raw.Path[i] = int8(name[i]) - } - // length is family (uint16), name, NUL. - sl := int32(2) - if n > 0 { - sl += int32(n) + 1 - } - if sa.raw.Path[0] == '@' { - sa.raw.Path[0] = 0 - // Don't count trailing NUL for abstract address. - sl-- - } - - return unsafe.Pointer(&sa.raw), sl, nil -} - -func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { - switch rsa.Addr.Family { - case AF_UNIX: - pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa)) - sa := new(SockaddrUnix) - if pp.Path[0] == 0 { - // "Abstract" Unix domain socket. - // Rewrite leading NUL as @ for textual display. - // (This is the standard convention.) - // Not friendly to overwrite in place, - // but the callers below don't care. - pp.Path[0] = '@' - } - - // Assume path ends at NUL. - // This is not technically the Linux semantics for - // abstract Unix domain sockets--they are supposed - // to be uninterpreted fixed-size binary blobs--but - // everyone uses this convention. - n := 0 - for n < len(pp.Path) && pp.Path[n] != 0 { - n++ - } - bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] - sa.Name = string(bytes) - return sa, nil - - case AF_INET: - pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet4) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } - return sa, nil - - case AF_INET6: - pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) - sa := new(SockaddrInet6) - p := (*[2]byte)(unsafe.Pointer(&pp.Port)) - sa.Port = int(p[0])<<8 + int(p[1]) - sa.ZoneId = pp.Scope_id - for i := 0; i < len(sa.Addr); i++ { - sa.Addr[i] = pp.Addr[i] - } - return sa, nil - } - return nil, syscall.EAFNOSUPPORT -} - -func Socket(domain, typ, proto int) (fd Handle, err error) { - if domain == AF_INET6 && SocketDisableIPv6 { - return InvalidHandle, syscall.EAFNOSUPPORT - } - return socket(int32(domain), int32(typ), int32(proto)) -} - -func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { - v := int32(value) - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) -} - -func Bind(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return bind(fd, ptr, n) -} - -func Connect(fd Handle, sa Sockaddr) (err error) { - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connect(fd, ptr, n) -} - -func Getsockname(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getsockname(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Getpeername(fd Handle) (sa Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - if err = getpeername(fd, &rsa, &l); err != nil { - return - } - return rsa.Sockaddr() -} - -func Listen(s Handle, n int) (err error) { - return listen(s, int32(n)) -} - -func Shutdown(fd Handle, how int) (err error) { - return shutdown(fd, int32(how)) -} - -func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { - rsa, l, err := to.sockaddr() - if err != nil { - return err - } - return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) -} - -func LoadGetAddrInfo() error { - return procGetAddrInfoW.Find() -} - -var connectExFunc struct { - once sync.Once - addr uintptr - err error -} - -func 
LoadConnectEx() error { - connectExFunc.once.Do(func() { - var s Handle - s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) - if connectExFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - connectExFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), - uint32(unsafe.Sizeof(WSAID_CONNECTEX)), - (*byte)(unsafe.Pointer(&connectExFunc.addr)), - uint32(unsafe.Sizeof(connectExFunc.addr)), - &n, nil, 0) - }) - return connectExFunc.err -} - -func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { - err := LoadConnectEx() - if err != nil { - return errorspkg.New("failed to find ConnectEx: " + err.Error()) - } - ptr, n, err := sa.sockaddr() - if err != nil { - return err - } - return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) -} - -var sendRecvMsgFunc struct { - once sync.Once - sendAddr uintptr - recvAddr uintptr - err error -} - -func loadWSASendRecvMsg() error { - sendRecvMsgFunc.once.Do(func() { - var s Handle - s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) - if sendRecvMsgFunc.err != nil { - return - } - defer CloseHandle(s) - var n uint32 - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), - uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), - &n, nil, 0) - if sendRecvMsgFunc.err != nil { - return - } - sendRecvMsgFunc.err = WSAIoctl(s, - SIO_GET_EXTENSION_FUNCTION_POINTER, - (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), - uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), - (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), - uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), - &n, nil, 0) - }) - return sendRecvMsgFunc.err -} - -func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return err -} - -func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { - err := loadWSASendRecvMsg() - if err != nil { - return err - } - r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return err -} - -// Invented structures to support what package os expects. 
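
The LoadConnectEx and loadWSASendRecvMsg helpers removed above share one idiom: a sync.Once-guarded struct that resolves a Winsock extension-function pointer a single time and caches both the address and the error for every later caller. A minimal, platform-neutral sketch of that idiom, assuming a stub lookup in place of the real WSAIoctl(SIO_GET_EXTENSION_FUNCTION_POINTER, ...) round trip (all names here are illustrative, not the vendor API):

```go
package main

import (
	"fmt"
	"sync"
)

// lazyProcAddr caches a one-time, fallible lookup, mirroring the
// connectExFunc/sendRecvMsgFunc pattern above: sync.Once guarantees the
// lookup runs at most once, and every caller then observes the same
// cached (addr, err) pair.
type lazyProcAddr struct {
	once sync.Once
	addr uintptr
	err  error
}

// load runs lookup exactly once; in the vendor code this is where the
// WSAIoctl call fills in the extension function pointer.
func (l *lazyProcAddr) load(lookup func() (uintptr, error)) error {
	l.once.Do(func() {
		l.addr, l.err = lookup()
	})
	return l.err
}

func main() {
	var connectEx lazyProcAddr
	lookups := 0
	stub := func() (uintptr, error) {
		lookups++ // stands in for the WSAIoctl round trip
		return 0x7ff612340000, nil
	}
	for i := 0; i < 3; i++ {
		if err := connectEx.load(stub); err != nil {
			fmt.Println("lookup failed:", err)
			return
		}
	}
	fmt.Printf("addr=%#x lookups=%d\n", connectEx.addr, lookups) // lookups == 1
}
```

Note that the error is cached alongside the address, so a failed lookup is never retried; that matches the vendor code's behavior.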
-type Rusage struct { - CreationTime Filetime - ExitTime Filetime - KernelTime Filetime - UserTime Filetime -} - -type WaitStatus struct { - ExitCode uint32 -} - -func (w WaitStatus) Exited() bool { return true } - -func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } - -func (w WaitStatus) Signal() Signal { return -1 } - -func (w WaitStatus) CoreDump() bool { return false } - -func (w WaitStatus) Stopped() bool { return false } - -func (w WaitStatus) Continued() bool { return false } - -func (w WaitStatus) StopSignal() Signal { return -1 } - -func (w WaitStatus) Signaled() bool { return false } - -func (w WaitStatus) TrapCause() int { return -1 } - -// Timespec is an invented structure on Windows, but here for -// consistency with the corresponding package for other operating systems. -type Timespec struct { - Sec int64 - Nsec int64 -} - -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } - -func NsecToTimespec(nsec int64) (ts Timespec) { - ts.Sec = nsec / 1e9 - ts.Nsec = nsec % 1e9 - return -} - -// TODO(brainman): fix all needed for net - -func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } - -func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { - var rsa RawSockaddrAny - l := int32(unsafe.Sizeof(rsa)) - n32, err := recvfrom(fd, p, int32(flags), &rsa, &l) - n = int(n32) - if err != nil { - return - } - from, err = rsa.Sockaddr() - return -} - -func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { - ptr, l, err := to.sockaddr() - if err != nil { - return err - } - return sendto(fd, p, int32(flags), ptr, l) -} - -func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } - -// The Linger struct is wrong but we only noticed after Go 1. -// sysLinger is the real system call structure. - -// BUG(brainman): The definition of Linger is not appropriate for direct use -// with Setsockopt and Getsockopt. -// Use SetsockoptLinger instead. - -type Linger struct { - Onoff int32 - Linger int32 -} - -type sysLinger struct { - Onoff uint16 - Linger uint16 -} - -type IPMreq struct { - Multiaddr [4]byte /* in_addr */ - Interface [4]byte /* in_addr */ -} - -type IPv6Mreq struct { - Multiaddr [16]byte /* in6_addr */ - Interface uint32 -} - -func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } - -func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { - sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) -} - -func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) -} -func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { - return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) -} -func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { - return syscall.EWINDOWS -} - -func Getpid() (pid int) { return int(GetCurrentProcessId()) } - -func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { - // NOTE(rsc): The Win32finddata struct is wrong for the system call: - // the two paths are each one uint16 short. Use the correct struct, - // a win32finddata1, and then copy the results out. 
- // There is no loss of expressivity here, because the final - // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. - // For Go 1.1, we might avoid the allocation of win32finddata1 here - // by adding a final Bug [2]uint16 field to the struct and then - // adjusting the fields in the result directly. - var data1 win32finddata1 - handle, err = findFirstFile1(name, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func FindNextFile(handle Handle, data *Win32finddata) (err error) { - var data1 win32finddata1 - err = findNextFile1(handle, &data1) - if err == nil { - copyFindData(data, &data1) - } - return -} - -func getProcessEntry(pid int) (*ProcessEntry32, error) { - snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) - if err != nil { - return nil, err - } - defer CloseHandle(snapshot) - var procEntry ProcessEntry32 - procEntry.Size = uint32(unsafe.Sizeof(procEntry)) - if err = Process32First(snapshot, &procEntry); err != nil { - return nil, err - } - for { - if procEntry.ProcessID == uint32(pid) { - return &procEntry, nil - } - err = Process32Next(snapshot, &procEntry) - if err != nil { - return nil, err - } - } -} - -func Getppid() (ppid int) { - pe, err := getProcessEntry(Getpid()) - if err != nil { - return -1 - } - return int(pe.ParentProcessID) -} - -// TODO(brainman): fix all needed for os -func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } -func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } -func Symlink(path, link string) (err error) { return syscall.EWINDOWS } - -func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } -func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } -func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } - -func Getuid() (uid int) { return -1 } -func Geteuid() (euid int) { return -1 } -func Getgid() (gid int) { return -1 } -func Getegid() (egid int) { return -1 } -func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } - -type Signal int - -func (s Signal) Signal() {} - -func (s Signal) String() string { - if 0 <= s && int(s) < len(signals) { - str := signals[s] - if str != "" { - return str - } - } - return "signal " + itoa(int(s)) -} - -func LoadCreateSymbolicLink() error { - return procCreateSymbolicLinkW.Find() -} - -// Readlink returns the destination of the named symbolic link. 
-func Readlink(path string, buf []byte) (n int, err error) { - fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, - FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - return -1, err - } - defer CloseHandle(fd) - - rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) - var bytesReturned uint32 - err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) - if err != nil { - return -1, err - } - - rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) - var s string - switch rdb.ReparseTag { - case IO_REPARSE_TAG_SYMLINK: - data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - case IO_REPARSE_TAG_MOUNT_POINT: - data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) - p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) - s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength-data.PrintNameOffset)/2]) - default: - // the path is not a symlink or junction but another type of reparse - // point - return -1, syscall.ENOENT - } - n = copy(buf, []byte(s)) - - return n, nil -} - -// GUIDFromString parses a string in the form of -// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID. -func GUIDFromString(str string) (GUID, error) { - guid := GUID{} - str16, err := syscall.UTF16PtrFromString(str) - if err != nil { - return guid, err - } - err = clsidFromString(str16, &guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// GenerateGUID creates a new random GUID. -func GenerateGUID() (GUID, error) { - guid := GUID{} - err := coCreateGuid(&guid) - if err != nil { - return guid, err - } - return guid, nil -} - -// String returns the canonical string form of the GUID, -// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}". -func (guid GUID) String() string { - var str [100]uint16 - chars := stringFromGUID2(&guid, &str[0], int32(len(str))) - if chars <= 1 { - return "" - } - return string(utf16.Decode(str[:chars-1])) -} - -// KnownFolderPath returns a well-known folder path for the current user, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - return Token(0).KnownFolderPath(folderID, flags) -} - -// KnownFolderPath returns a well-known folder path for the user token, specified by one of -// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag. -func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) { - var p *uint16 - err := shGetKnownFolderPath(folderID, flags, t, &p) - if err != nil { - return "", err - } - defer CoTaskMemFree(unsafe.Pointer(p)) - return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil -} - -// RtlGetVersion returns the version of the underlying operating system, ignoring -// manifest semantics but is affected by the application compatibility layer. -func RtlGetVersion() *OsVersionInfoEx { - info := &OsVersionInfoEx{} - info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) - // According to documentation, this function always succeeds. - // The function doesn't even check the validity of the - // osVersionInfoSize member. Disassembling ntdll.dll indicates - // that the documentation is indeed correct about that. 
- _ = rtlGetVersion(info) - return info -} - -// RtlGetNtVersionNumbers returns the version of the underlying operating system, -// ignoring manifest semantics and the application compatibility layer. -func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { - rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) - buildNumber &= 0xffff - return -} - -// GetProcessPreferredUILanguages retrieves the process preferred UI languages. -func GetProcessPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getProcessPreferredUILanguages) -} - -// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread. -func GetThreadPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getThreadPreferredUILanguages) -} - -// GetUserPreferredUILanguages retrieves information about the user preferred UI languages. -func GetUserPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getUserPreferredUILanguages) -} - -// GetSystemPreferredUILanguages retrieves the system preferred UI languages. -func GetSystemPreferredUILanguages(flags uint32) ([]string, error) { - return getUILanguages(flags, getSystemPreferredUILanguages) -} - -func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) { - size := uint32(128) - for { - var numLanguages uint32 - buf := make([]uint16, size) - err := f(flags, &numLanguages, &buf[0], &size) - if err == ERROR_INSUFFICIENT_BUFFER { - continue - } - if err != nil { - return nil, err - } - buf = buf[:size] - if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0" - return []string{}, nil - } - if buf[len(buf)-1] == 0 { - buf = buf[:len(buf)-1] // remove terminating null - } - languages := make([]string, 0, numLanguages) - from := 0 - for i, c := range buf { - if c == 0 { - languages = append(languages, string(utf16.Decode(buf[from:i]))) - from = i + 1 - } - } - return languages, nil - } -} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go deleted file mode 100644 index 809fff0..0000000 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ /dev/null @@ -1,1786 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -import ( - "net" - "syscall" - "unsafe" -) - -const ( - // Invented values to support what package os expects. 
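
The getUILanguages helper deleted just above combines two routine Win32 patterns: retrying when the call fails with ERROR_INSUFFICIENT_BUFFER (the API rewrites the size argument in place, so the next iteration allocates a large-enough buffer), and splitting the returned NUL-delimited UTF-16 "multi-string" into Go strings. A self-contained sketch of the decode step, runnable on any platform (the sample input is illustrative):

```go
package main

import (
	"fmt"
	"unicode/utf16"
)

// splitUTF16MultiString decodes a Windows-style UTF-16 multi-string
// (each item NUL-terminated, the whole list terminated by one extra NUL)
// into a []string, mirroring the decode loop in getUILanguages above.
func splitUTF16MultiString(buf []uint16) []string {
	// Drop the final list-terminating NUL, as the vendor code does.
	if n := len(buf); n > 0 && buf[n-1] == 0 {
		buf = buf[:n-1]
	}
	items := []string{}
	from := 0
	for i, c := range buf {
		if c == 0 { // end of one item
			items = append(items, string(utf16.Decode(buf[from:i])))
			from = i + 1
		}
	}
	return items
}

func main() {
	// "en-US\0de-DE\0\0", as GetUserPreferredUILanguages would return it.
	raw := utf16.Encode([]rune("en-US\x00de-DE\x00\x00"))
	fmt.Println(splitUTF16MultiString(raw)) // [en-US de-DE]
}
```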
- O_RDONLY = 0x00000 - O_WRONLY = 0x00001 - O_RDWR = 0x00002 - O_CREAT = 0x00040 - O_EXCL = 0x00080 - O_NOCTTY = 0x00100 - O_TRUNC = 0x00200 - O_NONBLOCK = 0x00800 - O_APPEND = 0x00400 - O_SYNC = 0x01000 - O_ASYNC = 0x02000 - O_CLOEXEC = 0x80000 -) - -const ( - // More invented values for signals - SIGHUP = Signal(0x1) - SIGINT = Signal(0x2) - SIGQUIT = Signal(0x3) - SIGILL = Signal(0x4) - SIGTRAP = Signal(0x5) - SIGABRT = Signal(0x6) - SIGBUS = Signal(0x7) - SIGFPE = Signal(0x8) - SIGKILL = Signal(0x9) - SIGSEGV = Signal(0xb) - SIGPIPE = Signal(0xd) - SIGALRM = Signal(0xe) - SIGTERM = Signal(0xf) -) - -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", -} - -const ( - FILE_LIST_DIRECTORY = 0x00000001 - FILE_APPEND_DATA = 0x00000004 - FILE_WRITE_ATTRIBUTES = 0x00000100 - - FILE_SHARE_READ = 0x00000001 - FILE_SHARE_WRITE = 0x00000002 - FILE_SHARE_DELETE = 0x00000004 - - FILE_ATTRIBUTE_READONLY = 0x00000001 - FILE_ATTRIBUTE_HIDDEN = 0x00000002 - FILE_ATTRIBUTE_SYSTEM = 0x00000004 - FILE_ATTRIBUTE_DIRECTORY = 0x00000010 - FILE_ATTRIBUTE_ARCHIVE = 0x00000020 - FILE_ATTRIBUTE_DEVICE = 0x00000040 - FILE_ATTRIBUTE_NORMAL = 0x00000080 - FILE_ATTRIBUTE_TEMPORARY = 0x00000100 - FILE_ATTRIBUTE_SPARSE_FILE = 0x00000200 - FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 - FILE_ATTRIBUTE_COMPRESSED = 0x00000800 - FILE_ATTRIBUTE_OFFLINE = 0x00001000 - FILE_ATTRIBUTE_NOT_CONTENT_INDEXED = 0x00002000 - FILE_ATTRIBUTE_ENCRYPTED = 0x00004000 - FILE_ATTRIBUTE_INTEGRITY_STREAM = 0x00008000 - FILE_ATTRIBUTE_VIRTUAL = 0x00010000 - FILE_ATTRIBUTE_NO_SCRUB_DATA = 0x00020000 - FILE_ATTRIBUTE_RECALL_ON_OPEN = 0x00040000 - FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000 - - INVALID_FILE_ATTRIBUTES = 0xffffffff - - CREATE_NEW = 1 - CREATE_ALWAYS = 2 - OPEN_EXISTING = 3 - OPEN_ALWAYS = 4 - TRUNCATE_EXISTING = 5 - - FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000 - FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000 - FILE_FLAG_OPEN_NO_RECALL = 0x00100000 - FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 - FILE_FLAG_SESSION_AWARE = 0x00800000 - FILE_FLAG_POSIX_SEMANTICS = 0x01000000 - FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 - FILE_FLAG_DELETE_ON_CLOSE = 0x04000000 - FILE_FLAG_SEQUENTIAL_SCAN = 0x08000000 - FILE_FLAG_RANDOM_ACCESS = 0x10000000 - FILE_FLAG_NO_BUFFERING = 0x20000000 - FILE_FLAG_OVERLAPPED = 0x40000000 - FILE_FLAG_WRITE_THROUGH = 0x80000000 - - HANDLE_FLAG_INHERIT = 0x00000001 - STARTF_USESTDHANDLES = 0x00000100 - STARTF_USESHOWWINDOW = 0x00000001 - DUPLICATE_CLOSE_SOURCE = 0x00000001 - DUPLICATE_SAME_ACCESS = 0x00000002 - - STD_INPUT_HANDLE = -10 & (1<<32 - 1) - STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) - STD_ERROR_HANDLE = -12 & (1<<32 - 1) - - FILE_BEGIN = 0 - FILE_CURRENT = 1 - FILE_END = 2 - - LANG_ENGLISH = 0x09 - SUBLANG_ENGLISH_US = 0x01 - - FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 - FORMAT_MESSAGE_IGNORE_INSERTS = 512 - FORMAT_MESSAGE_FROM_STRING = 1024 - FORMAT_MESSAGE_FROM_HMODULE = 2048 - FORMAT_MESSAGE_FROM_SYSTEM = 4096 - FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 - FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 - - MAX_PATH = 260 - MAX_LONG_PATH = 32768 - - MAX_COMPUTERNAME_LENGTH = 15 - - TIME_ZONE_ID_UNKNOWN = 0 - TIME_ZONE_ID_STANDARD = 1 - - TIME_ZONE_ID_DAYLIGHT = 2 - IGNORE = 0 - INFINITE = 0xffffffff - - WAIT_ABANDONED = 
0x00000080 - WAIT_OBJECT_0 = 0x00000000 - WAIT_FAILED = 0xFFFFFFFF - - // Access rights for process. - PROCESS_CREATE_PROCESS = 0x0080 - PROCESS_CREATE_THREAD = 0x0002 - PROCESS_DUP_HANDLE = 0x0040 - PROCESS_QUERY_INFORMATION = 0x0400 - PROCESS_QUERY_LIMITED_INFORMATION = 0x1000 - PROCESS_SET_INFORMATION = 0x0200 - PROCESS_SET_QUOTA = 0x0100 - PROCESS_SUSPEND_RESUME = 0x0800 - PROCESS_TERMINATE = 0x0001 - PROCESS_VM_OPERATION = 0x0008 - PROCESS_VM_READ = 0x0010 - PROCESS_VM_WRITE = 0x0020 - - // Access rights for thread. - THREAD_DIRECT_IMPERSONATION = 0x0200 - THREAD_GET_CONTEXT = 0x0008 - THREAD_IMPERSONATE = 0x0100 - THREAD_QUERY_INFORMATION = 0x0040 - THREAD_QUERY_LIMITED_INFORMATION = 0x0800 - THREAD_SET_CONTEXT = 0x0010 - THREAD_SET_INFORMATION = 0x0020 - THREAD_SET_LIMITED_INFORMATION = 0x0400 - THREAD_SET_THREAD_TOKEN = 0x0080 - THREAD_SUSPEND_RESUME = 0x0002 - THREAD_TERMINATE = 0x0001 - - FILE_MAP_COPY = 0x01 - FILE_MAP_WRITE = 0x02 - FILE_MAP_READ = 0x04 - FILE_MAP_EXECUTE = 0x20 - - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 - CTRL_CLOSE_EVENT = 2 - CTRL_LOGOFF_EVENT = 5 - CTRL_SHUTDOWN_EVENT = 6 - - // Windows reserves errors >= 1<<29 for application use. - APPLICATION_ERROR = 1 << 29 -) - -const ( - // Process creation flags. - CREATE_BREAKAWAY_FROM_JOB = 0x01000000 - CREATE_DEFAULT_ERROR_MODE = 0x04000000 - CREATE_NEW_CONSOLE = 0x00000010 - CREATE_NEW_PROCESS_GROUP = 0x00000200 - CREATE_NO_WINDOW = 0x08000000 - CREATE_PROTECTED_PROCESS = 0x00040000 - CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 - CREATE_SEPARATE_WOW_VDM = 0x00000800 - CREATE_SHARED_WOW_VDM = 0x00001000 - CREATE_SUSPENDED = 0x00000004 - CREATE_UNICODE_ENVIRONMENT = 0x00000400 - DEBUG_ONLY_THIS_PROCESS = 0x00000002 - DEBUG_PROCESS = 0x00000001 - DETACHED_PROCESS = 0x00000008 - EXTENDED_STARTUPINFO_PRESENT = 0x00080000 - INHERIT_PARENT_AFFINITY = 0x00010000 -) - -const ( - // flags for CreateToolhelp32Snapshot - TH32CS_SNAPHEAPLIST = 0x01 - TH32CS_SNAPPROCESS = 0x02 - TH32CS_SNAPTHREAD = 0x04 - TH32CS_SNAPMODULE = 0x08 - TH32CS_SNAPMODULE32 = 0x10 - TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD - TH32CS_INHERIT = 0x80000000 -) - -const ( - // filters for ReadDirectoryChangesW - FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 - FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 - FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 - FILE_NOTIFY_CHANGE_SIZE = 0x008 - FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 - FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 - FILE_NOTIFY_CHANGE_CREATION = 0x040 - FILE_NOTIFY_CHANGE_SECURITY = 0x100 -) - -const ( - // do not reorder - FILE_ACTION_ADDED = iota + 1 - FILE_ACTION_REMOVED - FILE_ACTION_MODIFIED - FILE_ACTION_RENAMED_OLD_NAME - FILE_ACTION_RENAMED_NEW_NAME -) - -const ( - // wincrypt.h - PROV_RSA_FULL = 1 - PROV_RSA_SIG = 2 - PROV_DSS = 3 - PROV_FORTEZZA = 4 - PROV_MS_EXCHANGE = 5 - PROV_SSL = 6 - PROV_RSA_SCHANNEL = 12 - PROV_DSS_DH = 13 - PROV_EC_ECDSA_SIG = 14 - PROV_EC_ECNRA_SIG = 15 - PROV_EC_ECDSA_FULL = 16 - PROV_EC_ECNRA_FULL = 17 - PROV_DH_SCHANNEL = 18 - PROV_SPYRUS_LYNKS = 20 - PROV_RNG = 21 - PROV_INTEL_SEC = 22 - PROV_REPLACE_OWF = 23 - PROV_RSA_AES = 24 - CRYPT_VERIFYCONTEXT = 0xF0000000 - CRYPT_NEWKEYSET = 0x00000008 - CRYPT_DELETEKEYSET = 0x00000010 - CRYPT_MACHINE_KEYSET = 0x00000020 - CRYPT_SILENT = 0x00000040 - CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 - - USAGE_MATCH_TYPE_AND = 0 - USAGE_MATCH_TYPE_OR = 1 - - /* msgAndCertEncodingType values for CertOpenStore function */ - X509_ASN_ENCODING = 0x00000001 - PKCS_7_ASN_ENCODING = 0x00010000 - - /* 
storeProvider values for CertOpenStore function */ - CERT_STORE_PROV_MSG = 1 - CERT_STORE_PROV_MEMORY = 2 - CERT_STORE_PROV_FILE = 3 - CERT_STORE_PROV_REG = 4 - CERT_STORE_PROV_PKCS7 = 5 - CERT_STORE_PROV_SERIALIZED = 6 - CERT_STORE_PROV_FILENAME_A = 7 - CERT_STORE_PROV_FILENAME_W = 8 - CERT_STORE_PROV_FILENAME = CERT_STORE_PROV_FILENAME_W - CERT_STORE_PROV_SYSTEM_A = 9 - CERT_STORE_PROV_SYSTEM_W = 10 - CERT_STORE_PROV_SYSTEM = CERT_STORE_PROV_SYSTEM_W - CERT_STORE_PROV_COLLECTION = 11 - CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12 - CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13 - CERT_STORE_PROV_SYSTEM_REGISTRY = CERT_STORE_PROV_SYSTEM_REGISTRY_W - CERT_STORE_PROV_PHYSICAL_W = 14 - CERT_STORE_PROV_PHYSICAL = CERT_STORE_PROV_PHYSICAL_W - CERT_STORE_PROV_SMART_CARD_W = 15 - CERT_STORE_PROV_SMART_CARD = CERT_STORE_PROV_SMART_CARD_W - CERT_STORE_PROV_LDAP_W = 16 - CERT_STORE_PROV_LDAP = CERT_STORE_PROV_LDAP_W - CERT_STORE_PROV_PKCS12 = 17 - - /* store characteristics (low WORD of flag) for CertOpenStore function */ - CERT_STORE_NO_CRYPT_RELEASE_FLAG = 0x00000001 - CERT_STORE_SET_LOCALIZED_NAME_FLAG = 0x00000002 - CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 - CERT_STORE_DELETE_FLAG = 0x00000010 - CERT_STORE_UNSAFE_PHYSICAL_FLAG = 0x00000020 - CERT_STORE_SHARE_STORE_FLAG = 0x00000040 - CERT_STORE_SHARE_CONTEXT_FLAG = 0x00000080 - CERT_STORE_MANIFOLD_FLAG = 0x00000100 - CERT_STORE_ENUM_ARCHIVED_FLAG = 0x00000200 - CERT_STORE_UPDATE_KEYID_FLAG = 0x00000400 - CERT_STORE_BACKUP_RESTORE_FLAG = 0x00000800 - CERT_STORE_MAXIMUM_ALLOWED_FLAG = 0x00001000 - CERT_STORE_CREATE_NEW_FLAG = 0x00002000 - CERT_STORE_OPEN_EXISTING_FLAG = 0x00004000 - CERT_STORE_READONLY_FLAG = 0x00008000 - - /* store locations (high WORD of flag) for CertOpenStore function */ - CERT_SYSTEM_STORE_CURRENT_USER = 0x00010000 - CERT_SYSTEM_STORE_LOCAL_MACHINE = 0x00020000 - CERT_SYSTEM_STORE_CURRENT_SERVICE = 0x00040000 - CERT_SYSTEM_STORE_SERVICES = 0x00050000 - CERT_SYSTEM_STORE_USERS = 0x00060000 - CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY = 0x00070000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000 - CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE = 0x00090000 - CERT_SYSTEM_STORE_UNPROTECTED_FLAG = 0x40000000 - CERT_SYSTEM_STORE_RELOCATE_FLAG = 0x80000000 - - /* Miscellaneous high-WORD flags for CertOpenStore function */ - CERT_REGISTRY_STORE_REMOTE_FLAG = 0x00010000 - CERT_REGISTRY_STORE_SERIALIZED_FLAG = 0x00020000 - CERT_REGISTRY_STORE_ROAMING_FLAG = 0x00040000 - CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000 - CERT_REGISTRY_STORE_LM_GPT_FLAG = 0x01000000 - CERT_REGISTRY_STORE_CLIENT_GPT_FLAG = 0x80000000 - CERT_FILE_STORE_COMMIT_ENABLE_FLAG = 0x00010000 - CERT_LDAP_STORE_SIGN_FLAG = 0x00010000 - CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG = 0x00020000 - CERT_LDAP_STORE_OPENED_FLAG = 0x00040000 - CERT_LDAP_STORE_UNBIND_FLAG = 0x00080000 - - /* addDisposition values for CertAddCertificateContextToStore function */ - CERT_STORE_ADD_NEW = 1 - CERT_STORE_ADD_USE_EXISTING = 2 - CERT_STORE_ADD_REPLACE_EXISTING = 3 - CERT_STORE_ADD_ALWAYS = 4 - CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5 - CERT_STORE_ADD_NEWER = 6 - CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES = 7 - - /* ErrorStatus values for CertTrustStatus struct */ - CERT_TRUST_NO_ERROR = 0x00000000 - CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 - CERT_TRUST_IS_REVOKED = 0x00000004 - CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 - CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 - CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 - CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 
- CERT_TRUST_IS_CYCLIC = 0x00000080 - CERT_TRUST_INVALID_EXTENSION = 0x00000100 - CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 - CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 - CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 - CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 - CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 - CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 - CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 - CERT_TRUST_IS_PARTIAL_CHAIN = 0x00010000 - CERT_TRUST_CTL_IS_NOT_TIME_VALID = 0x00020000 - CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID = 0x00040000 - CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE = 0x00080000 - CERT_TRUST_HAS_WEAK_SIGNATURE = 0x00100000 - CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 - CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 - CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 - CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 - - /* InfoStatus values for CertTrustStatus struct */ - CERT_TRUST_HAS_EXACT_MATCH_ISSUER = 0x00000001 - CERT_TRUST_HAS_KEY_MATCH_ISSUER = 0x00000002 - CERT_TRUST_HAS_NAME_MATCH_ISSUER = 0x00000004 - CERT_TRUST_IS_SELF_SIGNED = 0x00000008 - CERT_TRUST_HAS_PREFERRED_ISSUER = 0x00000100 - CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY = 0x00000400 - CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS = 0x00000400 - CERT_TRUST_IS_PEER_TRUSTED = 0x00000800 - CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED = 0x00001000 - CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000 - CERT_TRUST_IS_CA_TRUSTED = 0x00004000 - CERT_TRUST_IS_COMPLEX_CHAIN = 0x00010000 - - /* policyOID values for CertVerifyCertificateChainPolicy function */ - CERT_CHAIN_POLICY_BASE = 1 - CERT_CHAIN_POLICY_AUTHENTICODE = 2 - CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 - CERT_CHAIN_POLICY_SSL = 4 - CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 - CERT_CHAIN_POLICY_NT_AUTH = 6 - CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 - CERT_CHAIN_POLICY_EV = 8 - CERT_CHAIN_POLICY_SSL_F12 = 9 - - /* AuthType values for SSLExtraCertChainPolicyPara struct */ - AUTHTYPE_CLIENT = 1 - AUTHTYPE_SERVER = 2 - - /* Checks values for SSLExtraCertChainPolicyPara struct */ - SECURITY_FLAG_IGNORE_REVOCATION = 0x00000080 - SECURITY_FLAG_IGNORE_UNKNOWN_CA = 0x00000100 - SECURITY_FLAG_IGNORE_WRONG_USAGE = 0x00000200 - SECURITY_FLAG_IGNORE_CERT_CN_INVALID = 0x00001000 - SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000 -) - -const ( - // flags for SetErrorMode - SEM_FAILCRITICALERRORS = 0x0001 - SEM_NOALIGNMENTFAULTEXCEPT = 0x0004 - SEM_NOGPFAULTERRORBOX = 0x0002 - SEM_NOOPENFILEERRORBOX = 0x8000 -) - -const ( - // Priority class. - ABOVE_NORMAL_PRIORITY_CLASS = 0x00008000 - BELOW_NORMAL_PRIORITY_CLASS = 0x00004000 - HIGH_PRIORITY_CLASS = 0x00000080 - IDLE_PRIORITY_CLASS = 0x00000040 - NORMAL_PRIORITY_CLASS = 0x00000020 - PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000 - PROCESS_MODE_BACKGROUND_END = 0x00200000 - REALTIME_PRIORITY_CLASS = 0x00000100 -) - -var ( - OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") - OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") - OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") -) - -// Pointer represents a pointer to an arbitrary Windows type. -// -// Pointer-typed fields may point to one of many different types. It's -// up to the caller to provide a pointer to the appropriate type, cast -// to Pointer. The caller must obey the unsafe.Pointer rules while -// doing so. -type Pointer *struct{} - -// Invented values to support what package os expects. 
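
The invented Timeval and Filetime types that follow carry the epoch arithmetic this package leans on: FILETIME counts 100-nanosecond intervals since January 1, 1601 (UTC), and 116444736000000000 such intervals separate that epoch from the Unix epoch. A standalone round-trip check of the conversion shown below (the constant is copied from the vendor code; the lowercase names are local to this sketch):

```go
package main

import (
	"fmt"
	"time"
)

// filetime mirrors the vendor Filetime that follows: a 64-bit count of
// 100 ns intervals since 1601-01-01 UTC, split into 32-bit halves.
type filetime struct {
	LowDateTime  uint32
	HighDateTime uint32
}

// epochDiff100ns is the distance from the Windows epoch (1601-01-01)
// to the Unix epoch (1970-01-01), in 100 ns units.
const epochDiff100ns = 116444736000000000

// nanoseconds matches (*Filetime).Nanoseconds in the vendor code.
func (ft filetime) nanoseconds() int64 {
	nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
	return (nsec - epochDiff100ns) * 100
}

// nsecToFiletime matches the vendor NsecToFiletime.
func nsecToFiletime(nsec int64) filetime {
	v := nsec/100 + epochDiff100ns
	return filetime{LowDateTime: uint32(v), HighDateTime: uint32(v >> 32)}
}

func main() {
	want := time.Date(2020, 5, 1, 0, 0, 0, 0, time.UTC).UnixNano()
	ft := nsecToFiletime(want)
	// Exact at 100 ns granularity; sub-100 ns precision is lost by design.
	fmt.Println(ft.nanoseconds() == want) // true
}
```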
-type Timeval struct { - Sec int32 - Usec int32 -} - -func (tv *Timeval) Nanoseconds() int64 { - return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 -} - -func NsecToTimeval(nsec int64) (tv Timeval) { - tv.Sec = int32(nsec / 1e9) - tv.Usec = int32(nsec % 1e9 / 1e3) - return -} - -type Overlapped struct { - Internal uintptr - InternalHigh uintptr - Offset uint32 - OffsetHigh uint32 - HEvent Handle -} - -type FileNotifyInformation struct { - NextEntryOffset uint32 - Action uint32 - FileNameLength uint32 - FileName uint16 -} - -type Filetime struct { - LowDateTime uint32 - HighDateTime uint32 -} - -// Nanoseconds returns Filetime ft in nanoseconds -// since Epoch (00:00:00 UTC, January 1, 1970). -func (ft *Filetime) Nanoseconds() int64 { - // 100-nanosecond intervals since January 1, 1601 - nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) - // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) - nsec -= 116444736000000000 - // convert into nanoseconds - nsec *= 100 - return nsec -} - -func NsecToFiletime(nsec int64) (ft Filetime) { - // convert into 100-nanosecond - nsec /= 100 - // change starting time to January 1, 1601 - nsec += 116444736000000000 - // split into high / low - ft.LowDateTime = uint32(nsec & 0xffffffff) - ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) - return ft -} - -type Win32finddata struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH - 1]uint16 - AlternateFileName [13]uint16 -} - -// This is the actual system call structure. -// Win32finddata is what we committed to in Go 1. -type win32finddata1 struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 - Reserved0 uint32 - Reserved1 uint32 - FileName [MAX_PATH]uint16 - AlternateFileName [14]uint16 -} - -func copyFindData(dst *Win32finddata, src *win32finddata1) { - dst.FileAttributes = src.FileAttributes - dst.CreationTime = src.CreationTime - dst.LastAccessTime = src.LastAccessTime - dst.LastWriteTime = src.LastWriteTime - dst.FileSizeHigh = src.FileSizeHigh - dst.FileSizeLow = src.FileSizeLow - dst.Reserved0 = src.Reserved0 - dst.Reserved1 = src.Reserved1 - - // The src is 1 element bigger than dst, but it must be NUL. 
- copy(dst.FileName[:], src.FileName[:]) - copy(dst.AlternateFileName[:], src.AlternateFileName[:]) -} - -type ByHandleFileInformation struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - VolumeSerialNumber uint32 - FileSizeHigh uint32 - FileSizeLow uint32 - NumberOfLinks uint32 - FileIndexHigh uint32 - FileIndexLow uint32 -} - -const ( - GetFileExInfoStandard = 0 - GetFileExMaxInfoLevel = 1 -) - -type Win32FileAttributeData struct { - FileAttributes uint32 - CreationTime Filetime - LastAccessTime Filetime - LastWriteTime Filetime - FileSizeHigh uint32 - FileSizeLow uint32 -} - -// ShowWindow constants -const ( - // winuser.h - SW_HIDE = 0 - SW_NORMAL = 1 - SW_SHOWNORMAL = 1 - SW_SHOWMINIMIZED = 2 - SW_SHOWMAXIMIZED = 3 - SW_MAXIMIZE = 3 - SW_SHOWNOACTIVATE = 4 - SW_SHOW = 5 - SW_MINIMIZE = 6 - SW_SHOWMINNOACTIVE = 7 - SW_SHOWNA = 8 - SW_RESTORE = 9 - SW_SHOWDEFAULT = 10 - SW_FORCEMINIMIZE = 11 -) - -type StartupInfo struct { - Cb uint32 - _ *uint16 - Desktop *uint16 - Title *uint16 - X uint32 - Y uint32 - XSize uint32 - YSize uint32 - XCountChars uint32 - YCountChars uint32 - FillAttribute uint32 - Flags uint32 - ShowWindow uint16 - _ uint16 - _ *byte - StdInput Handle - StdOutput Handle - StdErr Handle -} - -type ProcessInformation struct { - Process Handle - Thread Handle - ProcessId uint32 - ThreadId uint32 -} - -type ProcessEntry32 struct { - Size uint32 - Usage uint32 - ProcessID uint32 - DefaultHeapID uintptr - ModuleID uint32 - Threads uint32 - ParentProcessID uint32 - PriClassBase int32 - Flags uint32 - ExeFile [MAX_PATH]uint16 -} - -type ThreadEntry32 struct { - Size uint32 - Usage uint32 - ThreadID uint32 - OwnerProcessID uint32 - BasePri int32 - DeltaPri int32 - Flags uint32 -} - -type Systemtime struct { - Year uint16 - Month uint16 - DayOfWeek uint16 - Day uint16 - Hour uint16 - Minute uint16 - Second uint16 - Milliseconds uint16 -} - -type Timezoneinformation struct { - Bias int32 - StandardName [32]uint16 - StandardDate Systemtime - StandardBias int32 - DaylightName [32]uint16 - DaylightDate Systemtime - DaylightBias int32 -} - -// Socket related. - -const ( - AF_UNSPEC = 0 - AF_UNIX = 1 - AF_INET = 2 - AF_NETBIOS = 17 - AF_INET6 = 23 - AF_IRDA = 26 - AF_BTH = 32 - - SOCK_STREAM = 1 - SOCK_DGRAM = 2 - SOCK_RAW = 3 - SOCK_RDM = 4 - SOCK_SEQPACKET = 5 - - IPPROTO_IP = 0 - IPPROTO_ICMP = 1 - IPPROTO_IGMP = 2 - BTHPROTO_RFCOMM = 3 - IPPROTO_TCP = 6 - IPPROTO_UDP = 17 - IPPROTO_IPV6 = 41 - IPPROTO_ICMPV6 = 58 - IPPROTO_RM = 113 - - SOL_SOCKET = 0xffff - SO_REUSEADDR = 4 - SO_KEEPALIVE = 8 - SO_DONTROUTE = 16 - SO_BROADCAST = 32 - SO_LINGER = 128 - SO_RCVBUF = 0x1002 - SO_RCVTIMEO = 0x1006 - SO_SNDBUF = 0x1001 - SO_UPDATE_ACCEPT_CONTEXT = 0x700b - SO_UPDATE_CONNECT_CONTEXT = 0x7010 - - IOC_OUT = 0x40000000 - IOC_IN = 0x80000000 - IOC_VENDOR = 0x18000000 - IOC_INOUT = IOC_IN | IOC_OUT - IOC_WS2 = 0x08000000 - SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 - SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 - SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 - - // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 - - IP_TOS = 0x3 - IP_TTL = 0x4 - IP_MULTICAST_IF = 0x9 - IP_MULTICAST_TTL = 0xa - IP_MULTICAST_LOOP = 0xb - IP_ADD_MEMBERSHIP = 0xc - IP_DROP_MEMBERSHIP = 0xd - - IPV6_V6ONLY = 0x1b - IPV6_UNICAST_HOPS = 0x4 - IPV6_MULTICAST_IF = 0x9 - IPV6_MULTICAST_HOPS = 0xa - IPV6_MULTICAST_LOOP = 0xb - IPV6_JOIN_GROUP = 0xc - IPV6_LEAVE_GROUP = 0xd - - MSG_OOB = 0x1 - MSG_PEEK = 0x2 - MSG_DONTROUTE = 0x4 - MSG_WAITALL = 0x8 - - MSG_TRUNC = 0x0100 - MSG_CTRUNC = 0x0200 - MSG_BCAST = 0x0400 - MSG_MCAST = 0x0800 - - SOMAXCONN = 0x7fffffff - - TCP_NODELAY = 1 - - SHUT_RD = 0 - SHUT_WR = 1 - SHUT_RDWR = 2 - - WSADESCRIPTION_LEN = 256 - WSASYS_STATUS_LEN = 128 -) - -type WSABuf struct { - Len uint32 - Buf *byte -} - -type WSAMsg struct { - Name *syscall.RawSockaddrAny - Namelen int32 - Buffers *WSABuf - BufferCount uint32 - Control WSABuf - Flags uint32 -} - -// Invented values to support what package os expects. -const ( - S_IFMT = 0x1f000 - S_IFIFO = 0x1000 - S_IFCHR = 0x2000 - S_IFDIR = 0x4000 - S_IFBLK = 0x6000 - S_IFREG = 0x8000 - S_IFLNK = 0xa000 - S_IFSOCK = 0xc000 - S_ISUID = 0x800 - S_ISGID = 0x400 - S_ISVTX = 0x200 - S_IRUSR = 0x100 - S_IWRITE = 0x80 - S_IWUSR = 0x80 - S_IXUSR = 0x40 -) - -const ( - FILE_TYPE_CHAR = 0x0002 - FILE_TYPE_DISK = 0x0001 - FILE_TYPE_PIPE = 0x0003 - FILE_TYPE_REMOTE = 0x8000 - FILE_TYPE_UNKNOWN = 0x0000 -) - -type Hostent struct { - Name *byte - Aliases **byte - AddrType uint16 - Length uint16 - AddrList **byte -} - -type Protoent struct { - Name *byte - Aliases **byte - Proto uint16 -} - -const ( - DNS_TYPE_A = 0x0001 - DNS_TYPE_NS = 0x0002 - DNS_TYPE_MD = 0x0003 - DNS_TYPE_MF = 0x0004 - DNS_TYPE_CNAME = 0x0005 - DNS_TYPE_SOA = 0x0006 - DNS_TYPE_MB = 0x0007 - DNS_TYPE_MG = 0x0008 - DNS_TYPE_MR = 0x0009 - DNS_TYPE_NULL = 0x000a - DNS_TYPE_WKS = 0x000b - DNS_TYPE_PTR = 0x000c - DNS_TYPE_HINFO = 0x000d - DNS_TYPE_MINFO = 0x000e - DNS_TYPE_MX = 0x000f - DNS_TYPE_TEXT = 0x0010 - DNS_TYPE_RP = 0x0011 - DNS_TYPE_AFSDB = 0x0012 - DNS_TYPE_X25 = 0x0013 - DNS_TYPE_ISDN = 0x0014 - DNS_TYPE_RT = 0x0015 - DNS_TYPE_NSAP = 0x0016 - DNS_TYPE_NSAPPTR = 0x0017 - DNS_TYPE_SIG = 0x0018 - DNS_TYPE_KEY = 0x0019 - DNS_TYPE_PX = 0x001a - DNS_TYPE_GPOS = 0x001b - DNS_TYPE_AAAA = 0x001c - DNS_TYPE_LOC = 0x001d - DNS_TYPE_NXT = 0x001e - DNS_TYPE_EID = 0x001f - DNS_TYPE_NIMLOC = 0x0020 - DNS_TYPE_SRV = 0x0021 - DNS_TYPE_ATMA = 0x0022 - DNS_TYPE_NAPTR = 0x0023 - DNS_TYPE_KX = 0x0024 - DNS_TYPE_CERT = 0x0025 - DNS_TYPE_A6 = 0x0026 - DNS_TYPE_DNAME = 0x0027 - DNS_TYPE_SINK = 0x0028 - DNS_TYPE_OPT = 0x0029 - DNS_TYPE_DS = 0x002B - DNS_TYPE_RRSIG = 0x002E - DNS_TYPE_NSEC = 0x002F - DNS_TYPE_DNSKEY = 0x0030 - DNS_TYPE_DHCID = 0x0031 - DNS_TYPE_UINFO = 0x0064 - DNS_TYPE_UID = 0x0065 - DNS_TYPE_GID = 0x0066 - DNS_TYPE_UNSPEC = 0x0067 - DNS_TYPE_ADDRS = 0x00f8 - DNS_TYPE_TKEY = 0x00f9 - DNS_TYPE_TSIG = 0x00fa - DNS_TYPE_IXFR = 0x00fb - DNS_TYPE_AXFR = 0x00fc - DNS_TYPE_MAILB = 0x00fd - DNS_TYPE_MAILA = 0x00fe - DNS_TYPE_ALL = 0x00ff - DNS_TYPE_ANY = 0x00ff - DNS_TYPE_WINS = 0xff01 - DNS_TYPE_WINSR = 0xff02 - DNS_TYPE_NBSTAT = 0xff01 -) - -const ( - // flags inside DNSRecord.Dw - DnsSectionQuestion = 0x0000 - DnsSectionAnswer = 0x0001 - DnsSectionAuthority = 0x0002 - DnsSectionAdditional = 0x0003 -) - -type DNSSRVData struct { - Target *uint16 - Priority uint16 - Weight uint16 - Port uint16 - Pad uint16 -} - -type DNSPTRData struct { - Host *uint16 -} - -type DNSMXData struct { - NameExchange *uint16 - Preference uint16 - Pad uint16 -} - -type 
DNSTXTData struct { - StringCount uint16 - StringArray [1]*uint16 -} - -type DNSRecord struct { - Next *DNSRecord - Name *uint16 - Type uint16 - Length uint16 - Dw uint32 - Ttl uint32 - Reserved uint32 - Data [40]byte -} - -const ( - TF_DISCONNECT = 1 - TF_REUSE_SOCKET = 2 - TF_WRITE_BEHIND = 4 - TF_USE_DEFAULT_WORKER = 0 - TF_USE_SYSTEM_THREAD = 16 - TF_USE_KERNEL_APC = 32 -) - -type TransmitFileBuffers struct { - Head uintptr - HeadLength uint32 - Tail uintptr - TailLength uint32 -} - -const ( - IFF_UP = 1 - IFF_BROADCAST = 2 - IFF_LOOPBACK = 4 - IFF_POINTTOPOINT = 8 - IFF_MULTICAST = 16 -) - -const SIO_GET_INTERFACE_LIST = 0x4004747F - -// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. -// will be fixed to change variable type as suitable. - -type SockaddrGen [24]byte - -type InterfaceInfo struct { - Flags uint32 - Address SockaddrGen - BroadcastAddress SockaddrGen - Netmask SockaddrGen -} - -type IpAddressString struct { - String [16]byte -} - -type IpMaskString IpAddressString - -type IpAddrString struct { - Next *IpAddrString - IpAddress IpAddressString - IpMask IpMaskString - Context uint32 -} - -const MAX_ADAPTER_NAME_LENGTH = 256 -const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 -const MAX_ADAPTER_ADDRESS_LENGTH = 8 - -type IpAdapterInfo struct { - Next *IpAdapterInfo - ComboIndex uint32 - AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte - Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte - AddressLength uint32 - Address [MAX_ADAPTER_ADDRESS_LENGTH]byte - Index uint32 - Type uint32 - DhcpEnabled uint32 - CurrentIpAddress *IpAddrString - IpAddressList IpAddrString - GatewayList IpAddrString - DhcpServer IpAddrString - HaveWins bool - PrimaryWinsServer IpAddrString - SecondaryWinsServer IpAddrString - LeaseObtained int64 - LeaseExpires int64 -} - -const MAXLEN_PHYSADDR = 8 -const MAX_INTERFACE_NAME_LEN = 256 -const MAXLEN_IFDESCR = 256 - -type MibIfRow struct { - Name [MAX_INTERFACE_NAME_LEN]uint16 - Index uint32 - Type uint32 - Mtu uint32 - Speed uint32 - PhysAddrLen uint32 - PhysAddr [MAXLEN_PHYSADDR]byte - AdminStatus uint32 - OperStatus uint32 - LastChange uint32 - InOctets uint32 - InUcastPkts uint32 - InNUcastPkts uint32 - InDiscards uint32 - InErrors uint32 - InUnknownProtos uint32 - OutOctets uint32 - OutUcastPkts uint32 - OutNUcastPkts uint32 - OutDiscards uint32 - OutErrors uint32 - OutQLen uint32 - DescrLen uint32 - Descr [MAXLEN_IFDESCR]byte -} - -type CertInfo struct { - // Not implemented -} - -type CertContext struct { - EncodingType uint32 - EncodedCert *byte - Length uint32 - CertInfo *CertInfo - Store Handle -} - -type CertChainContext struct { - Size uint32 - TrustStatus CertTrustStatus - ChainCount uint32 - Chains **CertSimpleChain - LowerQualityChainCount uint32 - LowerQualityChains **CertChainContext - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertTrustListInfo struct { - // Not implemented -} - -type CertSimpleChain struct { - Size uint32 - TrustStatus CertTrustStatus - NumElements uint32 - Elements **CertChainElement - TrustListInfo *CertTrustListInfo - HasRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 -} - -type CertChainElement struct { - Size uint32 - CertContext *CertContext - TrustStatus CertTrustStatus - RevocationInfo *CertRevocationInfo - IssuanceUsage *CertEnhKeyUsage - ApplicationUsage *CertEnhKeyUsage - ExtendedErrorInfo *uint16 -} - -type CertRevocationCrlInfo struct { - // Not implemented -} - -type CertRevocationInfo struct { - Size uint32 - RevocationResult uint32 - 
RevocationOid *byte - OidSpecificInfo Pointer - HasFreshnessTime uint32 - FreshnessTime uint32 - CrlInfo *CertRevocationCrlInfo -} - -type CertTrustStatus struct { - ErrorStatus uint32 - InfoStatus uint32 -} - -type CertUsageMatch struct { - Type uint32 - Usage CertEnhKeyUsage -} - -type CertEnhKeyUsage struct { - Length uint32 - UsageIdentifiers **byte -} - -type CertChainPara struct { - Size uint32 - RequestedUsage CertUsageMatch - RequstedIssuancePolicy CertUsageMatch - URLRetrievalTimeout uint32 - CheckRevocationFreshnessTime uint32 - RevocationFreshnessTime uint32 - CacheResync *Filetime -} - -type CertChainPolicyPara struct { - Size uint32 - Flags uint32 - ExtraPolicyPara Pointer -} - -type SSLExtraCertChainPolicyPara struct { - Size uint32 - AuthType uint32 - Checks uint32 - ServerName *uint16 -} - -type CertChainPolicyStatus struct { - Size uint32 - Error uint32 - ChainIndex uint32 - ElementIndex uint32 - ExtraPolicyStatus Pointer -} - -const ( - // do not reorder - HKEY_CLASSES_ROOT = 0x80000000 + iota - HKEY_CURRENT_USER - HKEY_LOCAL_MACHINE - HKEY_USERS - HKEY_PERFORMANCE_DATA - HKEY_CURRENT_CONFIG - HKEY_DYN_DATA - - KEY_QUERY_VALUE = 1 - KEY_SET_VALUE = 2 - KEY_CREATE_SUB_KEY = 4 - KEY_ENUMERATE_SUB_KEYS = 8 - KEY_NOTIFY = 16 - KEY_CREATE_LINK = 32 - KEY_WRITE = 0x20006 - KEY_EXECUTE = 0x20019 - KEY_READ = 0x20019 - KEY_WOW64_64KEY = 0x0100 - KEY_WOW64_32KEY = 0x0200 - KEY_ALL_ACCESS = 0xf003f -) - -const ( - // do not reorder - REG_NONE = iota - REG_SZ - REG_EXPAND_SZ - REG_BINARY - REG_DWORD_LITTLE_ENDIAN - REG_DWORD_BIG_ENDIAN - REG_LINK - REG_MULTI_SZ - REG_RESOURCE_LIST - REG_FULL_RESOURCE_DESCRIPTOR - REG_RESOURCE_REQUIREMENTS_LIST - REG_QWORD_LITTLE_ENDIAN - REG_DWORD = REG_DWORD_LITTLE_ENDIAN - REG_QWORD = REG_QWORD_LITTLE_ENDIAN -) - -const ( - EVENT_MODIFY_STATE = 0x0002 - EVENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - MUTANT_QUERY_STATE = 0x0001 - MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE - - SEMAPHORE_MODIFY_STATE = 0x0002 - SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 - - TIMER_QUERY_STATE = 0x0001 - TIMER_MODIFY_STATE = 0x0002 - TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE - - MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE - MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS - - CREATE_EVENT_MANUAL_RESET = 0x1 - CREATE_EVENT_INITIAL_SET = 0x2 - CREATE_MUTEX_INITIAL_OWNER = 0x1 -) - -type AddrinfoW struct { - Flags int32 - Family int32 - Socktype int32 - Protocol int32 - Addrlen uintptr - Canonname *uint16 - Addr uintptr - Next *AddrinfoW -} - -const ( - AI_PASSIVE = 1 - AI_CANONNAME = 2 - AI_NUMERICHOST = 4 -) - -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -var WSAID_CONNECTEX = GUID{ - 0x25a207b9, - 0xddf3, - 0x4660, - [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, -} - -var WSAID_WSASENDMSG = GUID{ - 0xa441e712, - 0x754f, - 0x43ca, - [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, -} - -var WSAID_WSARECVMSG = GUID{ - 0xf689d7c8, - 0x6f1f, - 0x436b, - [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, -} - -const ( - FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - FILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -const ( - WSAPROTOCOL_LEN = 255 - MAX_PROTOCOL_CHAIN = 7 - BASE_PROTOCOL = 1 - LAYERED_PROTOCOL = 0 - - XP1_CONNECTIONLESS = 0x00000001 - XP1_GUARANTEED_DELIVERY = 0x00000002 - XP1_GUARANTEED_ORDER = 0x00000004 - XP1_MESSAGE_ORIENTED = 0x00000008 - XP1_PSEUDO_STREAM = 0x00000010 - 
XP1_GRACEFUL_CLOSE = 0x00000020 - XP1_EXPEDITED_DATA = 0x00000040 - XP1_CONNECT_DATA = 0x00000080 - XP1_DISCONNECT_DATA = 0x00000100 - XP1_SUPPORT_BROADCAST = 0x00000200 - XP1_SUPPORT_MULTIPOINT = 0x00000400 - XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 - XP1_MULTIPOINT_DATA_PLANE = 0x00001000 - XP1_QOS_SUPPORTED = 0x00002000 - XP1_UNI_SEND = 0x00008000 - XP1_UNI_RECV = 0x00010000 - XP1_IFS_HANDLES = 0x00020000 - XP1_PARTIAL_MESSAGE = 0x00040000 - XP1_SAN_SUPPORT_SDP = 0x00080000 - - PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 - PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 - PFL_HIDDEN = 0x00000004 - PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 - PFL_NETWORKDIRECT_PROVIDER = 0x00000010 -) - -type WSAProtocolInfo struct { - ServiceFlags1 uint32 - ServiceFlags2 uint32 - ServiceFlags3 uint32 - ServiceFlags4 uint32 - ProviderFlags uint32 - ProviderId GUID - CatalogEntryId uint32 - ProtocolChain WSAProtocolChain - Version int32 - AddressFamily int32 - MaxSockAddr int32 - MinSockAddr int32 - SocketType int32 - Protocol int32 - ProtocolMaxOffset int32 - NetworkByteOrder int32 - SecurityScheme int32 - MessageSize uint32 - ProviderReserved uint32 - ProtocolName [WSAPROTOCOL_LEN + 1]uint16 -} - -type WSAProtocolChain struct { - ChainLen int32 - ChainEntries [MAX_PROTOCOL_CHAIN]uint32 -} - -type TCPKeepalive struct { - OnOff uint32 - Time uint32 - Interval uint32 -} - -type symbolicLinkReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - Flags uint32 - PathBuffer [1]uint16 -} - -type mountPointReparseBuffer struct { - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 - PathBuffer [1]uint16 -} - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - - // GenericReparseBuffer - reparseBuffer byte -} - -const ( - FSCTL_GET_REPARSE_POINT = 0x900A8 - MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 - IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 - IO_REPARSE_TAG_SYMLINK = 0xA000000C - SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 -) - -const ( - ComputerNameNetBIOS = 0 - ComputerNameDnsHostname = 1 - ComputerNameDnsDomain = 2 - ComputerNameDnsFullyQualified = 3 - ComputerNamePhysicalNetBIOS = 4 - ComputerNamePhysicalDnsHostname = 5 - ComputerNamePhysicalDnsDomain = 6 - ComputerNamePhysicalDnsFullyQualified = 7 - ComputerNameMax = 8 -) - -// For MessageBox() -const ( - MB_OK = 0x00000000 - MB_OKCANCEL = 0x00000001 - MB_ABORTRETRYIGNORE = 0x00000002 - MB_YESNOCANCEL = 0x00000003 - MB_YESNO = 0x00000004 - MB_RETRYCANCEL = 0x00000005 - MB_CANCELTRYCONTINUE = 0x00000006 - MB_ICONHAND = 0x00000010 - MB_ICONQUESTION = 0x00000020 - MB_ICONEXCLAMATION = 0x00000030 - MB_ICONASTERISK = 0x00000040 - MB_USERICON = 0x00000080 - MB_ICONWARNING = MB_ICONEXCLAMATION - MB_ICONERROR = MB_ICONHAND - MB_ICONINFORMATION = MB_ICONASTERISK - MB_ICONSTOP = MB_ICONHAND - MB_DEFBUTTON1 = 0x00000000 - MB_DEFBUTTON2 = 0x00000100 - MB_DEFBUTTON3 = 0x00000200 - MB_DEFBUTTON4 = 0x00000300 - MB_APPLMODAL = 0x00000000 - MB_SYSTEMMODAL = 0x00001000 - MB_TASKMODAL = 0x00002000 - MB_HELP = 0x00004000 - MB_NOFOCUS = 0x00008000 - MB_SETFOREGROUND = 0x00010000 - MB_DEFAULT_DESKTOP_ONLY = 0x00020000 - MB_TOPMOST = 0x00040000 - MB_RIGHT = 0x00080000 - MB_RTLREADING = 0x00100000 - MB_SERVICE_NOTIFICATION = 0x00200000 -) - -const ( - MOVEFILE_REPLACE_EXISTING = 0x1 - MOVEFILE_COPY_ALLOWED = 0x2 - MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 - MOVEFILE_WRITE_THROUGH = 0x8 - MOVEFILE_CREATE_HARDLINK = 0x10 - 
MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 -) - -const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 - -const ( - IF_TYPE_OTHER = 1 - IF_TYPE_ETHERNET_CSMACD = 6 - IF_TYPE_ISO88025_TOKENRING = 9 - IF_TYPE_PPP = 23 - IF_TYPE_SOFTWARE_LOOPBACK = 24 - IF_TYPE_ATM = 37 - IF_TYPE_IEEE80211 = 71 - IF_TYPE_TUNNEL = 131 - IF_TYPE_IEEE1394 = 144 -) - -type SocketAddress struct { - Sockaddr *syscall.RawSockaddrAny - SockaddrLength int32 -} - -// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither. -func (addr *SocketAddress) IP() net.IP { - if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET { - return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 { - return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:] - } - return nil -} - -type IpAdapterUnicastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterUnicastAddress - Address SocketAddress - PrefixOrigin int32 - SuffixOrigin int32 - DadState int32 - ValidLifetime uint32 - PreferredLifetime uint32 - LeaseLifetime uint32 - OnLinkPrefixLength uint8 -} - -type IpAdapterAnycastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterAnycastAddress - Address SocketAddress -} - -type IpAdapterMulticastAddress struct { - Length uint32 - Flags uint32 - Next *IpAdapterMulticastAddress - Address SocketAddress -} - -type IpAdapterDnsServerAdapter struct { - Length uint32 - Reserved uint32 - Next *IpAdapterDnsServerAdapter - Address SocketAddress -} - -type IpAdapterPrefix struct { - Length uint32 - Flags uint32 - Next *IpAdapterPrefix - Address SocketAddress - PrefixLength uint32 -} - -type IpAdapterAddresses struct { - Length uint32 - IfIndex uint32 - Next *IpAdapterAddresses - AdapterName *byte - FirstUnicastAddress *IpAdapterUnicastAddress - FirstAnycastAddress *IpAdapterAnycastAddress - FirstMulticastAddress *IpAdapterMulticastAddress - FirstDnsServerAddress *IpAdapterDnsServerAdapter - DnsSuffix *uint16 - Description *uint16 - FriendlyName *uint16 - PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte - PhysicalAddressLength uint32 - Flags uint32 - Mtu uint32 - IfType uint32 - OperStatus uint32 - Ipv6IfIndex uint32 - ZoneIndices [16]uint32 - FirstPrefix *IpAdapterPrefix - /* more fields might be present here. */ -} - -const ( - IfOperStatusUp = 1 - IfOperStatusDown = 2 - IfOperStatusTesting = 3 - IfOperStatusUnknown = 4 - IfOperStatusDormant = 5 - IfOperStatusNotPresent = 6 - IfOperStatusLowerLayerDown = 7 -) - -// Console related constants used for the mode parameter to SetConsoleMode. See -// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. - -const ( - ENABLE_PROCESSED_INPUT = 0x1 - ENABLE_LINE_INPUT = 0x2 - ENABLE_ECHO_INPUT = 0x4 - ENABLE_WINDOW_INPUT = 0x8 - ENABLE_MOUSE_INPUT = 0x10 - ENABLE_INSERT_MODE = 0x20 - ENABLE_QUICK_EDIT_MODE = 0x40 - ENABLE_EXTENDED_FLAGS = 0x80 - ENABLE_AUTO_POSITION = 0x100 - ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 - - ENABLE_PROCESSED_OUTPUT = 0x1 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 - ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 - DISABLE_NEWLINE_AUTO_RETURN = 0x8 - ENABLE_LVB_GRID_WORLDWIDE = 0x10 -) - -type Coord struct { - X int16 - Y int16 -} - -type SmallRect struct { - Left int16 - Top int16 - Right int16 - Bottom int16 -} - -// Used with GetConsoleScreenBuffer to retrieve information about a console -// screen buffer. 
See -// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str -// for details. - -type ConsoleScreenBufferInfo struct { - Size Coord - CursorPosition Coord - Attributes uint16 - Window SmallRect - MaximumWindowSize Coord -} - -const UNIX_PATH_MAX = 108 // defined in afunix.h - -const ( - // flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags - JOB_OBJECT_LIMIT_ACTIVE_PROCESS = 0x00000008 - JOB_OBJECT_LIMIT_AFFINITY = 0x00000010 - JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800 - JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400 - JOB_OBJECT_LIMIT_JOB_MEMORY = 0x00000200 - JOB_OBJECT_LIMIT_JOB_TIME = 0x00000004 - JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE = 0x00002000 - JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME = 0x00000040 - JOB_OBJECT_LIMIT_PRIORITY_CLASS = 0x00000020 - JOB_OBJECT_LIMIT_PROCESS_MEMORY = 0x00000100 - JOB_OBJECT_LIMIT_PROCESS_TIME = 0x00000002 - JOB_OBJECT_LIMIT_SCHEDULING_CLASS = 0x00000080 - JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000 - JOB_OBJECT_LIMIT_SUBSET_AFFINITY = 0x00004000 - JOB_OBJECT_LIMIT_WORKINGSET = 0x00000001 -) - -type JOBOBJECT_BASIC_LIMIT_INFORMATION struct { - PerProcessUserTimeLimit int64 - PerJobUserTimeLimit int64 - LimitFlags uint32 - MinimumWorkingSetSize uintptr - MaximumWorkingSetSize uintptr - ActiveProcessLimit uint32 - Affinity uintptr - PriorityClass uint32 - SchedulingClass uint32 -} - -type IO_COUNTERS struct { - ReadOperationCount uint64 - WriteOperationCount uint64 - OtherOperationCount uint64 - ReadTransferCount uint64 - WriteTransferCount uint64 - OtherTransferCount uint64 -} - -type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct { - BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION - IoInfo IO_COUNTERS - ProcessMemoryLimit uintptr - JobMemoryLimit uintptr - PeakProcessMemoryUsed uintptr - PeakJobMemoryUsed uintptr -} - -const ( - // UIRestrictionsClass - JOB_OBJECT_UILIMIT_DESKTOP = 0x00000040 - JOB_OBJECT_UILIMIT_DISPLAYSETTINGS = 0x00000010 - JOB_OBJECT_UILIMIT_EXITWINDOWS = 0x00000080 - JOB_OBJECT_UILIMIT_GLOBALATOMS = 0x00000020 - JOB_OBJECT_UILIMIT_HANDLES = 0x00000001 - JOB_OBJECT_UILIMIT_READCLIPBOARD = 0x00000002 - JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008 - JOB_OBJECT_UILIMIT_WRITECLIPBOARD = 0x00000004 -) - -type JOBOBJECT_BASIC_UI_RESTRICTIONS struct { - UIRestrictionsClass uint32 -} - -const ( - // JobObjectInformationClass - JobObjectAssociateCompletionPortInformation = 7 - JobObjectBasicLimitInformation = 2 - JobObjectBasicUIRestrictions = 4 - JobObjectCpuRateControlInformation = 15 - JobObjectEndOfJobTimeInformation = 6 - JobObjectExtendedLimitInformation = 9 - JobObjectGroupInformation = 11 - JobObjectGroupInformationEx = 14 - JobObjectLimitViolationInformation2 = 35 - JobObjectNetRateControlInformation = 32 - JobObjectNotificationLimitInformation = 12 - JobObjectNotificationLimitInformation2 = 34 - JobObjectSecurityLimitInformation = 5 -) - -const ( - KF_FLAG_DEFAULT = 0x00000000 - KF_FLAG_FORCE_APP_DATA_REDIRECTION = 0x00080000 - KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000 - KF_FLAG_FORCE_PACKAGE_REDIRECTION = 0x00020000 - KF_FLAG_NO_PACKAGE_REDIRECTION = 0x00010000 - KF_FLAG_FORCE_APPCONTAINER_REDIRECTION = 0x00020000 - KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000 - KF_FLAG_CREATE = 0x00008000 - KF_FLAG_DONT_VERIFY = 0x00004000 - KF_FLAG_DONT_UNEXPAND = 0x00002000 - KF_FLAG_NO_ALIAS = 0x00001000 - KF_FLAG_INIT = 0x00000800 - KF_FLAG_DEFAULT_PATH = 0x00000400 - KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200 - KF_FLAG_SIMPLE_IDLIST = 0x00000100 - KF_FLAG_ALIAS_ONLY = 0x80000000 
-) - -type OsVersionInfoEx struct { - osVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformId uint32 - CsdVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - _ byte -} - -const ( - EWX_LOGOFF = 0x00000000 - EWX_SHUTDOWN = 0x00000001 - EWX_REBOOT = 0x00000002 - EWX_FORCE = 0x00000004 - EWX_POWEROFF = 0x00000008 - EWX_FORCEIFHUNG = 0x00000010 - EWX_QUICKRESOLVE = 0x00000020 - EWX_RESTARTAPPS = 0x00000040 - EWX_HYBRID_SHUTDOWN = 0x00400000 - EWX_BOOTOPTIONS = 0x01000000 - - SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000 - SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000 - SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000 - SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000 - SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000 - SHTDN_REASON_FLAG_PLANNED = 0x80000000 - SHTDN_REASON_MAJOR_OTHER = 0x00000000 - SHTDN_REASON_MAJOR_NONE = 0x00000000 - SHTDN_REASON_MAJOR_HARDWARE = 0x00010000 - SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000 - SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000 - SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 - SHTDN_REASON_MAJOR_SYSTEM = 0x00050000 - SHTDN_REASON_MAJOR_POWER = 0x00060000 - SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000 - SHTDN_REASON_MINOR_OTHER = 0x00000000 - SHTDN_REASON_MINOR_NONE = 0x000000ff - SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001 - SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 - SHTDN_REASON_MINOR_UPGRADE = 0x00000003 - SHTDN_REASON_MINOR_RECONFIG = 0x00000004 - SHTDN_REASON_MINOR_HUNG = 0x00000005 - SHTDN_REASON_MINOR_UNSTABLE = 0x00000006 - SHTDN_REASON_MINOR_DISK = 0x00000007 - SHTDN_REASON_MINOR_PROCESSOR = 0x00000008 - SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009 - SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a - SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b - SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c - SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d - SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e - SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F - SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010 - SHTDN_REASON_MINOR_HOTFIX = 0x00000011 - SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012 - SHTDN_REASON_MINOR_SECURITY = 0x00000013 - SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014 - SHTDN_REASON_MINOR_WMI = 0x00000015 - SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016 - SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017 - SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018 - SHTDN_REASON_MINOR_MMC = 0x00000019 - SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a - SHTDN_REASON_MINOR_TERMSRV = 0x00000020 - SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021 - SHTDN_REASON_MINOR_DC_DEMOTION = 0x00000022 - SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE - SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED - SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff - - SHUTDOWN_NORETRY = 0x1 -) - -// Flags used for GetModuleHandleEx -const ( - GET_MODULE_HANDLE_EX_FLAG_PIN = 1 - GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 - GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 -) - -// MUI function flag values -const ( - MUI_LANGUAGE_ID = 0x4 - MUI_LANGUAGE_NAME = 0x8 - MUI_MERGE_SYSTEM_FALLBACK = 0x10 - MUI_MERGE_USER_FALLBACK = 0x20 - MUI_UI_FALLBACK = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK - MUI_THREAD_LANGUAGES = 0x40 - MUI_CONSOLE_FILTER = 0x100 - MUI_COMPLEX_SCRIPT_FILTER = 0x200 - MUI_RESET_FILTERS = 0x001 - MUI_USER_PREFERRED_UI_LANGUAGES = 0x10 - MUI_USE_INSTALLED_LANGUAGES = 0x20 - MUI_USE_SEARCH_ALL_LANGUAGES = 0x40 - MUI_LANG_NEUTRAL_PE_FILE = 0x100 - 
MUI_NON_LANG_NEUTRAL_FILE = 0x200 - MUI_MACHINE_LANGUAGE_SETTINGS = 0x400 - MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL = 0x001 - MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002 - MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI = 0x004 - MUI_QUERY_TYPE = 0x001 - MUI_QUERY_CHECKSUM = 0x002 - MUI_QUERY_LANGUAGE_NAME = 0x004 - MUI_QUERY_RESOURCE_TYPES = 0x008 - MUI_FILEINFO_VERSION = 0x001 - - MUI_FULL_LANGUAGE = 0x01 - MUI_PARTIAL_LANGUAGE = 0x02 - MUI_LIP_LANGUAGE = 0x04 - MUI_LANGUAGE_INSTALLED = 0x20 - MUI_LANGUAGE_LICENSED = 0x40 -) diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go deleted file mode 100644 index fe0ddd0..0000000 --- a/vendor/golang.org/x/sys/windows/types_windows_386.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte -} - -type Servent struct { - Name *byte - Aliases **byte - Port uint16 - Proto *byte -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go deleted file mode 100644 index 7e154c2..0000000 --- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte -} - -type Servent struct { - Name *byte - Aliases **byte - Proto *byte - Port uint16 -} diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go deleted file mode 100644 index 74571e3..0000000 --- a/vendor/golang.org/x/sys/windows/types_windows_arm.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package windows - -type WSAData struct { - Version uint16 - HighVersion uint16 - Description [WSADESCRIPTION_LEN + 1]byte - SystemStatus [WSASYS_STATUS_LEN + 1]byte - MaxSockets uint16 - MaxUdpDg uint16 - VendorInfo *byte -} - -type Servent struct { - Name *byte - Aliases **byte - Port uint16 - Proto *byte -} diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go deleted file mode 100644 index f021200..0000000 --- a/vendor/golang.org/x/sys/windows/zerrors_windows.go +++ /dev/null @@ -1,6853 +0,0 @@ -// Code generated by 'mkerrors.bash'; DO NOT EDIT. 
- -package windows - -import "syscall" - -const ( - FACILITY_NULL = 0 - FACILITY_RPC = 1 - FACILITY_DISPATCH = 2 - FACILITY_STORAGE = 3 - FACILITY_ITF = 4 - FACILITY_WIN32 = 7 - FACILITY_WINDOWS = 8 - FACILITY_SSPI = 9 - FACILITY_SECURITY = 9 - FACILITY_CONTROL = 10 - FACILITY_CERT = 11 - FACILITY_INTERNET = 12 - FACILITY_MEDIASERVER = 13 - FACILITY_MSMQ = 14 - FACILITY_SETUPAPI = 15 - FACILITY_SCARD = 16 - FACILITY_COMPLUS = 17 - FACILITY_AAF = 18 - FACILITY_URT = 19 - FACILITY_ACS = 20 - FACILITY_DPLAY = 21 - FACILITY_UMI = 22 - FACILITY_SXS = 23 - FACILITY_WINDOWS_CE = 24 - FACILITY_HTTP = 25 - FACILITY_USERMODE_COMMONLOG = 26 - FACILITY_WER = 27 - FACILITY_USERMODE_FILTER_MANAGER = 31 - FACILITY_BACKGROUNDCOPY = 32 - FACILITY_CONFIGURATION = 33 - FACILITY_WIA = 33 - FACILITY_STATE_MANAGEMENT = 34 - FACILITY_METADIRECTORY = 35 - FACILITY_WINDOWSUPDATE = 36 - FACILITY_DIRECTORYSERVICE = 37 - FACILITY_GRAPHICS = 38 - FACILITY_SHELL = 39 - FACILITY_NAP = 39 - FACILITY_TPM_SERVICES = 40 - FACILITY_TPM_SOFTWARE = 41 - FACILITY_UI = 42 - FACILITY_XAML = 43 - FACILITY_ACTION_QUEUE = 44 - FACILITY_PLA = 48 - FACILITY_WINDOWS_SETUP = 48 - FACILITY_FVE = 49 - FACILITY_FWP = 50 - FACILITY_WINRM = 51 - FACILITY_NDIS = 52 - FACILITY_USERMODE_HYPERVISOR = 53 - FACILITY_CMI = 54 - FACILITY_USERMODE_VIRTUALIZATION = 55 - FACILITY_USERMODE_VOLMGR = 56 - FACILITY_BCD = 57 - FACILITY_USERMODE_VHD = 58 - FACILITY_USERMODE_HNS = 59 - FACILITY_SDIAG = 60 - FACILITY_WEBSERVICES = 61 - FACILITY_WINPE = 61 - FACILITY_WPN = 62 - FACILITY_WINDOWS_STORE = 63 - FACILITY_INPUT = 64 - FACILITY_EAP = 66 - FACILITY_WINDOWS_DEFENDER = 80 - FACILITY_OPC = 81 - FACILITY_XPS = 82 - FACILITY_MBN = 84 - FACILITY_POWERSHELL = 84 - FACILITY_RAS = 83 - FACILITY_P2P_INT = 98 - FACILITY_P2P = 99 - FACILITY_DAF = 100 - FACILITY_BLUETOOTH_ATT = 101 - FACILITY_AUDIO = 102 - FACILITY_STATEREPOSITORY = 103 - FACILITY_VISUALCPP = 109 - FACILITY_SCRIPT = 112 - FACILITY_PARSE = 113 - FACILITY_BLB = 120 - FACILITY_BLB_CLI = 121 - FACILITY_WSBAPP = 122 - FACILITY_BLBUI = 128 - FACILITY_USN = 129 - FACILITY_USERMODE_VOLSNAP = 130 - FACILITY_TIERING = 131 - FACILITY_WSB_ONLINE = 133 - FACILITY_ONLINE_ID = 134 - FACILITY_DEVICE_UPDATE_AGENT = 135 - FACILITY_DRVSERVICING = 136 - FACILITY_DLS = 153 - FACILITY_DELIVERY_OPTIMIZATION = 208 - FACILITY_USERMODE_SPACES = 231 - FACILITY_USER_MODE_SECURITY_CORE = 232 - FACILITY_USERMODE_LICENSING = 234 - FACILITY_SOS = 160 - FACILITY_DEBUGGERS = 176 - FACILITY_SPP = 256 - FACILITY_RESTORE = 256 - FACILITY_DMSERVER = 256 - FACILITY_DEPLOYMENT_SERVICES_SERVER = 257 - FACILITY_DEPLOYMENT_SERVICES_IMAGING = 258 - FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT = 259 - FACILITY_DEPLOYMENT_SERVICES_UTIL = 260 - FACILITY_DEPLOYMENT_SERVICES_BINLSVC = 261 - FACILITY_DEPLOYMENT_SERVICES_PXE = 263 - FACILITY_DEPLOYMENT_SERVICES_TFTP = 264 - FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT = 272 - FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING = 278 - FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER = 289 - FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT = 290 - FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER = 293 - FACILITY_LINGUISTIC_SERVICES = 305 - FACILITY_AUDIOSTREAMING = 1094 - FACILITY_ACCELERATOR = 1536 - FACILITY_WMAAECMA = 1996 - FACILITY_DIRECTMUSIC = 2168 - FACILITY_DIRECT3D10 = 2169 - FACILITY_DXGI = 2170 - FACILITY_DXGI_DDI = 2171 - FACILITY_DIRECT3D11 = 2172 - FACILITY_DIRECT3D11_DEBUG = 2173 - FACILITY_DIRECT3D12 = 2174 - FACILITY_DIRECT3D12_DEBUG = 2175 - FACILITY_LEAP = 2184 - FACILITY_AUDCLNT = 2185 - 
FACILITY_WINCODEC_DWRITE_DWM = 2200 - FACILITY_WINML = 2192 - FACILITY_DIRECT2D = 2201 - FACILITY_DEFRAG = 2304 - FACILITY_USERMODE_SDBUS = 2305 - FACILITY_JSCRIPT = 2306 - FACILITY_PIDGENX = 2561 - FACILITY_EAS = 85 - FACILITY_WEB = 885 - FACILITY_WEB_SOCKET = 886 - FACILITY_MOBILE = 1793 - FACILITY_SQLITE = 1967 - FACILITY_UTC = 1989 - FACILITY_WEP = 2049 - FACILITY_SYNCENGINE = 2050 - FACILITY_XBOX = 2339 - FACILITY_PIX = 2748 - ERROR_SUCCESS syscall.Errno = 0 - NO_ERROR = 0 - SEC_E_OK Handle = 0x00000000 - ERROR_INVALID_FUNCTION syscall.Errno = 1 - ERROR_FILE_NOT_FOUND syscall.Errno = 2 - ERROR_PATH_NOT_FOUND syscall.Errno = 3 - ERROR_TOO_MANY_OPEN_FILES syscall.Errno = 4 - ERROR_ACCESS_DENIED syscall.Errno = 5 - ERROR_INVALID_HANDLE syscall.Errno = 6 - ERROR_ARENA_TRASHED syscall.Errno = 7 - ERROR_NOT_ENOUGH_MEMORY syscall.Errno = 8 - ERROR_INVALID_BLOCK syscall.Errno = 9 - ERROR_BAD_ENVIRONMENT syscall.Errno = 10 - ERROR_BAD_FORMAT syscall.Errno = 11 - ERROR_INVALID_ACCESS syscall.Errno = 12 - ERROR_INVALID_DATA syscall.Errno = 13 - ERROR_OUTOFMEMORY syscall.Errno = 14 - ERROR_INVALID_DRIVE syscall.Errno = 15 - ERROR_CURRENT_DIRECTORY syscall.Errno = 16 - ERROR_NOT_SAME_DEVICE syscall.Errno = 17 - ERROR_NO_MORE_FILES syscall.Errno = 18 - ERROR_WRITE_PROTECT syscall.Errno = 19 - ERROR_BAD_UNIT syscall.Errno = 20 - ERROR_NOT_READY syscall.Errno = 21 - ERROR_BAD_COMMAND syscall.Errno = 22 - ERROR_CRC syscall.Errno = 23 - ERROR_BAD_LENGTH syscall.Errno = 24 - ERROR_SEEK syscall.Errno = 25 - ERROR_NOT_DOS_DISK syscall.Errno = 26 - ERROR_SECTOR_NOT_FOUND syscall.Errno = 27 - ERROR_OUT_OF_PAPER syscall.Errno = 28 - ERROR_WRITE_FAULT syscall.Errno = 29 - ERROR_READ_FAULT syscall.Errno = 30 - ERROR_GEN_FAILURE syscall.Errno = 31 - ERROR_SHARING_VIOLATION syscall.Errno = 32 - ERROR_LOCK_VIOLATION syscall.Errno = 33 - ERROR_WRONG_DISK syscall.Errno = 34 - ERROR_SHARING_BUFFER_EXCEEDED syscall.Errno = 36 - ERROR_HANDLE_EOF syscall.Errno = 38 - ERROR_HANDLE_DISK_FULL syscall.Errno = 39 - ERROR_NOT_SUPPORTED syscall.Errno = 50 - ERROR_REM_NOT_LIST syscall.Errno = 51 - ERROR_DUP_NAME syscall.Errno = 52 - ERROR_BAD_NETPATH syscall.Errno = 53 - ERROR_NETWORK_BUSY syscall.Errno = 54 - ERROR_DEV_NOT_EXIST syscall.Errno = 55 - ERROR_TOO_MANY_CMDS syscall.Errno = 56 - ERROR_ADAP_HDW_ERR syscall.Errno = 57 - ERROR_BAD_NET_RESP syscall.Errno = 58 - ERROR_UNEXP_NET_ERR syscall.Errno = 59 - ERROR_BAD_REM_ADAP syscall.Errno = 60 - ERROR_PRINTQ_FULL syscall.Errno = 61 - ERROR_NO_SPOOL_SPACE syscall.Errno = 62 - ERROR_PRINT_CANCELLED syscall.Errno = 63 - ERROR_NETNAME_DELETED syscall.Errno = 64 - ERROR_NETWORK_ACCESS_DENIED syscall.Errno = 65 - ERROR_BAD_DEV_TYPE syscall.Errno = 66 - ERROR_BAD_NET_NAME syscall.Errno = 67 - ERROR_TOO_MANY_NAMES syscall.Errno = 68 - ERROR_TOO_MANY_SESS syscall.Errno = 69 - ERROR_SHARING_PAUSED syscall.Errno = 70 - ERROR_REQ_NOT_ACCEP syscall.Errno = 71 - ERROR_REDIR_PAUSED syscall.Errno = 72 - ERROR_FILE_EXISTS syscall.Errno = 80 - ERROR_CANNOT_MAKE syscall.Errno = 82 - ERROR_FAIL_I24 syscall.Errno = 83 - ERROR_OUT_OF_STRUCTURES syscall.Errno = 84 - ERROR_ALREADY_ASSIGNED syscall.Errno = 85 - ERROR_INVALID_PASSWORD syscall.Errno = 86 - ERROR_INVALID_PARAMETER syscall.Errno = 87 - ERROR_NET_WRITE_FAULT syscall.Errno = 88 - ERROR_NO_PROC_SLOTS syscall.Errno = 89 - ERROR_TOO_MANY_SEMAPHORES syscall.Errno = 100 - ERROR_EXCL_SEM_ALREADY_OWNED syscall.Errno = 101 - ERROR_SEM_IS_SET syscall.Errno = 102 - ERROR_TOO_MANY_SEM_REQUESTS syscall.Errno = 103 - ERROR_INVALID_AT_INTERRUPT_TIME 
syscall.Errno = 104 - ERROR_SEM_OWNER_DIED syscall.Errno = 105 - ERROR_SEM_USER_LIMIT syscall.Errno = 106 - ERROR_DISK_CHANGE syscall.Errno = 107 - ERROR_DRIVE_LOCKED syscall.Errno = 108 - ERROR_BROKEN_PIPE syscall.Errno = 109 - ERROR_OPEN_FAILED syscall.Errno = 110 - ERROR_BUFFER_OVERFLOW syscall.Errno = 111 - ERROR_DISK_FULL syscall.Errno = 112 - ERROR_NO_MORE_SEARCH_HANDLES syscall.Errno = 113 - ERROR_INVALID_TARGET_HANDLE syscall.Errno = 114 - ERROR_INVALID_CATEGORY syscall.Errno = 117 - ERROR_INVALID_VERIFY_SWITCH syscall.Errno = 118 - ERROR_BAD_DRIVER_LEVEL syscall.Errno = 119 - ERROR_CALL_NOT_IMPLEMENTED syscall.Errno = 120 - ERROR_SEM_TIMEOUT syscall.Errno = 121 - ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 - ERROR_INVALID_NAME syscall.Errno = 123 - ERROR_INVALID_LEVEL syscall.Errno = 124 - ERROR_NO_VOLUME_LABEL syscall.Errno = 125 - ERROR_MOD_NOT_FOUND syscall.Errno = 126 - ERROR_PROC_NOT_FOUND syscall.Errno = 127 - ERROR_WAIT_NO_CHILDREN syscall.Errno = 128 - ERROR_CHILD_NOT_COMPLETE syscall.Errno = 129 - ERROR_DIRECT_ACCESS_HANDLE syscall.Errno = 130 - ERROR_NEGATIVE_SEEK syscall.Errno = 131 - ERROR_SEEK_ON_DEVICE syscall.Errno = 132 - ERROR_IS_JOIN_TARGET syscall.Errno = 133 - ERROR_IS_JOINED syscall.Errno = 134 - ERROR_IS_SUBSTED syscall.Errno = 135 - ERROR_NOT_JOINED syscall.Errno = 136 - ERROR_NOT_SUBSTED syscall.Errno = 137 - ERROR_JOIN_TO_JOIN syscall.Errno = 138 - ERROR_SUBST_TO_SUBST syscall.Errno = 139 - ERROR_JOIN_TO_SUBST syscall.Errno = 140 - ERROR_SUBST_TO_JOIN syscall.Errno = 141 - ERROR_BUSY_DRIVE syscall.Errno = 142 - ERROR_SAME_DRIVE syscall.Errno = 143 - ERROR_DIR_NOT_ROOT syscall.Errno = 144 - ERROR_DIR_NOT_EMPTY syscall.Errno = 145 - ERROR_IS_SUBST_PATH syscall.Errno = 146 - ERROR_IS_JOIN_PATH syscall.Errno = 147 - ERROR_PATH_BUSY syscall.Errno = 148 - ERROR_IS_SUBST_TARGET syscall.Errno = 149 - ERROR_SYSTEM_TRACE syscall.Errno = 150 - ERROR_INVALID_EVENT_COUNT syscall.Errno = 151 - ERROR_TOO_MANY_MUXWAITERS syscall.Errno = 152 - ERROR_INVALID_LIST_FORMAT syscall.Errno = 153 - ERROR_LABEL_TOO_LONG syscall.Errno = 154 - ERROR_TOO_MANY_TCBS syscall.Errno = 155 - ERROR_SIGNAL_REFUSED syscall.Errno = 156 - ERROR_DISCARDED syscall.Errno = 157 - ERROR_NOT_LOCKED syscall.Errno = 158 - ERROR_BAD_THREADID_ADDR syscall.Errno = 159 - ERROR_BAD_ARGUMENTS syscall.Errno = 160 - ERROR_BAD_PATHNAME syscall.Errno = 161 - ERROR_SIGNAL_PENDING syscall.Errno = 162 - ERROR_MAX_THRDS_REACHED syscall.Errno = 164 - ERROR_LOCK_FAILED syscall.Errno = 167 - ERROR_BUSY syscall.Errno = 170 - ERROR_DEVICE_SUPPORT_IN_PROGRESS syscall.Errno = 171 - ERROR_CANCEL_VIOLATION syscall.Errno = 173 - ERROR_ATOMIC_LOCKS_NOT_SUPPORTED syscall.Errno = 174 - ERROR_INVALID_SEGMENT_NUMBER syscall.Errno = 180 - ERROR_INVALID_ORDINAL syscall.Errno = 182 - ERROR_ALREADY_EXISTS syscall.Errno = 183 - ERROR_INVALID_FLAG_NUMBER syscall.Errno = 186 - ERROR_SEM_NOT_FOUND syscall.Errno = 187 - ERROR_INVALID_STARTING_CODESEG syscall.Errno = 188 - ERROR_INVALID_STACKSEG syscall.Errno = 189 - ERROR_INVALID_MODULETYPE syscall.Errno = 190 - ERROR_INVALID_EXE_SIGNATURE syscall.Errno = 191 - ERROR_EXE_MARKED_INVALID syscall.Errno = 192 - ERROR_BAD_EXE_FORMAT syscall.Errno = 193 - ERROR_ITERATED_DATA_EXCEEDS_64k syscall.Errno = 194 - ERROR_INVALID_MINALLOCSIZE syscall.Errno = 195 - ERROR_DYNLINK_FROM_INVALID_RING syscall.Errno = 196 - ERROR_IOPL_NOT_ENABLED syscall.Errno = 197 - ERROR_INVALID_SEGDPL syscall.Errno = 198 - ERROR_AUTODATASEG_EXCEEDS_64k syscall.Errno = 199 - ERROR_RING2SEG_MUST_BE_MOVABLE syscall.Errno = 
200 - ERROR_RELOC_CHAIN_XEEDS_SEGLIM syscall.Errno = 201 - ERROR_INFLOOP_IN_RELOC_CHAIN syscall.Errno = 202 - ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 - ERROR_NO_SIGNAL_SENT syscall.Errno = 205 - ERROR_FILENAME_EXCED_RANGE syscall.Errno = 206 - ERROR_RING2_STACK_IN_USE syscall.Errno = 207 - ERROR_META_EXPANSION_TOO_LONG syscall.Errno = 208 - ERROR_INVALID_SIGNAL_NUMBER syscall.Errno = 209 - ERROR_THREAD_1_INACTIVE syscall.Errno = 210 - ERROR_LOCKED syscall.Errno = 212 - ERROR_TOO_MANY_MODULES syscall.Errno = 214 - ERROR_NESTING_NOT_ALLOWED syscall.Errno = 215 - ERROR_EXE_MACHINE_TYPE_MISMATCH syscall.Errno = 216 - ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY syscall.Errno = 217 - ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY syscall.Errno = 218 - ERROR_FILE_CHECKED_OUT syscall.Errno = 220 - ERROR_CHECKOUT_REQUIRED syscall.Errno = 221 - ERROR_BAD_FILE_TYPE syscall.Errno = 222 - ERROR_FILE_TOO_LARGE syscall.Errno = 223 - ERROR_FORMS_AUTH_REQUIRED syscall.Errno = 224 - ERROR_VIRUS_INFECTED syscall.Errno = 225 - ERROR_VIRUS_DELETED syscall.Errno = 226 - ERROR_PIPE_LOCAL syscall.Errno = 229 - ERROR_BAD_PIPE syscall.Errno = 230 - ERROR_PIPE_BUSY syscall.Errno = 231 - ERROR_NO_DATA syscall.Errno = 232 - ERROR_PIPE_NOT_CONNECTED syscall.Errno = 233 - ERROR_MORE_DATA syscall.Errno = 234 - ERROR_NO_WORK_DONE syscall.Errno = 235 - ERROR_VC_DISCONNECTED syscall.Errno = 240 - ERROR_INVALID_EA_NAME syscall.Errno = 254 - ERROR_EA_LIST_INCONSISTENT syscall.Errno = 255 - WAIT_TIMEOUT syscall.Errno = 258 - ERROR_NO_MORE_ITEMS syscall.Errno = 259 - ERROR_CANNOT_COPY syscall.Errno = 266 - ERROR_DIRECTORY syscall.Errno = 267 - ERROR_EAS_DIDNT_FIT syscall.Errno = 275 - ERROR_EA_FILE_CORRUPT syscall.Errno = 276 - ERROR_EA_TABLE_FULL syscall.Errno = 277 - ERROR_INVALID_EA_HANDLE syscall.Errno = 278 - ERROR_EAS_NOT_SUPPORTED syscall.Errno = 282 - ERROR_NOT_OWNER syscall.Errno = 288 - ERROR_TOO_MANY_POSTS syscall.Errno = 298 - ERROR_PARTIAL_COPY syscall.Errno = 299 - ERROR_OPLOCK_NOT_GRANTED syscall.Errno = 300 - ERROR_INVALID_OPLOCK_PROTOCOL syscall.Errno = 301 - ERROR_DISK_TOO_FRAGMENTED syscall.Errno = 302 - ERROR_DELETE_PENDING syscall.Errno = 303 - ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING syscall.Errno = 304 - ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME syscall.Errno = 305 - ERROR_SECURITY_STREAM_IS_INCONSISTENT syscall.Errno = 306 - ERROR_INVALID_LOCK_RANGE syscall.Errno = 307 - ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT syscall.Errno = 308 - ERROR_NOTIFICATION_GUID_ALREADY_DEFINED syscall.Errno = 309 - ERROR_INVALID_EXCEPTION_HANDLER syscall.Errno = 310 - ERROR_DUPLICATE_PRIVILEGES syscall.Errno = 311 - ERROR_NO_RANGES_PROCESSED syscall.Errno = 312 - ERROR_NOT_ALLOWED_ON_SYSTEM_FILE syscall.Errno = 313 - ERROR_DISK_RESOURCES_EXHAUSTED syscall.Errno = 314 - ERROR_INVALID_TOKEN syscall.Errno = 315 - ERROR_DEVICE_FEATURE_NOT_SUPPORTED syscall.Errno = 316 - ERROR_MR_MID_NOT_FOUND syscall.Errno = 317 - ERROR_SCOPE_NOT_FOUND syscall.Errno = 318 - ERROR_UNDEFINED_SCOPE syscall.Errno = 319 - ERROR_INVALID_CAP syscall.Errno = 320 - ERROR_DEVICE_UNREACHABLE syscall.Errno = 321 - ERROR_DEVICE_NO_RESOURCES syscall.Errno = 322 - ERROR_DATA_CHECKSUM_ERROR syscall.Errno = 323 - ERROR_INTERMIXED_KERNEL_EA_OPERATION syscall.Errno = 324 - ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED syscall.Errno = 326 - ERROR_OFFSET_ALIGNMENT_VIOLATION syscall.Errno = 327 - ERROR_INVALID_FIELD_IN_PARAMETER_LIST syscall.Errno = 328 - ERROR_OPERATION_IN_PROGRESS syscall.Errno = 329 - ERROR_BAD_DEVICE_PATH syscall.Errno = 330 - ERROR_TOO_MANY_DESCRIPTORS 
syscall.Errno = 331 - ERROR_SCRUB_DATA_DISABLED syscall.Errno = 332 - ERROR_NOT_REDUNDANT_STORAGE syscall.Errno = 333 - ERROR_RESIDENT_FILE_NOT_SUPPORTED syscall.Errno = 334 - ERROR_COMPRESSED_FILE_NOT_SUPPORTED syscall.Errno = 335 - ERROR_DIRECTORY_NOT_SUPPORTED syscall.Errno = 336 - ERROR_NOT_READ_FROM_COPY syscall.Errno = 337 - ERROR_FT_WRITE_FAILURE syscall.Errno = 338 - ERROR_FT_DI_SCAN_REQUIRED syscall.Errno = 339 - ERROR_INVALID_KERNEL_INFO_VERSION syscall.Errno = 340 - ERROR_INVALID_PEP_INFO_VERSION syscall.Errno = 341 - ERROR_OBJECT_NOT_EXTERNALLY_BACKED syscall.Errno = 342 - ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN syscall.Errno = 343 - ERROR_COMPRESSION_NOT_BENEFICIAL syscall.Errno = 344 - ERROR_STORAGE_TOPOLOGY_ID_MISMATCH syscall.Errno = 345 - ERROR_BLOCKED_BY_PARENTAL_CONTROLS syscall.Errno = 346 - ERROR_BLOCK_TOO_MANY_REFERENCES syscall.Errno = 347 - ERROR_MARKED_TO_DISALLOW_WRITES syscall.Errno = 348 - ERROR_ENCLAVE_FAILURE syscall.Errno = 349 - ERROR_FAIL_NOACTION_REBOOT syscall.Errno = 350 - ERROR_FAIL_SHUTDOWN syscall.Errno = 351 - ERROR_FAIL_RESTART syscall.Errno = 352 - ERROR_MAX_SESSIONS_REACHED syscall.Errno = 353 - ERROR_NETWORK_ACCESS_DENIED_EDP syscall.Errno = 354 - ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL syscall.Errno = 355 - ERROR_EDP_POLICY_DENIES_OPERATION syscall.Errno = 356 - ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED syscall.Errno = 357 - ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT syscall.Errno = 358 - ERROR_DEVICE_IN_MAINTENANCE syscall.Errno = 359 - ERROR_NOT_SUPPORTED_ON_DAX syscall.Errno = 360 - ERROR_DAX_MAPPING_EXISTS syscall.Errno = 361 - ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING syscall.Errno = 362 - ERROR_CLOUD_FILE_METADATA_CORRUPT syscall.Errno = 363 - ERROR_CLOUD_FILE_METADATA_TOO_LARGE syscall.Errno = 364 - ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE syscall.Errno = 365 - ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH syscall.Errno = 366 - ERROR_CHILD_PROCESS_BLOCKED syscall.Errno = 367 - ERROR_STORAGE_LOST_DATA_PERSISTENCE syscall.Errno = 368 - ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE syscall.Errno = 369 - ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT syscall.Errno = 370 - ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY syscall.Errno = 371 - ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN syscall.Errno = 372 - ERROR_GDI_HANDLE_LEAK syscall.Errno = 373 - ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS syscall.Errno = 374 - ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED syscall.Errno = 375 - ERROR_NOT_A_CLOUD_FILE syscall.Errno = 376 - ERROR_CLOUD_FILE_NOT_IN_SYNC syscall.Errno = 377 - ERROR_CLOUD_FILE_ALREADY_CONNECTED syscall.Errno = 378 - ERROR_CLOUD_FILE_NOT_SUPPORTED syscall.Errno = 379 - ERROR_CLOUD_FILE_INVALID_REQUEST syscall.Errno = 380 - ERROR_CLOUD_FILE_READ_ONLY_VOLUME syscall.Errno = 381 - ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY syscall.Errno = 382 - ERROR_CLOUD_FILE_VALIDATION_FAILED syscall.Errno = 383 - ERROR_SMB1_NOT_AVAILABLE syscall.Errno = 384 - ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION syscall.Errno = 385 - ERROR_CLOUD_FILE_AUTHENTICATION_FAILED syscall.Errno = 386 - ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES syscall.Errno = 387 - ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE syscall.Errno = 388 - ERROR_CLOUD_FILE_UNSUCCESSFUL syscall.Errno = 389 - ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT syscall.Errno = 390 - ERROR_CLOUD_FILE_IN_USE syscall.Errno = 391 - ERROR_CLOUD_FILE_PINNED syscall.Errno = 392 - ERROR_CLOUD_FILE_REQUEST_ABORTED syscall.Errno = 393 - ERROR_CLOUD_FILE_PROPERTY_CORRUPT syscall.Errno = 394 - ERROR_CLOUD_FILE_ACCESS_DENIED 
syscall.Errno = 395 - ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS syscall.Errno = 396 - ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT syscall.Errno = 397 - ERROR_CLOUD_FILE_REQUEST_CANCELED syscall.Errno = 398 - ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED syscall.Errno = 399 - ERROR_THREAD_MODE_ALREADY_BACKGROUND syscall.Errno = 400 - ERROR_THREAD_MODE_NOT_BACKGROUND syscall.Errno = 401 - ERROR_PROCESS_MODE_ALREADY_BACKGROUND syscall.Errno = 402 - ERROR_PROCESS_MODE_NOT_BACKGROUND syscall.Errno = 403 - ERROR_CLOUD_FILE_PROVIDER_TERMINATED syscall.Errno = 404 - ERROR_NOT_A_CLOUD_SYNC_ROOT syscall.Errno = 405 - ERROR_FILE_PROTECTED_UNDER_DPL syscall.Errno = 406 - ERROR_VOLUME_NOT_CLUSTER_ALIGNED syscall.Errno = 407 - ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND syscall.Errno = 408 - ERROR_APPX_FILE_NOT_ENCRYPTED syscall.Errno = 409 - ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED syscall.Errno = 410 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET syscall.Errno = 411 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE syscall.Errno = 412 - ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER syscall.Errno = 413 - ERROR_LINUX_SUBSYSTEM_NOT_PRESENT syscall.Errno = 414 - ERROR_FT_READ_FAILURE syscall.Errno = 415 - ERROR_STORAGE_RESERVE_ID_INVALID syscall.Errno = 416 - ERROR_STORAGE_RESERVE_DOES_NOT_EXIST syscall.Errno = 417 - ERROR_STORAGE_RESERVE_ALREADY_EXISTS syscall.Errno = 418 - ERROR_STORAGE_RESERVE_NOT_EMPTY syscall.Errno = 419 - ERROR_NOT_A_DAX_VOLUME syscall.Errno = 420 - ERROR_NOT_DAX_MAPPABLE syscall.Errno = 421 - ERROR_TIME_CRITICAL_THREAD syscall.Errno = 422 - ERROR_DPL_NOT_SUPPORTED_FOR_USER syscall.Errno = 423 - ERROR_CASE_DIFFERING_NAMES_IN_DIR syscall.Errno = 424 - ERROR_CAPAUTHZ_NOT_DEVUNLOCKED syscall.Errno = 450 - ERROR_CAPAUTHZ_CHANGE_TYPE syscall.Errno = 451 - ERROR_CAPAUTHZ_NOT_PROVISIONED syscall.Errno = 452 - ERROR_CAPAUTHZ_NOT_AUTHORIZED syscall.Errno = 453 - ERROR_CAPAUTHZ_NO_POLICY syscall.Errno = 454 - ERROR_CAPAUTHZ_DB_CORRUPTED syscall.Errno = 455 - ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG syscall.Errno = 456 - ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY syscall.Errno = 457 - ERROR_CAPAUTHZ_SCCD_PARSE_ERROR syscall.Errno = 458 - ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED syscall.Errno = 459 - ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH syscall.Errno = 460 - ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT syscall.Errno = 480 - ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT syscall.Errno = 481 - ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT syscall.Errno = 482 - ERROR_DEVICE_HARDWARE_ERROR syscall.Errno = 483 - ERROR_INVALID_ADDRESS syscall.Errno = 487 - ERROR_VRF_CFG_ENABLED syscall.Errno = 1183 - ERROR_PARTITION_TERMINATING syscall.Errno = 1184 - ERROR_USER_PROFILE_LOAD syscall.Errno = 500 - ERROR_ARITHMETIC_OVERFLOW syscall.Errno = 534 - ERROR_PIPE_CONNECTED syscall.Errno = 535 - ERROR_PIPE_LISTENING syscall.Errno = 536 - ERROR_VERIFIER_STOP syscall.Errno = 537 - ERROR_ABIOS_ERROR syscall.Errno = 538 - ERROR_WX86_WARNING syscall.Errno = 539 - ERROR_WX86_ERROR syscall.Errno = 540 - ERROR_TIMER_NOT_CANCELED syscall.Errno = 541 - ERROR_UNWIND syscall.Errno = 542 - ERROR_BAD_STACK syscall.Errno = 543 - ERROR_INVALID_UNWIND_TARGET syscall.Errno = 544 - ERROR_INVALID_PORT_ATTRIBUTES syscall.Errno = 545 - ERROR_PORT_MESSAGE_TOO_LONG syscall.Errno = 546 - ERROR_INVALID_QUOTA_LOWER syscall.Errno = 547 - ERROR_DEVICE_ALREADY_ATTACHED syscall.Errno = 548 - ERROR_INSTRUCTION_MISALIGNMENT syscall.Errno = 549 - ERROR_PROFILING_NOT_STARTED syscall.Errno = 550 - ERROR_PROFILING_NOT_STOPPED syscall.Errno = 551 - 
ERROR_COULD_NOT_INTERPRET syscall.Errno = 552 - ERROR_PROFILING_AT_LIMIT syscall.Errno = 553 - ERROR_CANT_WAIT syscall.Errno = 554 - ERROR_CANT_TERMINATE_SELF syscall.Errno = 555 - ERROR_UNEXPECTED_MM_CREATE_ERR syscall.Errno = 556 - ERROR_UNEXPECTED_MM_MAP_ERROR syscall.Errno = 557 - ERROR_UNEXPECTED_MM_EXTEND_ERR syscall.Errno = 558 - ERROR_BAD_FUNCTION_TABLE syscall.Errno = 559 - ERROR_NO_GUID_TRANSLATION syscall.Errno = 560 - ERROR_INVALID_LDT_SIZE syscall.Errno = 561 - ERROR_INVALID_LDT_OFFSET syscall.Errno = 563 - ERROR_INVALID_LDT_DESCRIPTOR syscall.Errno = 564 - ERROR_TOO_MANY_THREADS syscall.Errno = 565 - ERROR_THREAD_NOT_IN_PROCESS syscall.Errno = 566 - ERROR_PAGEFILE_QUOTA_EXCEEDED syscall.Errno = 567 - ERROR_LOGON_SERVER_CONFLICT syscall.Errno = 568 - ERROR_SYNCHRONIZATION_REQUIRED syscall.Errno = 569 - ERROR_NET_OPEN_FAILED syscall.Errno = 570 - ERROR_IO_PRIVILEGE_FAILED syscall.Errno = 571 - ERROR_CONTROL_C_EXIT syscall.Errno = 572 - ERROR_MISSING_SYSTEMFILE syscall.Errno = 573 - ERROR_UNHANDLED_EXCEPTION syscall.Errno = 574 - ERROR_APP_INIT_FAILURE syscall.Errno = 575 - ERROR_PAGEFILE_CREATE_FAILED syscall.Errno = 576 - ERROR_INVALID_IMAGE_HASH syscall.Errno = 577 - ERROR_NO_PAGEFILE syscall.Errno = 578 - ERROR_ILLEGAL_FLOAT_CONTEXT syscall.Errno = 579 - ERROR_NO_EVENT_PAIR syscall.Errno = 580 - ERROR_DOMAIN_CTRLR_CONFIG_ERROR syscall.Errno = 581 - ERROR_ILLEGAL_CHARACTER syscall.Errno = 582 - ERROR_UNDEFINED_CHARACTER syscall.Errno = 583 - ERROR_FLOPPY_VOLUME syscall.Errno = 584 - ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT syscall.Errno = 585 - ERROR_BACKUP_CONTROLLER syscall.Errno = 586 - ERROR_MUTANT_LIMIT_EXCEEDED syscall.Errno = 587 - ERROR_FS_DRIVER_REQUIRED syscall.Errno = 588 - ERROR_CANNOT_LOAD_REGISTRY_FILE syscall.Errno = 589 - ERROR_DEBUG_ATTACH_FAILED syscall.Errno = 590 - ERROR_SYSTEM_PROCESS_TERMINATED syscall.Errno = 591 - ERROR_DATA_NOT_ACCEPTED syscall.Errno = 592 - ERROR_VDM_HARD_ERROR syscall.Errno = 593 - ERROR_DRIVER_CANCEL_TIMEOUT syscall.Errno = 594 - ERROR_REPLY_MESSAGE_MISMATCH syscall.Errno = 595 - ERROR_LOST_WRITEBEHIND_DATA syscall.Errno = 596 - ERROR_CLIENT_SERVER_PARAMETERS_INVALID syscall.Errno = 597 - ERROR_NOT_TINY_STREAM syscall.Errno = 598 - ERROR_STACK_OVERFLOW_READ syscall.Errno = 599 - ERROR_CONVERT_TO_LARGE syscall.Errno = 600 - ERROR_FOUND_OUT_OF_SCOPE syscall.Errno = 601 - ERROR_ALLOCATE_BUCKET syscall.Errno = 602 - ERROR_MARSHALL_OVERFLOW syscall.Errno = 603 - ERROR_INVALID_VARIANT syscall.Errno = 604 - ERROR_BAD_COMPRESSION_BUFFER syscall.Errno = 605 - ERROR_AUDIT_FAILED syscall.Errno = 606 - ERROR_TIMER_RESOLUTION_NOT_SET syscall.Errno = 607 - ERROR_INSUFFICIENT_LOGON_INFO syscall.Errno = 608 - ERROR_BAD_DLL_ENTRYPOINT syscall.Errno = 609 - ERROR_BAD_SERVICE_ENTRYPOINT syscall.Errno = 610 - ERROR_IP_ADDRESS_CONFLICT1 syscall.Errno = 611 - ERROR_IP_ADDRESS_CONFLICT2 syscall.Errno = 612 - ERROR_REGISTRY_QUOTA_LIMIT syscall.Errno = 613 - ERROR_NO_CALLBACK_ACTIVE syscall.Errno = 614 - ERROR_PWD_TOO_SHORT syscall.Errno = 615 - ERROR_PWD_TOO_RECENT syscall.Errno = 616 - ERROR_PWD_HISTORY_CONFLICT syscall.Errno = 617 - ERROR_UNSUPPORTED_COMPRESSION syscall.Errno = 618 - ERROR_INVALID_HW_PROFILE syscall.Errno = 619 - ERROR_INVALID_PLUGPLAY_DEVICE_PATH syscall.Errno = 620 - ERROR_QUOTA_LIST_INCONSISTENT syscall.Errno = 621 - ERROR_EVALUATION_EXPIRATION syscall.Errno = 622 - ERROR_ILLEGAL_DLL_RELOCATION syscall.Errno = 623 - ERROR_DLL_INIT_FAILED_LOGOFF syscall.Errno = 624 - ERROR_VALIDATE_CONTINUE syscall.Errno = 625 - ERROR_NO_MORE_MATCHES 
syscall.Errno = 626 - ERROR_RANGE_LIST_CONFLICT syscall.Errno = 627 - ERROR_SERVER_SID_MISMATCH syscall.Errno = 628 - ERROR_CANT_ENABLE_DENY_ONLY syscall.Errno = 629 - ERROR_FLOAT_MULTIPLE_FAULTS syscall.Errno = 630 - ERROR_FLOAT_MULTIPLE_TRAPS syscall.Errno = 631 - ERROR_NOINTERFACE syscall.Errno = 632 - ERROR_DRIVER_FAILED_SLEEP syscall.Errno = 633 - ERROR_CORRUPT_SYSTEM_FILE syscall.Errno = 634 - ERROR_COMMITMENT_MINIMUM syscall.Errno = 635 - ERROR_PNP_RESTART_ENUMERATION syscall.Errno = 636 - ERROR_SYSTEM_IMAGE_BAD_SIGNATURE syscall.Errno = 637 - ERROR_PNP_REBOOT_REQUIRED syscall.Errno = 638 - ERROR_INSUFFICIENT_POWER syscall.Errno = 639 - ERROR_MULTIPLE_FAULT_VIOLATION syscall.Errno = 640 - ERROR_SYSTEM_SHUTDOWN syscall.Errno = 641 - ERROR_PORT_NOT_SET syscall.Errno = 642 - ERROR_DS_VERSION_CHECK_FAILURE syscall.Errno = 643 - ERROR_RANGE_NOT_FOUND syscall.Errno = 644 - ERROR_NOT_SAFE_MODE_DRIVER syscall.Errno = 646 - ERROR_FAILED_DRIVER_ENTRY syscall.Errno = 647 - ERROR_DEVICE_ENUMERATION_ERROR syscall.Errno = 648 - ERROR_MOUNT_POINT_NOT_RESOLVED syscall.Errno = 649 - ERROR_INVALID_DEVICE_OBJECT_PARAMETER syscall.Errno = 650 - ERROR_MCA_OCCURED syscall.Errno = 651 - ERROR_DRIVER_DATABASE_ERROR syscall.Errno = 652 - ERROR_SYSTEM_HIVE_TOO_LARGE syscall.Errno = 653 - ERROR_DRIVER_FAILED_PRIOR_UNLOAD syscall.Errno = 654 - ERROR_VOLSNAP_PREPARE_HIBERNATE syscall.Errno = 655 - ERROR_HIBERNATION_FAILURE syscall.Errno = 656 - ERROR_PWD_TOO_LONG syscall.Errno = 657 - ERROR_FILE_SYSTEM_LIMITATION syscall.Errno = 665 - ERROR_ASSERTION_FAILURE syscall.Errno = 668 - ERROR_ACPI_ERROR syscall.Errno = 669 - ERROR_WOW_ASSERTION syscall.Errno = 670 - ERROR_PNP_BAD_MPS_TABLE syscall.Errno = 671 - ERROR_PNP_TRANSLATION_FAILED syscall.Errno = 672 - ERROR_PNP_IRQ_TRANSLATION_FAILED syscall.Errno = 673 - ERROR_PNP_INVALID_ID syscall.Errno = 674 - ERROR_WAKE_SYSTEM_DEBUGGER syscall.Errno = 675 - ERROR_HANDLES_CLOSED syscall.Errno = 676 - ERROR_EXTRANEOUS_INFORMATION syscall.Errno = 677 - ERROR_RXACT_COMMIT_NECESSARY syscall.Errno = 678 - ERROR_MEDIA_CHECK syscall.Errno = 679 - ERROR_GUID_SUBSTITUTION_MADE syscall.Errno = 680 - ERROR_STOPPED_ON_SYMLINK syscall.Errno = 681 - ERROR_LONGJUMP syscall.Errno = 682 - ERROR_PLUGPLAY_QUERY_VETOED syscall.Errno = 683 - ERROR_UNWIND_CONSOLIDATE syscall.Errno = 684 - ERROR_REGISTRY_HIVE_RECOVERED syscall.Errno = 685 - ERROR_DLL_MIGHT_BE_INSECURE syscall.Errno = 686 - ERROR_DLL_MIGHT_BE_INCOMPATIBLE syscall.Errno = 687 - ERROR_DBG_EXCEPTION_NOT_HANDLED syscall.Errno = 688 - ERROR_DBG_REPLY_LATER syscall.Errno = 689 - ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE syscall.Errno = 690 - ERROR_DBG_TERMINATE_THREAD syscall.Errno = 691 - ERROR_DBG_TERMINATE_PROCESS syscall.Errno = 692 - ERROR_DBG_CONTROL_C syscall.Errno = 693 - ERROR_DBG_PRINTEXCEPTION_C syscall.Errno = 694 - ERROR_DBG_RIPEXCEPTION syscall.Errno = 695 - ERROR_DBG_CONTROL_BREAK syscall.Errno = 696 - ERROR_DBG_COMMAND_EXCEPTION syscall.Errno = 697 - ERROR_OBJECT_NAME_EXISTS syscall.Errno = 698 - ERROR_THREAD_WAS_SUSPENDED syscall.Errno = 699 - ERROR_IMAGE_NOT_AT_BASE syscall.Errno = 700 - ERROR_RXACT_STATE_CREATED syscall.Errno = 701 - ERROR_SEGMENT_NOTIFICATION syscall.Errno = 702 - ERROR_BAD_CURRENT_DIRECTORY syscall.Errno = 703 - ERROR_FT_READ_RECOVERY_FROM_BACKUP syscall.Errno = 704 - ERROR_FT_WRITE_RECOVERY syscall.Errno = 705 - ERROR_IMAGE_MACHINE_TYPE_MISMATCH syscall.Errno = 706 - ERROR_RECEIVE_PARTIAL syscall.Errno = 707 - ERROR_RECEIVE_EXPEDITED syscall.Errno = 708 - ERROR_RECEIVE_PARTIAL_EXPEDITED syscall.Errno = 
709 - ERROR_EVENT_DONE syscall.Errno = 710 - ERROR_EVENT_PENDING syscall.Errno = 711 - ERROR_CHECKING_FILE_SYSTEM syscall.Errno = 712 - ERROR_FATAL_APP_EXIT syscall.Errno = 713 - ERROR_PREDEFINED_HANDLE syscall.Errno = 714 - ERROR_WAS_UNLOCKED syscall.Errno = 715 - ERROR_SERVICE_NOTIFICATION syscall.Errno = 716 - ERROR_WAS_LOCKED syscall.Errno = 717 - ERROR_LOG_HARD_ERROR syscall.Errno = 718 - ERROR_ALREADY_WIN32 syscall.Errno = 719 - ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE syscall.Errno = 720 - ERROR_NO_YIELD_PERFORMED syscall.Errno = 721 - ERROR_TIMER_RESUME_IGNORED syscall.Errno = 722 - ERROR_ARBITRATION_UNHANDLED syscall.Errno = 723 - ERROR_CARDBUS_NOT_SUPPORTED syscall.Errno = 724 - ERROR_MP_PROCESSOR_MISMATCH syscall.Errno = 725 - ERROR_HIBERNATED syscall.Errno = 726 - ERROR_RESUME_HIBERNATION syscall.Errno = 727 - ERROR_FIRMWARE_UPDATED syscall.Errno = 728 - ERROR_DRIVERS_LEAKING_LOCKED_PAGES syscall.Errno = 729 - ERROR_WAKE_SYSTEM syscall.Errno = 730 - ERROR_WAIT_1 syscall.Errno = 731 - ERROR_WAIT_2 syscall.Errno = 732 - ERROR_WAIT_3 syscall.Errno = 733 - ERROR_WAIT_63 syscall.Errno = 734 - ERROR_ABANDONED_WAIT_0 syscall.Errno = 735 - ERROR_ABANDONED_WAIT_63 syscall.Errno = 736 - ERROR_USER_APC syscall.Errno = 737 - ERROR_KERNEL_APC syscall.Errno = 738 - ERROR_ALERTED syscall.Errno = 739 - ERROR_ELEVATION_REQUIRED syscall.Errno = 740 - ERROR_REPARSE syscall.Errno = 741 - ERROR_OPLOCK_BREAK_IN_PROGRESS syscall.Errno = 742 - ERROR_VOLUME_MOUNTED syscall.Errno = 743 - ERROR_RXACT_COMMITTED syscall.Errno = 744 - ERROR_NOTIFY_CLEANUP syscall.Errno = 745 - ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED syscall.Errno = 746 - ERROR_PAGE_FAULT_TRANSITION syscall.Errno = 747 - ERROR_PAGE_FAULT_DEMAND_ZERO syscall.Errno = 748 - ERROR_PAGE_FAULT_COPY_ON_WRITE syscall.Errno = 749 - ERROR_PAGE_FAULT_GUARD_PAGE syscall.Errno = 750 - ERROR_PAGE_FAULT_PAGING_FILE syscall.Errno = 751 - ERROR_CACHE_PAGE_LOCKED syscall.Errno = 752 - ERROR_CRASH_DUMP syscall.Errno = 753 - ERROR_BUFFER_ALL_ZEROS syscall.Errno = 754 - ERROR_REPARSE_OBJECT syscall.Errno = 755 - ERROR_RESOURCE_REQUIREMENTS_CHANGED syscall.Errno = 756 - ERROR_TRANSLATION_COMPLETE syscall.Errno = 757 - ERROR_NOTHING_TO_TERMINATE syscall.Errno = 758 - ERROR_PROCESS_NOT_IN_JOB syscall.Errno = 759 - ERROR_PROCESS_IN_JOB syscall.Errno = 760 - ERROR_VOLSNAP_HIBERNATE_READY syscall.Errno = 761 - ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY syscall.Errno = 762 - ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED syscall.Errno = 763 - ERROR_INTERRUPT_STILL_CONNECTED syscall.Errno = 764 - ERROR_WAIT_FOR_OPLOCK syscall.Errno = 765 - ERROR_DBG_EXCEPTION_HANDLED syscall.Errno = 766 - ERROR_DBG_CONTINUE syscall.Errno = 767 - ERROR_CALLBACK_POP_STACK syscall.Errno = 768 - ERROR_COMPRESSION_DISABLED syscall.Errno = 769 - ERROR_CANTFETCHBACKWARDS syscall.Errno = 770 - ERROR_CANTSCROLLBACKWARDS syscall.Errno = 771 - ERROR_ROWSNOTRELEASED syscall.Errno = 772 - ERROR_BAD_ACCESSOR_FLAGS syscall.Errno = 773 - ERROR_ERRORS_ENCOUNTERED syscall.Errno = 774 - ERROR_NOT_CAPABLE syscall.Errno = 775 - ERROR_REQUEST_OUT_OF_SEQUENCE syscall.Errno = 776 - ERROR_VERSION_PARSE_ERROR syscall.Errno = 777 - ERROR_BADSTARTPOSITION syscall.Errno = 778 - ERROR_MEMORY_HARDWARE syscall.Errno = 779 - ERROR_DISK_REPAIR_DISABLED syscall.Errno = 780 - ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE syscall.Errno = 781 - ERROR_SYSTEM_POWERSTATE_TRANSITION syscall.Errno = 782 - ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION syscall.Errno = 783 - ERROR_MCA_EXCEPTION syscall.Errno = 784 - 
ERROR_ACCESS_AUDIT_BY_POLICY syscall.Errno = 785 - ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY syscall.Errno = 786 - ERROR_ABANDON_HIBERFILE syscall.Errno = 787 - ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED syscall.Errno = 788 - ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR syscall.Errno = 789 - ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR syscall.Errno = 790 - ERROR_BAD_MCFG_TABLE syscall.Errno = 791 - ERROR_DISK_REPAIR_REDIRECTED syscall.Errno = 792 - ERROR_DISK_REPAIR_UNSUCCESSFUL syscall.Errno = 793 - ERROR_CORRUPT_LOG_OVERFULL syscall.Errno = 794 - ERROR_CORRUPT_LOG_CORRUPTED syscall.Errno = 795 - ERROR_CORRUPT_LOG_UNAVAILABLE syscall.Errno = 796 - ERROR_CORRUPT_LOG_DELETED_FULL syscall.Errno = 797 - ERROR_CORRUPT_LOG_CLEARED syscall.Errno = 798 - ERROR_ORPHAN_NAME_EXHAUSTED syscall.Errno = 799 - ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE syscall.Errno = 800 - ERROR_CANNOT_GRANT_REQUESTED_OPLOCK syscall.Errno = 801 - ERROR_CANNOT_BREAK_OPLOCK syscall.Errno = 802 - ERROR_OPLOCK_HANDLE_CLOSED syscall.Errno = 803 - ERROR_NO_ACE_CONDITION syscall.Errno = 804 - ERROR_INVALID_ACE_CONDITION syscall.Errno = 805 - ERROR_FILE_HANDLE_REVOKED syscall.Errno = 806 - ERROR_IMAGE_AT_DIFFERENT_BASE syscall.Errno = 807 - ERROR_ENCRYPTED_IO_NOT_POSSIBLE syscall.Errno = 808 - ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS syscall.Errno = 809 - ERROR_QUOTA_ACTIVITY syscall.Errno = 810 - ERROR_HANDLE_REVOKED syscall.Errno = 811 - ERROR_CALLBACK_INVOKE_INLINE syscall.Errno = 812 - ERROR_CPU_SET_INVALID syscall.Errno = 813 - ERROR_ENCLAVE_NOT_TERMINATED syscall.Errno = 814 - ERROR_ENCLAVE_VIOLATION syscall.Errno = 815 - ERROR_EA_ACCESS_DENIED syscall.Errno = 994 - ERROR_OPERATION_ABORTED syscall.Errno = 995 - ERROR_IO_INCOMPLETE syscall.Errno = 996 - ERROR_IO_PENDING syscall.Errno = 997 - ERROR_NOACCESS syscall.Errno = 998 - ERROR_SWAPERROR syscall.Errno = 999 - ERROR_STACK_OVERFLOW syscall.Errno = 1001 - ERROR_INVALID_MESSAGE syscall.Errno = 1002 - ERROR_CAN_NOT_COMPLETE syscall.Errno = 1003 - ERROR_INVALID_FLAGS syscall.Errno = 1004 - ERROR_UNRECOGNIZED_VOLUME syscall.Errno = 1005 - ERROR_FILE_INVALID syscall.Errno = 1006 - ERROR_FULLSCREEN_MODE syscall.Errno = 1007 - ERROR_NO_TOKEN syscall.Errno = 1008 - ERROR_BADDB syscall.Errno = 1009 - ERROR_BADKEY syscall.Errno = 1010 - ERROR_CANTOPEN syscall.Errno = 1011 - ERROR_CANTREAD syscall.Errno = 1012 - ERROR_CANTWRITE syscall.Errno = 1013 - ERROR_REGISTRY_RECOVERED syscall.Errno = 1014 - ERROR_REGISTRY_CORRUPT syscall.Errno = 1015 - ERROR_REGISTRY_IO_FAILED syscall.Errno = 1016 - ERROR_NOT_REGISTRY_FILE syscall.Errno = 1017 - ERROR_KEY_DELETED syscall.Errno = 1018 - ERROR_NO_LOG_SPACE syscall.Errno = 1019 - ERROR_KEY_HAS_CHILDREN syscall.Errno = 1020 - ERROR_CHILD_MUST_BE_VOLATILE syscall.Errno = 1021 - ERROR_NOTIFY_ENUM_DIR syscall.Errno = 1022 - ERROR_DEPENDENT_SERVICES_RUNNING syscall.Errno = 1051 - ERROR_INVALID_SERVICE_CONTROL syscall.Errno = 1052 - ERROR_SERVICE_REQUEST_TIMEOUT syscall.Errno = 1053 - ERROR_SERVICE_NO_THREAD syscall.Errno = 1054 - ERROR_SERVICE_DATABASE_LOCKED syscall.Errno = 1055 - ERROR_SERVICE_ALREADY_RUNNING syscall.Errno = 1056 - ERROR_INVALID_SERVICE_ACCOUNT syscall.Errno = 1057 - ERROR_SERVICE_DISABLED syscall.Errno = 1058 - ERROR_CIRCULAR_DEPENDENCY syscall.Errno = 1059 - ERROR_SERVICE_DOES_NOT_EXIST syscall.Errno = 1060 - ERROR_SERVICE_CANNOT_ACCEPT_CTRL syscall.Errno = 1061 - ERROR_SERVICE_NOT_ACTIVE syscall.Errno = 1062 - ERROR_FAILED_SERVICE_CONTROLLER_CONNECT syscall.Errno = 1063 - ERROR_EXCEPTION_IN_SERVICE syscall.Errno = 
1064 - ERROR_DATABASE_DOES_NOT_EXIST syscall.Errno = 1065 - ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 - ERROR_PROCESS_ABORTED syscall.Errno = 1067 - ERROR_SERVICE_DEPENDENCY_FAIL syscall.Errno = 1068 - ERROR_SERVICE_LOGON_FAILED syscall.Errno = 1069 - ERROR_SERVICE_START_HANG syscall.Errno = 1070 - ERROR_INVALID_SERVICE_LOCK syscall.Errno = 1071 - ERROR_SERVICE_MARKED_FOR_DELETE syscall.Errno = 1072 - ERROR_SERVICE_EXISTS syscall.Errno = 1073 - ERROR_ALREADY_RUNNING_LKG syscall.Errno = 1074 - ERROR_SERVICE_DEPENDENCY_DELETED syscall.Errno = 1075 - ERROR_BOOT_ALREADY_ACCEPTED syscall.Errno = 1076 - ERROR_SERVICE_NEVER_STARTED syscall.Errno = 1077 - ERROR_DUPLICATE_SERVICE_NAME syscall.Errno = 1078 - ERROR_DIFFERENT_SERVICE_ACCOUNT syscall.Errno = 1079 - ERROR_CANNOT_DETECT_DRIVER_FAILURE syscall.Errno = 1080 - ERROR_CANNOT_DETECT_PROCESS_ABORT syscall.Errno = 1081 - ERROR_NO_RECOVERY_PROGRAM syscall.Errno = 1082 - ERROR_SERVICE_NOT_IN_EXE syscall.Errno = 1083 - ERROR_NOT_SAFEBOOT_SERVICE syscall.Errno = 1084 - ERROR_END_OF_MEDIA syscall.Errno = 1100 - ERROR_FILEMARK_DETECTED syscall.Errno = 1101 - ERROR_BEGINNING_OF_MEDIA syscall.Errno = 1102 - ERROR_SETMARK_DETECTED syscall.Errno = 1103 - ERROR_NO_DATA_DETECTED syscall.Errno = 1104 - ERROR_PARTITION_FAILURE syscall.Errno = 1105 - ERROR_INVALID_BLOCK_LENGTH syscall.Errno = 1106 - ERROR_DEVICE_NOT_PARTITIONED syscall.Errno = 1107 - ERROR_UNABLE_TO_LOCK_MEDIA syscall.Errno = 1108 - ERROR_UNABLE_TO_UNLOAD_MEDIA syscall.Errno = 1109 - ERROR_MEDIA_CHANGED syscall.Errno = 1110 - ERROR_BUS_RESET syscall.Errno = 1111 - ERROR_NO_MEDIA_IN_DRIVE syscall.Errno = 1112 - ERROR_NO_UNICODE_TRANSLATION syscall.Errno = 1113 - ERROR_DLL_INIT_FAILED syscall.Errno = 1114 - ERROR_SHUTDOWN_IN_PROGRESS syscall.Errno = 1115 - ERROR_NO_SHUTDOWN_IN_PROGRESS syscall.Errno = 1116 - ERROR_IO_DEVICE syscall.Errno = 1117 - ERROR_SERIAL_NO_DEVICE syscall.Errno = 1118 - ERROR_IRQ_BUSY syscall.Errno = 1119 - ERROR_MORE_WRITES syscall.Errno = 1120 - ERROR_COUNTER_TIMEOUT syscall.Errno = 1121 - ERROR_FLOPPY_ID_MARK_NOT_FOUND syscall.Errno = 1122 - ERROR_FLOPPY_WRONG_CYLINDER syscall.Errno = 1123 - ERROR_FLOPPY_UNKNOWN_ERROR syscall.Errno = 1124 - ERROR_FLOPPY_BAD_REGISTERS syscall.Errno = 1125 - ERROR_DISK_RECALIBRATE_FAILED syscall.Errno = 1126 - ERROR_DISK_OPERATION_FAILED syscall.Errno = 1127 - ERROR_DISK_RESET_FAILED syscall.Errno = 1128 - ERROR_EOM_OVERFLOW syscall.Errno = 1129 - ERROR_NOT_ENOUGH_SERVER_MEMORY syscall.Errno = 1130 - ERROR_POSSIBLE_DEADLOCK syscall.Errno = 1131 - ERROR_MAPPED_ALIGNMENT syscall.Errno = 1132 - ERROR_SET_POWER_STATE_VETOED syscall.Errno = 1140 - ERROR_SET_POWER_STATE_FAILED syscall.Errno = 1141 - ERROR_TOO_MANY_LINKS syscall.Errno = 1142 - ERROR_OLD_WIN_VERSION syscall.Errno = 1150 - ERROR_APP_WRONG_OS syscall.Errno = 1151 - ERROR_SINGLE_INSTANCE_APP syscall.Errno = 1152 - ERROR_RMODE_APP syscall.Errno = 1153 - ERROR_INVALID_DLL syscall.Errno = 1154 - ERROR_NO_ASSOCIATION syscall.Errno = 1155 - ERROR_DDE_FAIL syscall.Errno = 1156 - ERROR_DLL_NOT_FOUND syscall.Errno = 1157 - ERROR_NO_MORE_USER_HANDLES syscall.Errno = 1158 - ERROR_MESSAGE_SYNC_ONLY syscall.Errno = 1159 - ERROR_SOURCE_ELEMENT_EMPTY syscall.Errno = 1160 - ERROR_DESTINATION_ELEMENT_FULL syscall.Errno = 1161 - ERROR_ILLEGAL_ELEMENT_ADDRESS syscall.Errno = 1162 - ERROR_MAGAZINE_NOT_PRESENT syscall.Errno = 1163 - ERROR_DEVICE_REINITIALIZATION_NEEDED syscall.Errno = 1164 - ERROR_DEVICE_REQUIRES_CLEANING syscall.Errno = 1165 - ERROR_DEVICE_DOOR_OPEN syscall.Errno = 1166 - 
ERROR_DEVICE_NOT_CONNECTED syscall.Errno = 1167 - ERROR_NOT_FOUND syscall.Errno = 1168 - ERROR_NO_MATCH syscall.Errno = 1169 - ERROR_SET_NOT_FOUND syscall.Errno = 1170 - ERROR_POINT_NOT_FOUND syscall.Errno = 1171 - ERROR_NO_TRACKING_SERVICE syscall.Errno = 1172 - ERROR_NO_VOLUME_ID syscall.Errno = 1173 - ERROR_UNABLE_TO_REMOVE_REPLACED syscall.Errno = 1175 - ERROR_UNABLE_TO_MOVE_REPLACEMENT syscall.Errno = 1176 - ERROR_UNABLE_TO_MOVE_REPLACEMENT_2 syscall.Errno = 1177 - ERROR_JOURNAL_DELETE_IN_PROGRESS syscall.Errno = 1178 - ERROR_JOURNAL_NOT_ACTIVE syscall.Errno = 1179 - ERROR_POTENTIAL_FILE_FOUND syscall.Errno = 1180 - ERROR_JOURNAL_ENTRY_DELETED syscall.Errno = 1181 - ERROR_SHUTDOWN_IS_SCHEDULED syscall.Errno = 1190 - ERROR_SHUTDOWN_USERS_LOGGED_ON syscall.Errno = 1191 - ERROR_BAD_DEVICE syscall.Errno = 1200 - ERROR_CONNECTION_UNAVAIL syscall.Errno = 1201 - ERROR_DEVICE_ALREADY_REMEMBERED syscall.Errno = 1202 - ERROR_NO_NET_OR_BAD_PATH syscall.Errno = 1203 - ERROR_BAD_PROVIDER syscall.Errno = 1204 - ERROR_CANNOT_OPEN_PROFILE syscall.Errno = 1205 - ERROR_BAD_PROFILE syscall.Errno = 1206 - ERROR_NOT_CONTAINER syscall.Errno = 1207 - ERROR_EXTENDED_ERROR syscall.Errno = 1208 - ERROR_INVALID_GROUPNAME syscall.Errno = 1209 - ERROR_INVALID_COMPUTERNAME syscall.Errno = 1210 - ERROR_INVALID_EVENTNAME syscall.Errno = 1211 - ERROR_INVALID_DOMAINNAME syscall.Errno = 1212 - ERROR_INVALID_SERVICENAME syscall.Errno = 1213 - ERROR_INVALID_NETNAME syscall.Errno = 1214 - ERROR_INVALID_SHARENAME syscall.Errno = 1215 - ERROR_INVALID_PASSWORDNAME syscall.Errno = 1216 - ERROR_INVALID_MESSAGENAME syscall.Errno = 1217 - ERROR_INVALID_MESSAGEDEST syscall.Errno = 1218 - ERROR_SESSION_CREDENTIAL_CONFLICT syscall.Errno = 1219 - ERROR_REMOTE_SESSION_LIMIT_EXCEEDED syscall.Errno = 1220 - ERROR_DUP_DOMAINNAME syscall.Errno = 1221 - ERROR_NO_NETWORK syscall.Errno = 1222 - ERROR_CANCELLED syscall.Errno = 1223 - ERROR_USER_MAPPED_FILE syscall.Errno = 1224 - ERROR_CONNECTION_REFUSED syscall.Errno = 1225 - ERROR_GRACEFUL_DISCONNECT syscall.Errno = 1226 - ERROR_ADDRESS_ALREADY_ASSOCIATED syscall.Errno = 1227 - ERROR_ADDRESS_NOT_ASSOCIATED syscall.Errno = 1228 - ERROR_CONNECTION_INVALID syscall.Errno = 1229 - ERROR_CONNECTION_ACTIVE syscall.Errno = 1230 - ERROR_NETWORK_UNREACHABLE syscall.Errno = 1231 - ERROR_HOST_UNREACHABLE syscall.Errno = 1232 - ERROR_PROTOCOL_UNREACHABLE syscall.Errno = 1233 - ERROR_PORT_UNREACHABLE syscall.Errno = 1234 - ERROR_REQUEST_ABORTED syscall.Errno = 1235 - ERROR_CONNECTION_ABORTED syscall.Errno = 1236 - ERROR_RETRY syscall.Errno = 1237 - ERROR_CONNECTION_COUNT_LIMIT syscall.Errno = 1238 - ERROR_LOGIN_TIME_RESTRICTION syscall.Errno = 1239 - ERROR_LOGIN_WKSTA_RESTRICTION syscall.Errno = 1240 - ERROR_INCORRECT_ADDRESS syscall.Errno = 1241 - ERROR_ALREADY_REGISTERED syscall.Errno = 1242 - ERROR_SERVICE_NOT_FOUND syscall.Errno = 1243 - ERROR_NOT_AUTHENTICATED syscall.Errno = 1244 - ERROR_NOT_LOGGED_ON syscall.Errno = 1245 - ERROR_CONTINUE syscall.Errno = 1246 - ERROR_ALREADY_INITIALIZED syscall.Errno = 1247 - ERROR_NO_MORE_DEVICES syscall.Errno = 1248 - ERROR_NO_SUCH_SITE syscall.Errno = 1249 - ERROR_DOMAIN_CONTROLLER_EXISTS syscall.Errno = 1250 - ERROR_ONLY_IF_CONNECTED syscall.Errno = 1251 - ERROR_OVERRIDE_NOCHANGES syscall.Errno = 1252 - ERROR_BAD_USER_PROFILE syscall.Errno = 1253 - ERROR_NOT_SUPPORTED_ON_SBS syscall.Errno = 1254 - ERROR_SERVER_SHUTDOWN_IN_PROGRESS syscall.Errno = 1255 - ERROR_HOST_DOWN syscall.Errno = 1256 - ERROR_NON_ACCOUNT_SID syscall.Errno = 1257 - ERROR_NON_DOMAIN_SID 
syscall.Errno = 1258 - ERROR_APPHELP_BLOCK syscall.Errno = 1259 - ERROR_ACCESS_DISABLED_BY_POLICY syscall.Errno = 1260 - ERROR_REG_NAT_CONSUMPTION syscall.Errno = 1261 - ERROR_CSCSHARE_OFFLINE syscall.Errno = 1262 - ERROR_PKINIT_FAILURE syscall.Errno = 1263 - ERROR_SMARTCARD_SUBSYSTEM_FAILURE syscall.Errno = 1264 - ERROR_DOWNGRADE_DETECTED syscall.Errno = 1265 - ERROR_MACHINE_LOCKED syscall.Errno = 1271 - ERROR_SMB_GUEST_LOGON_BLOCKED syscall.Errno = 1272 - ERROR_CALLBACK_SUPPLIED_INVALID_DATA syscall.Errno = 1273 - ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED syscall.Errno = 1274 - ERROR_DRIVER_BLOCKED syscall.Errno = 1275 - ERROR_INVALID_IMPORT_OF_NON_DLL syscall.Errno = 1276 - ERROR_ACCESS_DISABLED_WEBBLADE syscall.Errno = 1277 - ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER syscall.Errno = 1278 - ERROR_RECOVERY_FAILURE syscall.Errno = 1279 - ERROR_ALREADY_FIBER syscall.Errno = 1280 - ERROR_ALREADY_THREAD syscall.Errno = 1281 - ERROR_STACK_BUFFER_OVERRUN syscall.Errno = 1282 - ERROR_PARAMETER_QUOTA_EXCEEDED syscall.Errno = 1283 - ERROR_DEBUGGER_INACTIVE syscall.Errno = 1284 - ERROR_DELAY_LOAD_FAILED syscall.Errno = 1285 - ERROR_VDM_DISALLOWED syscall.Errno = 1286 - ERROR_UNIDENTIFIED_ERROR syscall.Errno = 1287 - ERROR_INVALID_CRUNTIME_PARAMETER syscall.Errno = 1288 - ERROR_BEYOND_VDL syscall.Errno = 1289 - ERROR_INCOMPATIBLE_SERVICE_SID_TYPE syscall.Errno = 1290 - ERROR_DRIVER_PROCESS_TERMINATED syscall.Errno = 1291 - ERROR_IMPLEMENTATION_LIMIT syscall.Errno = 1292 - ERROR_PROCESS_IS_PROTECTED syscall.Errno = 1293 - ERROR_SERVICE_NOTIFY_CLIENT_LAGGING syscall.Errno = 1294 - ERROR_DISK_QUOTA_EXCEEDED syscall.Errno = 1295 - ERROR_CONTENT_BLOCKED syscall.Errno = 1296 - ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE syscall.Errno = 1297 - ERROR_APP_HANG syscall.Errno = 1298 - ERROR_INVALID_LABEL syscall.Errno = 1299 - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - ERROR_SOME_NOT_MAPPED syscall.Errno = 1301 - ERROR_NO_QUOTAS_FOR_ACCOUNT syscall.Errno = 1302 - ERROR_LOCAL_USER_SESSION_KEY syscall.Errno = 1303 - ERROR_NULL_LM_PASSWORD syscall.Errno = 1304 - ERROR_UNKNOWN_REVISION syscall.Errno = 1305 - ERROR_REVISION_MISMATCH syscall.Errno = 1306 - ERROR_INVALID_OWNER syscall.Errno = 1307 - ERROR_INVALID_PRIMARY_GROUP syscall.Errno = 1308 - ERROR_NO_IMPERSONATION_TOKEN syscall.Errno = 1309 - ERROR_CANT_DISABLE_MANDATORY syscall.Errno = 1310 - ERROR_NO_LOGON_SERVERS syscall.Errno = 1311 - ERROR_NO_SUCH_LOGON_SESSION syscall.Errno = 1312 - ERROR_NO_SUCH_PRIVILEGE syscall.Errno = 1313 - ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 - ERROR_INVALID_ACCOUNT_NAME syscall.Errno = 1315 - ERROR_USER_EXISTS syscall.Errno = 1316 - ERROR_NO_SUCH_USER syscall.Errno = 1317 - ERROR_GROUP_EXISTS syscall.Errno = 1318 - ERROR_NO_SUCH_GROUP syscall.Errno = 1319 - ERROR_MEMBER_IN_GROUP syscall.Errno = 1320 - ERROR_MEMBER_NOT_IN_GROUP syscall.Errno = 1321 - ERROR_LAST_ADMIN syscall.Errno = 1322 - ERROR_WRONG_PASSWORD syscall.Errno = 1323 - ERROR_ILL_FORMED_PASSWORD syscall.Errno = 1324 - ERROR_PASSWORD_RESTRICTION syscall.Errno = 1325 - ERROR_LOGON_FAILURE syscall.Errno = 1326 - ERROR_ACCOUNT_RESTRICTION syscall.Errno = 1327 - ERROR_INVALID_LOGON_HOURS syscall.Errno = 1328 - ERROR_INVALID_WORKSTATION syscall.Errno = 1329 - ERROR_PASSWORD_EXPIRED syscall.Errno = 1330 - ERROR_ACCOUNT_DISABLED syscall.Errno = 1331 - ERROR_NONE_MAPPED syscall.Errno = 1332 - ERROR_TOO_MANY_LUIDS_REQUESTED syscall.Errno = 1333 - ERROR_LUIDS_EXHAUSTED syscall.Errno = 1334 - ERROR_INVALID_SUB_AUTHORITY syscall.Errno = 1335 - ERROR_INVALID_ACL syscall.Errno = 1336 
[flattened vendored listing collapsed for readability: several thousand auto-generated Windows error-code constants of the form NAME syscall.Errno = value, apparently from the golang.org/x/sys/windows package (zerrors_windows.go), running from ERROR_INVALID_SID (1337) through DNS_ERROR_RCODE_REFUSED (9005) and covering the ERROR_*, RPC_S_*/RPC_X_*, EPT_S_*, FRS_ERR_*, ERROR_DS_*, ERROR_CLUSTER_*, ERROR_LOG_*, ERROR_TRANSACTION_*, ERROR_CTX_*, PEERDIST_ERROR_*, and DNS_ERROR_* families, plus aliases such as DS_S_SUCCESS = ERROR_SUCCESS and DNS_ERROR_RCODE_NO_ERROR = ERROR_SUCCESS]
DNS_ERROR_RCODE_YXDOMAIN syscall.Errno = 9006 - DNS_ERROR_RCODE_YXRRSET syscall.Errno = 9007 - DNS_ERROR_RCODE_NXRRSET syscall.Errno = 9008 - DNS_ERROR_RCODE_NOTAUTH syscall.Errno = 9009 - DNS_ERROR_RCODE_NOTZONE syscall.Errno = 9010 - DNS_ERROR_RCODE_BADSIG syscall.Errno = 9016 - DNS_ERROR_RCODE_BADKEY syscall.Errno = 9017 - DNS_ERROR_RCODE_BADTIME syscall.Errno = 9018 - DNS_ERROR_RCODE_LAST = DNS_ERROR_RCODE_BADTIME - DNS_ERROR_DNSSEC_BASE syscall.Errno = 9100 - DNS_ERROR_KEYMASTER_REQUIRED syscall.Errno = 9101 - DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE syscall.Errno = 9102 - DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1 syscall.Errno = 9103 - DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS syscall.Errno = 9104 - DNS_ERROR_UNSUPPORTED_ALGORITHM syscall.Errno = 9105 - DNS_ERROR_INVALID_KEY_SIZE syscall.Errno = 9106 - DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE syscall.Errno = 9107 - DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION syscall.Errno = 9108 - DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR syscall.Errno = 9109 - DNS_ERROR_UNEXPECTED_CNG_ERROR syscall.Errno = 9110 - DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION syscall.Errno = 9111 - DNS_ERROR_KSP_NOT_ACCESSIBLE syscall.Errno = 9112 - DNS_ERROR_TOO_MANY_SKDS syscall.Errno = 9113 - DNS_ERROR_INVALID_ROLLOVER_PERIOD syscall.Errno = 9114 - DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET syscall.Errno = 9115 - DNS_ERROR_ROLLOVER_IN_PROGRESS syscall.Errno = 9116 - DNS_ERROR_STANDBY_KEY_NOT_PRESENT syscall.Errno = 9117 - DNS_ERROR_NOT_ALLOWED_ON_ZSK syscall.Errno = 9118 - DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD syscall.Errno = 9119 - DNS_ERROR_ROLLOVER_ALREADY_QUEUED syscall.Errno = 9120 - DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE syscall.Errno = 9121 - DNS_ERROR_BAD_KEYMASTER syscall.Errno = 9122 - DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD syscall.Errno = 9123 - DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT syscall.Errno = 9124 - DNS_ERROR_DNSSEC_IS_DISABLED syscall.Errno = 9125 - DNS_ERROR_INVALID_XML syscall.Errno = 9126 - DNS_ERROR_NO_VALID_TRUST_ANCHORS syscall.Errno = 9127 - DNS_ERROR_ROLLOVER_NOT_POKEABLE syscall.Errno = 9128 - DNS_ERROR_NSEC3_NAME_COLLISION syscall.Errno = 9129 - DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1 syscall.Errno = 9130 - DNS_ERROR_PACKET_FMT_BASE syscall.Errno = 9500 - DNS_INFO_NO_RECORDS syscall.Errno = 9501 - DNS_ERROR_BAD_PACKET syscall.Errno = 9502 - DNS_ERROR_NO_PACKET syscall.Errno = 9503 - DNS_ERROR_RCODE syscall.Errno = 9504 - DNS_ERROR_UNSECURE_PACKET syscall.Errno = 9505 - DNS_STATUS_PACKET_UNSECURE = DNS_ERROR_UNSECURE_PACKET - DNS_REQUEST_PENDING syscall.Errno = 9506 - DNS_ERROR_NO_MEMORY = ERROR_OUTOFMEMORY - DNS_ERROR_INVALID_NAME = ERROR_INVALID_NAME - DNS_ERROR_INVALID_DATA = ERROR_INVALID_DATA - DNS_ERROR_GENERAL_API_BASE syscall.Errno = 9550 - DNS_ERROR_INVALID_TYPE syscall.Errno = 9551 - DNS_ERROR_INVALID_IP_ADDRESS syscall.Errno = 9552 - DNS_ERROR_INVALID_PROPERTY syscall.Errno = 9553 - DNS_ERROR_TRY_AGAIN_LATER syscall.Errno = 9554 - DNS_ERROR_NOT_UNIQUE syscall.Errno = 9555 - DNS_ERROR_NON_RFC_NAME syscall.Errno = 9556 - DNS_STATUS_FQDN syscall.Errno = 9557 - DNS_STATUS_DOTTED_NAME syscall.Errno = 9558 - DNS_STATUS_SINGLE_PART_NAME syscall.Errno = 9559 - DNS_ERROR_INVALID_NAME_CHAR syscall.Errno = 9560 - DNS_ERROR_NUMERIC_NAME syscall.Errno = 9561 - DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER syscall.Errno = 9562 - DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION syscall.Errno = 9563 - DNS_ERROR_CANNOT_FIND_ROOT_HINTS syscall.Errno = 9564 - DNS_ERROR_INCONSISTENT_ROOT_HINTS syscall.Errno = 9565 - DNS_ERROR_DWORD_VALUE_TOO_SMALL 
syscall.Errno = 9566 - DNS_ERROR_DWORD_VALUE_TOO_LARGE syscall.Errno = 9567 - DNS_ERROR_BACKGROUND_LOADING syscall.Errno = 9568 - DNS_ERROR_NOT_ALLOWED_ON_RODC syscall.Errno = 9569 - DNS_ERROR_NOT_ALLOWED_UNDER_DNAME syscall.Errno = 9570 - DNS_ERROR_DELEGATION_REQUIRED syscall.Errno = 9571 - DNS_ERROR_INVALID_POLICY_TABLE syscall.Errno = 9572 - DNS_ERROR_ADDRESS_REQUIRED syscall.Errno = 9573 - DNS_ERROR_ZONE_BASE syscall.Errno = 9600 - DNS_ERROR_ZONE_DOES_NOT_EXIST syscall.Errno = 9601 - DNS_ERROR_NO_ZONE_INFO syscall.Errno = 9602 - DNS_ERROR_INVALID_ZONE_OPERATION syscall.Errno = 9603 - DNS_ERROR_ZONE_CONFIGURATION_ERROR syscall.Errno = 9604 - DNS_ERROR_ZONE_HAS_NO_SOA_RECORD syscall.Errno = 9605 - DNS_ERROR_ZONE_HAS_NO_NS_RECORDS syscall.Errno = 9606 - DNS_ERROR_ZONE_LOCKED syscall.Errno = 9607 - DNS_ERROR_ZONE_CREATION_FAILED syscall.Errno = 9608 - DNS_ERROR_ZONE_ALREADY_EXISTS syscall.Errno = 9609 - DNS_ERROR_AUTOZONE_ALREADY_EXISTS syscall.Errno = 9610 - DNS_ERROR_INVALID_ZONE_TYPE syscall.Errno = 9611 - DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP syscall.Errno = 9612 - DNS_ERROR_ZONE_NOT_SECONDARY syscall.Errno = 9613 - DNS_ERROR_NEED_SECONDARY_ADDRESSES syscall.Errno = 9614 - DNS_ERROR_WINS_INIT_FAILED syscall.Errno = 9615 - DNS_ERROR_NEED_WINS_SERVERS syscall.Errno = 9616 - DNS_ERROR_NBSTAT_INIT_FAILED syscall.Errno = 9617 - DNS_ERROR_SOA_DELETE_INVALID syscall.Errno = 9618 - DNS_ERROR_FORWARDER_ALREADY_EXISTS syscall.Errno = 9619 - DNS_ERROR_ZONE_REQUIRES_MASTER_IP syscall.Errno = 9620 - DNS_ERROR_ZONE_IS_SHUTDOWN syscall.Errno = 9621 - DNS_ERROR_ZONE_LOCKED_FOR_SIGNING syscall.Errno = 9622 - DNS_ERROR_DATAFILE_BASE syscall.Errno = 9650 - DNS_ERROR_PRIMARY_REQUIRES_DATAFILE syscall.Errno = 9651 - DNS_ERROR_INVALID_DATAFILE_NAME syscall.Errno = 9652 - DNS_ERROR_DATAFILE_OPEN_FAILURE syscall.Errno = 9653 - DNS_ERROR_FILE_WRITEBACK_FAILED syscall.Errno = 9654 - DNS_ERROR_DATAFILE_PARSING syscall.Errno = 9655 - DNS_ERROR_DATABASE_BASE syscall.Errno = 9700 - DNS_ERROR_RECORD_DOES_NOT_EXIST syscall.Errno = 9701 - DNS_ERROR_RECORD_FORMAT syscall.Errno = 9702 - DNS_ERROR_NODE_CREATION_FAILED syscall.Errno = 9703 - DNS_ERROR_UNKNOWN_RECORD_TYPE syscall.Errno = 9704 - DNS_ERROR_RECORD_TIMED_OUT syscall.Errno = 9705 - DNS_ERROR_NAME_NOT_IN_ZONE syscall.Errno = 9706 - DNS_ERROR_CNAME_LOOP syscall.Errno = 9707 - DNS_ERROR_NODE_IS_CNAME syscall.Errno = 9708 - DNS_ERROR_CNAME_COLLISION syscall.Errno = 9709 - DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT syscall.Errno = 9710 - DNS_ERROR_RECORD_ALREADY_EXISTS syscall.Errno = 9711 - DNS_ERROR_SECONDARY_DATA syscall.Errno = 9712 - DNS_ERROR_NO_CREATE_CACHE_DATA syscall.Errno = 9713 - DNS_ERROR_NAME_DOES_NOT_EXIST syscall.Errno = 9714 - DNS_WARNING_PTR_CREATE_FAILED syscall.Errno = 9715 - DNS_WARNING_DOMAIN_UNDELETED syscall.Errno = 9716 - DNS_ERROR_DS_UNAVAILABLE syscall.Errno = 9717 - DNS_ERROR_DS_ZONE_ALREADY_EXISTS syscall.Errno = 9718 - DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE syscall.Errno = 9719 - DNS_ERROR_NODE_IS_DNAME syscall.Errno = 9720 - DNS_ERROR_DNAME_COLLISION syscall.Errno = 9721 - DNS_ERROR_ALIAS_LOOP syscall.Errno = 9722 - DNS_ERROR_OPERATION_BASE syscall.Errno = 9750 - DNS_INFO_AXFR_COMPLETE syscall.Errno = 9751 - DNS_ERROR_AXFR syscall.Errno = 9752 - DNS_INFO_ADDED_LOCAL_WINS syscall.Errno = 9753 - DNS_ERROR_SECURE_BASE syscall.Errno = 9800 - DNS_STATUS_CONTINUE_NEEDED syscall.Errno = 9801 - DNS_ERROR_SETUP_BASE syscall.Errno = 9850 - DNS_ERROR_NO_TCPIP syscall.Errno = 9851 - DNS_ERROR_NO_DNS_SERVERS syscall.Errno = 9852 - DNS_ERROR_DP_BASE 
syscall.Errno = 9900 - DNS_ERROR_DP_DOES_NOT_EXIST syscall.Errno = 9901 - DNS_ERROR_DP_ALREADY_EXISTS syscall.Errno = 9902 - DNS_ERROR_DP_NOT_ENLISTED syscall.Errno = 9903 - DNS_ERROR_DP_ALREADY_ENLISTED syscall.Errno = 9904 - DNS_ERROR_DP_NOT_AVAILABLE syscall.Errno = 9905 - DNS_ERROR_DP_FSMO_ERROR syscall.Errno = 9906 - DNS_ERROR_RRL_NOT_ENABLED syscall.Errno = 9911 - DNS_ERROR_RRL_INVALID_WINDOW_SIZE syscall.Errno = 9912 - DNS_ERROR_RRL_INVALID_IPV4_PREFIX syscall.Errno = 9913 - DNS_ERROR_RRL_INVALID_IPV6_PREFIX syscall.Errno = 9914 - DNS_ERROR_RRL_INVALID_TC_RATE syscall.Errno = 9915 - DNS_ERROR_RRL_INVALID_LEAK_RATE syscall.Errno = 9916 - DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE syscall.Errno = 9917 - DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS syscall.Errno = 9921 - DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST syscall.Errno = 9922 - DNS_ERROR_VIRTUALIZATION_TREE_LOCKED syscall.Errno = 9923 - DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME syscall.Errno = 9924 - DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE syscall.Errno = 9925 - DNS_ERROR_ZONESCOPE_ALREADY_EXISTS syscall.Errno = 9951 - DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST syscall.Errno = 9952 - DNS_ERROR_DEFAULT_ZONESCOPE syscall.Errno = 9953 - DNS_ERROR_INVALID_ZONESCOPE_NAME syscall.Errno = 9954 - DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES syscall.Errno = 9955 - DNS_ERROR_LOAD_ZONESCOPE_FAILED syscall.Errno = 9956 - DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED syscall.Errno = 9957 - DNS_ERROR_INVALID_SCOPE_NAME syscall.Errno = 9958 - DNS_ERROR_SCOPE_DOES_NOT_EXIST syscall.Errno = 9959 - DNS_ERROR_DEFAULT_SCOPE syscall.Errno = 9960 - DNS_ERROR_INVALID_SCOPE_OPERATION syscall.Errno = 9961 - DNS_ERROR_SCOPE_LOCKED syscall.Errno = 9962 - DNS_ERROR_SCOPE_ALREADY_EXISTS syscall.Errno = 9963 - DNS_ERROR_POLICY_ALREADY_EXISTS syscall.Errno = 9971 - DNS_ERROR_POLICY_DOES_NOT_EXIST syscall.Errno = 9972 - DNS_ERROR_POLICY_INVALID_CRITERIA syscall.Errno = 9973 - DNS_ERROR_POLICY_INVALID_SETTINGS syscall.Errno = 9974 - DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED syscall.Errno = 9975 - DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST syscall.Errno = 9976 - DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS syscall.Errno = 9977 - DNS_ERROR_SUBNET_DOES_NOT_EXIST syscall.Errno = 9978 - DNS_ERROR_SUBNET_ALREADY_EXISTS syscall.Errno = 9979 - DNS_ERROR_POLICY_LOCKED syscall.Errno = 9980 - DNS_ERROR_POLICY_INVALID_WEIGHT syscall.Errno = 9981 - DNS_ERROR_POLICY_INVALID_NAME syscall.Errno = 9982 - DNS_ERROR_POLICY_MISSING_CRITERIA syscall.Errno = 9983 - DNS_ERROR_INVALID_CLIENT_SUBNET_NAME syscall.Errno = 9984 - DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID syscall.Errno = 9985 - DNS_ERROR_POLICY_SCOPE_MISSING syscall.Errno = 9986 - DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED syscall.Errno = 9987 - DNS_ERROR_SERVERSCOPE_IS_REFERENCED syscall.Errno = 9988 - DNS_ERROR_ZONESCOPE_IS_REFERENCED syscall.Errno = 9989 - DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET syscall.Errno = 9990 - DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL syscall.Errno = 9991 - DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL syscall.Errno = 9992 - DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE syscall.Errno = 9993 - DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN syscall.Errno = 9994 - DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE syscall.Errno = 9995 - DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY syscall.Errno = 9996 - WSABASEERR syscall.Errno = 10000 - WSAEINTR syscall.Errno = 10004 - WSAEBADF syscall.Errno = 10009 - WSAEACCES syscall.Errno = 10013 - WSAEFAULT syscall.Errno = 10014 - WSAEINVAL syscall.Errno = 10022 - WSAEMFILE 
syscall.Errno = 10024 - WSAEWOULDBLOCK syscall.Errno = 10035 - WSAEINPROGRESS syscall.Errno = 10036 - WSAEALREADY syscall.Errno = 10037 - WSAENOTSOCK syscall.Errno = 10038 - WSAEDESTADDRREQ syscall.Errno = 10039 - WSAEMSGSIZE syscall.Errno = 10040 - WSAEPROTOTYPE syscall.Errno = 10041 - WSAENOPROTOOPT syscall.Errno = 10042 - WSAEPROTONOSUPPORT syscall.Errno = 10043 - WSAESOCKTNOSUPPORT syscall.Errno = 10044 - WSAEOPNOTSUPP syscall.Errno = 10045 - WSAEPFNOSUPPORT syscall.Errno = 10046 - WSAEAFNOSUPPORT syscall.Errno = 10047 - WSAEADDRINUSE syscall.Errno = 10048 - WSAEADDRNOTAVAIL syscall.Errno = 10049 - WSAENETDOWN syscall.Errno = 10050 - WSAENETUNREACH syscall.Errno = 10051 - WSAENETRESET syscall.Errno = 10052 - WSAECONNABORTED syscall.Errno = 10053 - WSAECONNRESET syscall.Errno = 10054 - WSAENOBUFS syscall.Errno = 10055 - WSAEISCONN syscall.Errno = 10056 - WSAENOTCONN syscall.Errno = 10057 - WSAESHUTDOWN syscall.Errno = 10058 - WSAETOOMANYREFS syscall.Errno = 10059 - WSAETIMEDOUT syscall.Errno = 10060 - WSAECONNREFUSED syscall.Errno = 10061 - WSAELOOP syscall.Errno = 10062 - WSAENAMETOOLONG syscall.Errno = 10063 - WSAEHOSTDOWN syscall.Errno = 10064 - WSAEHOSTUNREACH syscall.Errno = 10065 - WSAENOTEMPTY syscall.Errno = 10066 - WSAEPROCLIM syscall.Errno = 10067 - WSAEUSERS syscall.Errno = 10068 - WSAEDQUOT syscall.Errno = 10069 - WSAESTALE syscall.Errno = 10070 - WSAEREMOTE syscall.Errno = 10071 - WSASYSNOTREADY syscall.Errno = 10091 - WSAVERNOTSUPPORTED syscall.Errno = 10092 - WSANOTINITIALISED syscall.Errno = 10093 - WSAEDISCON syscall.Errno = 10101 - WSAENOMORE syscall.Errno = 10102 - WSAECANCELLED syscall.Errno = 10103 - WSAEINVALIDPROCTABLE syscall.Errno = 10104 - WSAEINVALIDPROVIDER syscall.Errno = 10105 - WSAEPROVIDERFAILEDINIT syscall.Errno = 10106 - WSASYSCALLFAILURE syscall.Errno = 10107 - WSASERVICE_NOT_FOUND syscall.Errno = 10108 - WSATYPE_NOT_FOUND syscall.Errno = 10109 - WSA_E_NO_MORE syscall.Errno = 10110 - WSA_E_CANCELLED syscall.Errno = 10111 - WSAEREFUSED syscall.Errno = 10112 - WSAHOST_NOT_FOUND syscall.Errno = 11001 - WSATRY_AGAIN syscall.Errno = 11002 - WSANO_RECOVERY syscall.Errno = 11003 - WSANO_DATA syscall.Errno = 11004 - WSA_QOS_RECEIVERS syscall.Errno = 11005 - WSA_QOS_SENDERS syscall.Errno = 11006 - WSA_QOS_NO_SENDERS syscall.Errno = 11007 - WSA_QOS_NO_RECEIVERS syscall.Errno = 11008 - WSA_QOS_REQUEST_CONFIRMED syscall.Errno = 11009 - WSA_QOS_ADMISSION_FAILURE syscall.Errno = 11010 - WSA_QOS_POLICY_FAILURE syscall.Errno = 11011 - WSA_QOS_BAD_STYLE syscall.Errno = 11012 - WSA_QOS_BAD_OBJECT syscall.Errno = 11013 - WSA_QOS_TRAFFIC_CTRL_ERROR syscall.Errno = 11014 - WSA_QOS_GENERIC_ERROR syscall.Errno = 11015 - WSA_QOS_ESERVICETYPE syscall.Errno = 11016 - WSA_QOS_EFLOWSPEC syscall.Errno = 11017 - WSA_QOS_EPROVSPECBUF syscall.Errno = 11018 - WSA_QOS_EFILTERSTYLE syscall.Errno = 11019 - WSA_QOS_EFILTERTYPE syscall.Errno = 11020 - WSA_QOS_EFILTERCOUNT syscall.Errno = 11021 - WSA_QOS_EOBJLENGTH syscall.Errno = 11022 - WSA_QOS_EFLOWCOUNT syscall.Errno = 11023 - WSA_QOS_EUNKOWNPSOBJ syscall.Errno = 11024 - WSA_QOS_EPOLICYOBJ syscall.Errno = 11025 - WSA_QOS_EFLOWDESC syscall.Errno = 11026 - WSA_QOS_EPSFLOWSPEC syscall.Errno = 11027 - WSA_QOS_EPSFILTERSPEC syscall.Errno = 11028 - WSA_QOS_ESDMODEOBJ syscall.Errno = 11029 - WSA_QOS_ESHAPERATEOBJ syscall.Errno = 11030 - WSA_QOS_RESERVED_PETYPE syscall.Errno = 11031 - WSA_SECURE_HOST_NOT_FOUND syscall.Errno = 11032 - WSA_IPSEC_NAME_POLICY_ERROR syscall.Errno = 11033 - ERROR_IPSEC_QM_POLICY_EXISTS syscall.Errno = 13000 - 
ERROR_IPSEC_QM_POLICY_NOT_FOUND syscall.Errno = 13001 - ERROR_IPSEC_QM_POLICY_IN_USE syscall.Errno = 13002 - ERROR_IPSEC_MM_POLICY_EXISTS syscall.Errno = 13003 - ERROR_IPSEC_MM_POLICY_NOT_FOUND syscall.Errno = 13004 - ERROR_IPSEC_MM_POLICY_IN_USE syscall.Errno = 13005 - ERROR_IPSEC_MM_FILTER_EXISTS syscall.Errno = 13006 - ERROR_IPSEC_MM_FILTER_NOT_FOUND syscall.Errno = 13007 - ERROR_IPSEC_TRANSPORT_FILTER_EXISTS syscall.Errno = 13008 - ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND syscall.Errno = 13009 - ERROR_IPSEC_MM_AUTH_EXISTS syscall.Errno = 13010 - ERROR_IPSEC_MM_AUTH_NOT_FOUND syscall.Errno = 13011 - ERROR_IPSEC_MM_AUTH_IN_USE syscall.Errno = 13012 - ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND syscall.Errno = 13013 - ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND syscall.Errno = 13014 - ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND syscall.Errno = 13015 - ERROR_IPSEC_TUNNEL_FILTER_EXISTS syscall.Errno = 13016 - ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND syscall.Errno = 13017 - ERROR_IPSEC_MM_FILTER_PENDING_DELETION syscall.Errno = 13018 - ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION syscall.Errno = 13019 - ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION syscall.Errno = 13020 - ERROR_IPSEC_MM_POLICY_PENDING_DELETION syscall.Errno = 13021 - ERROR_IPSEC_MM_AUTH_PENDING_DELETION syscall.Errno = 13022 - ERROR_IPSEC_QM_POLICY_PENDING_DELETION syscall.Errno = 13023 - WARNING_IPSEC_MM_POLICY_PRUNED syscall.Errno = 13024 - WARNING_IPSEC_QM_POLICY_PRUNED syscall.Errno = 13025 - ERROR_IPSEC_IKE_NEG_STATUS_BEGIN syscall.Errno = 13800 - ERROR_IPSEC_IKE_AUTH_FAIL syscall.Errno = 13801 - ERROR_IPSEC_IKE_ATTRIB_FAIL syscall.Errno = 13802 - ERROR_IPSEC_IKE_NEGOTIATION_PENDING syscall.Errno = 13803 - ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR syscall.Errno = 13804 - ERROR_IPSEC_IKE_TIMED_OUT syscall.Errno = 13805 - ERROR_IPSEC_IKE_NO_CERT syscall.Errno = 13806 - ERROR_IPSEC_IKE_SA_DELETED syscall.Errno = 13807 - ERROR_IPSEC_IKE_SA_REAPED syscall.Errno = 13808 - ERROR_IPSEC_IKE_MM_ACQUIRE_DROP syscall.Errno = 13809 - ERROR_IPSEC_IKE_QM_ACQUIRE_DROP syscall.Errno = 13810 - ERROR_IPSEC_IKE_QUEUE_DROP_MM syscall.Errno = 13811 - ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM syscall.Errno = 13812 - ERROR_IPSEC_IKE_DROP_NO_RESPONSE syscall.Errno = 13813 - ERROR_IPSEC_IKE_MM_DELAY_DROP syscall.Errno = 13814 - ERROR_IPSEC_IKE_QM_DELAY_DROP syscall.Errno = 13815 - ERROR_IPSEC_IKE_ERROR syscall.Errno = 13816 - ERROR_IPSEC_IKE_CRL_FAILED syscall.Errno = 13817 - ERROR_IPSEC_IKE_INVALID_KEY_USAGE syscall.Errno = 13818 - ERROR_IPSEC_IKE_INVALID_CERT_TYPE syscall.Errno = 13819 - ERROR_IPSEC_IKE_NO_PRIVATE_KEY syscall.Errno = 13820 - ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY syscall.Errno = 13821 - ERROR_IPSEC_IKE_DH_FAIL syscall.Errno = 13822 - ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED syscall.Errno = 13823 - ERROR_IPSEC_IKE_INVALID_HEADER syscall.Errno = 13824 - ERROR_IPSEC_IKE_NO_POLICY syscall.Errno = 13825 - ERROR_IPSEC_IKE_INVALID_SIGNATURE syscall.Errno = 13826 - ERROR_IPSEC_IKE_KERBEROS_ERROR syscall.Errno = 13827 - ERROR_IPSEC_IKE_NO_PUBLIC_KEY syscall.Errno = 13828 - ERROR_IPSEC_IKE_PROCESS_ERR syscall.Errno = 13829 - ERROR_IPSEC_IKE_PROCESS_ERR_SA syscall.Errno = 13830 - ERROR_IPSEC_IKE_PROCESS_ERR_PROP syscall.Errno = 13831 - ERROR_IPSEC_IKE_PROCESS_ERR_TRANS syscall.Errno = 13832 - ERROR_IPSEC_IKE_PROCESS_ERR_KE syscall.Errno = 13833 - ERROR_IPSEC_IKE_PROCESS_ERR_ID syscall.Errno = 13834 - ERROR_IPSEC_IKE_PROCESS_ERR_CERT syscall.Errno = 13835 - ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ syscall.Errno = 13836 - ERROR_IPSEC_IKE_PROCESS_ERR_HASH syscall.Errno = 
13837 - ERROR_IPSEC_IKE_PROCESS_ERR_SIG syscall.Errno = 13838 - ERROR_IPSEC_IKE_PROCESS_ERR_NONCE syscall.Errno = 13839 - ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY syscall.Errno = 13840 - ERROR_IPSEC_IKE_PROCESS_ERR_DELETE syscall.Errno = 13841 - ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR syscall.Errno = 13842 - ERROR_IPSEC_IKE_INVALID_PAYLOAD syscall.Errno = 13843 - ERROR_IPSEC_IKE_LOAD_SOFT_SA syscall.Errno = 13844 - ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN syscall.Errno = 13845 - ERROR_IPSEC_IKE_INVALID_COOKIE syscall.Errno = 13846 - ERROR_IPSEC_IKE_NO_PEER_CERT syscall.Errno = 13847 - ERROR_IPSEC_IKE_PEER_CRL_FAILED syscall.Errno = 13848 - ERROR_IPSEC_IKE_POLICY_CHANGE syscall.Errno = 13849 - ERROR_IPSEC_IKE_NO_MM_POLICY syscall.Errno = 13850 - ERROR_IPSEC_IKE_NOTCBPRIV syscall.Errno = 13851 - ERROR_IPSEC_IKE_SECLOADFAIL syscall.Errno = 13852 - ERROR_IPSEC_IKE_FAILSSPINIT syscall.Errno = 13853 - ERROR_IPSEC_IKE_FAILQUERYSSP syscall.Errno = 13854 - ERROR_IPSEC_IKE_SRVACQFAIL syscall.Errno = 13855 - ERROR_IPSEC_IKE_SRVQUERYCRED syscall.Errno = 13856 - ERROR_IPSEC_IKE_GETSPIFAIL syscall.Errno = 13857 - ERROR_IPSEC_IKE_INVALID_FILTER syscall.Errno = 13858 - ERROR_IPSEC_IKE_OUT_OF_MEMORY syscall.Errno = 13859 - ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED syscall.Errno = 13860 - ERROR_IPSEC_IKE_INVALID_POLICY syscall.Errno = 13861 - ERROR_IPSEC_IKE_UNKNOWN_DOI syscall.Errno = 13862 - ERROR_IPSEC_IKE_INVALID_SITUATION syscall.Errno = 13863 - ERROR_IPSEC_IKE_DH_FAILURE syscall.Errno = 13864 - ERROR_IPSEC_IKE_INVALID_GROUP syscall.Errno = 13865 - ERROR_IPSEC_IKE_ENCRYPT syscall.Errno = 13866 - ERROR_IPSEC_IKE_DECRYPT syscall.Errno = 13867 - ERROR_IPSEC_IKE_POLICY_MATCH syscall.Errno = 13868 - ERROR_IPSEC_IKE_UNSUPPORTED_ID syscall.Errno = 13869 - ERROR_IPSEC_IKE_INVALID_HASH syscall.Errno = 13870 - ERROR_IPSEC_IKE_INVALID_HASH_ALG syscall.Errno = 13871 - ERROR_IPSEC_IKE_INVALID_HASH_SIZE syscall.Errno = 13872 - ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG syscall.Errno = 13873 - ERROR_IPSEC_IKE_INVALID_AUTH_ALG syscall.Errno = 13874 - ERROR_IPSEC_IKE_INVALID_SIG syscall.Errno = 13875 - ERROR_IPSEC_IKE_LOAD_FAILED syscall.Errno = 13876 - ERROR_IPSEC_IKE_RPC_DELETE syscall.Errno = 13877 - ERROR_IPSEC_IKE_BENIGN_REINIT syscall.Errno = 13878 - ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY syscall.Errno = 13879 - ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION syscall.Errno = 13880 - ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN syscall.Errno = 13881 - ERROR_IPSEC_IKE_MM_LIMIT syscall.Errno = 13882 - ERROR_IPSEC_IKE_NEGOTIATION_DISABLED syscall.Errno = 13883 - ERROR_IPSEC_IKE_QM_LIMIT syscall.Errno = 13884 - ERROR_IPSEC_IKE_MM_EXPIRED syscall.Errno = 13885 - ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID syscall.Errno = 13886 - ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH syscall.Errno = 13887 - ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID syscall.Errno = 13888 - ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD syscall.Errno = 13889 - ERROR_IPSEC_IKE_DOS_COOKIE_SENT syscall.Errno = 13890 - ERROR_IPSEC_IKE_SHUTTING_DOWN syscall.Errno = 13891 - ERROR_IPSEC_IKE_CGA_AUTH_FAILED syscall.Errno = 13892 - ERROR_IPSEC_IKE_PROCESS_ERR_NATOA syscall.Errno = 13893 - ERROR_IPSEC_IKE_INVALID_MM_FOR_QM syscall.Errno = 13894 - ERROR_IPSEC_IKE_QM_EXPIRED syscall.Errno = 13895 - ERROR_IPSEC_IKE_TOO_MANY_FILTERS syscall.Errno = 13896 - ERROR_IPSEC_IKE_NEG_STATUS_END syscall.Errno = 13897 - ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL syscall.Errno = 13898 - ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE syscall.Errno = 13899 - ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING syscall.Errno = 13900 - 
ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING syscall.Errno = 13901 - ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS syscall.Errno = 13902 - ERROR_IPSEC_IKE_RATELIMIT_DROP syscall.Errno = 13903 - ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE syscall.Errno = 13904 - ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE syscall.Errno = 13905 - ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE syscall.Errno = 13906 - ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY syscall.Errno = 13907 - ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE syscall.Errno = 13908 - ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END syscall.Errno = 13909 - ERROR_IPSEC_BAD_SPI syscall.Errno = 13910 - ERROR_IPSEC_SA_LIFETIME_EXPIRED syscall.Errno = 13911 - ERROR_IPSEC_WRONG_SA syscall.Errno = 13912 - ERROR_IPSEC_REPLAY_CHECK_FAILED syscall.Errno = 13913 - ERROR_IPSEC_INVALID_PACKET syscall.Errno = 13914 - ERROR_IPSEC_INTEGRITY_CHECK_FAILED syscall.Errno = 13915 - ERROR_IPSEC_CLEAR_TEXT_DROP syscall.Errno = 13916 - ERROR_IPSEC_AUTH_FIREWALL_DROP syscall.Errno = 13917 - ERROR_IPSEC_THROTTLE_DROP syscall.Errno = 13918 - ERROR_IPSEC_DOSP_BLOCK syscall.Errno = 13925 - ERROR_IPSEC_DOSP_RECEIVED_MULTICAST syscall.Errno = 13926 - ERROR_IPSEC_DOSP_INVALID_PACKET syscall.Errno = 13927 - ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED syscall.Errno = 13928 - ERROR_IPSEC_DOSP_MAX_ENTRIES syscall.Errno = 13929 - ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED syscall.Errno = 13930 - ERROR_IPSEC_DOSP_NOT_INSTALLED syscall.Errno = 13931 - ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES syscall.Errno = 13932 - ERROR_SXS_SECTION_NOT_FOUND syscall.Errno = 14000 - ERROR_SXS_CANT_GEN_ACTCTX syscall.Errno = 14001 - ERROR_SXS_INVALID_ACTCTXDATA_FORMAT syscall.Errno = 14002 - ERROR_SXS_ASSEMBLY_NOT_FOUND syscall.Errno = 14003 - ERROR_SXS_MANIFEST_FORMAT_ERROR syscall.Errno = 14004 - ERROR_SXS_MANIFEST_PARSE_ERROR syscall.Errno = 14005 - ERROR_SXS_ACTIVATION_CONTEXT_DISABLED syscall.Errno = 14006 - ERROR_SXS_KEY_NOT_FOUND syscall.Errno = 14007 - ERROR_SXS_VERSION_CONFLICT syscall.Errno = 14008 - ERROR_SXS_WRONG_SECTION_TYPE syscall.Errno = 14009 - ERROR_SXS_THREAD_QUERIES_DISABLED syscall.Errno = 14010 - ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET syscall.Errno = 14011 - ERROR_SXS_UNKNOWN_ENCODING_GROUP syscall.Errno = 14012 - ERROR_SXS_UNKNOWN_ENCODING syscall.Errno = 14013 - ERROR_SXS_INVALID_XML_NAMESPACE_URI syscall.Errno = 14014 - ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14015 - ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED syscall.Errno = 14016 - ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14017 - ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14018 - ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE syscall.Errno = 14019 - ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT syscall.Errno = 14020 - ERROR_SXS_DUPLICATE_DLL_NAME syscall.Errno = 14021 - ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME syscall.Errno = 14022 - ERROR_SXS_DUPLICATE_CLSID syscall.Errno = 14023 - ERROR_SXS_DUPLICATE_IID syscall.Errno = 14024 - ERROR_SXS_DUPLICATE_TLBID syscall.Errno = 14025 - ERROR_SXS_DUPLICATE_PROGID syscall.Errno = 14026 - ERROR_SXS_DUPLICATE_ASSEMBLY_NAME syscall.Errno = 14027 - ERROR_SXS_FILE_HASH_MISMATCH syscall.Errno = 14028 - ERROR_SXS_POLICY_PARSE_ERROR syscall.Errno = 14029 - ERROR_SXS_XML_E_MISSINGQUOTE syscall.Errno = 14030 - ERROR_SXS_XML_E_COMMENTSYNTAX syscall.Errno = 14031 - ERROR_SXS_XML_E_BADSTARTNAMECHAR syscall.Errno = 14032 - ERROR_SXS_XML_E_BADNAMECHAR syscall.Errno = 14033 - 
ERROR_SXS_XML_E_BADCHARINSTRING syscall.Errno = 14034 - ERROR_SXS_XML_E_XMLDECLSYNTAX syscall.Errno = 14035 - ERROR_SXS_XML_E_BADCHARDATA syscall.Errno = 14036 - ERROR_SXS_XML_E_MISSINGWHITESPACE syscall.Errno = 14037 - ERROR_SXS_XML_E_EXPECTINGTAGEND syscall.Errno = 14038 - ERROR_SXS_XML_E_MISSINGSEMICOLON syscall.Errno = 14039 - ERROR_SXS_XML_E_UNBALANCEDPAREN syscall.Errno = 14040 - ERROR_SXS_XML_E_INTERNALERROR syscall.Errno = 14041 - ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE syscall.Errno = 14042 - ERROR_SXS_XML_E_INCOMPLETE_ENCODING syscall.Errno = 14043 - ERROR_SXS_XML_E_MISSING_PAREN syscall.Errno = 14044 - ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE syscall.Errno = 14045 - ERROR_SXS_XML_E_MULTIPLE_COLONS syscall.Errno = 14046 - ERROR_SXS_XML_E_INVALID_DECIMAL syscall.Errno = 14047 - ERROR_SXS_XML_E_INVALID_HEXIDECIMAL syscall.Errno = 14048 - ERROR_SXS_XML_E_INVALID_UNICODE syscall.Errno = 14049 - ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK syscall.Errno = 14050 - ERROR_SXS_XML_E_UNEXPECTEDENDTAG syscall.Errno = 14051 - ERROR_SXS_XML_E_UNCLOSEDTAG syscall.Errno = 14052 - ERROR_SXS_XML_E_DUPLICATEATTRIBUTE syscall.Errno = 14053 - ERROR_SXS_XML_E_MULTIPLEROOTS syscall.Errno = 14054 - ERROR_SXS_XML_E_INVALIDATROOTLEVEL syscall.Errno = 14055 - ERROR_SXS_XML_E_BADXMLDECL syscall.Errno = 14056 - ERROR_SXS_XML_E_MISSINGROOT syscall.Errno = 14057 - ERROR_SXS_XML_E_UNEXPECTEDEOF syscall.Errno = 14058 - ERROR_SXS_XML_E_BADPEREFINSUBSET syscall.Errno = 14059 - ERROR_SXS_XML_E_UNCLOSEDSTARTTAG syscall.Errno = 14060 - ERROR_SXS_XML_E_UNCLOSEDENDTAG syscall.Errno = 14061 - ERROR_SXS_XML_E_UNCLOSEDSTRING syscall.Errno = 14062 - ERROR_SXS_XML_E_UNCLOSEDCOMMENT syscall.Errno = 14063 - ERROR_SXS_XML_E_UNCLOSEDDECL syscall.Errno = 14064 - ERROR_SXS_XML_E_UNCLOSEDCDATA syscall.Errno = 14065 - ERROR_SXS_XML_E_RESERVEDNAMESPACE syscall.Errno = 14066 - ERROR_SXS_XML_E_INVALIDENCODING syscall.Errno = 14067 - ERROR_SXS_XML_E_INVALIDSWITCH syscall.Errno = 14068 - ERROR_SXS_XML_E_BADXMLCASE syscall.Errno = 14069 - ERROR_SXS_XML_E_INVALID_STANDALONE syscall.Errno = 14070 - ERROR_SXS_XML_E_UNEXPECTED_STANDALONE syscall.Errno = 14071 - ERROR_SXS_XML_E_INVALID_VERSION syscall.Errno = 14072 - ERROR_SXS_XML_E_MISSINGEQUALS syscall.Errno = 14073 - ERROR_SXS_PROTECTION_RECOVERY_FAILED syscall.Errno = 14074 - ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT syscall.Errno = 14075 - ERROR_SXS_PROTECTION_CATALOG_NOT_VALID syscall.Errno = 14076 - ERROR_SXS_UNTRANSLATABLE_HRESULT syscall.Errno = 14077 - ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING syscall.Errno = 14078 - ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE syscall.Errno = 14079 - ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14080 - ERROR_SXS_ASSEMBLY_MISSING syscall.Errno = 14081 - ERROR_SXS_CORRUPT_ACTIVATION_STACK syscall.Errno = 14082 - ERROR_SXS_CORRUPTION syscall.Errno = 14083 - ERROR_SXS_EARLY_DEACTIVATION syscall.Errno = 14084 - ERROR_SXS_INVALID_DEACTIVATION syscall.Errno = 14085 - ERROR_SXS_MULTIPLE_DEACTIVATION syscall.Errno = 14086 - ERROR_SXS_PROCESS_TERMINATION_REQUESTED syscall.Errno = 14087 - ERROR_SXS_RELEASE_ACTIVATION_CONTEXT syscall.Errno = 14088 - ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY syscall.Errno = 14089 - ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE syscall.Errno = 14090 - ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME syscall.Errno = 14091 - ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE syscall.Errno = 14092 - ERROR_SXS_IDENTITY_PARSE_ERROR syscall.Errno = 14093 - ERROR_MALFORMED_SUBSTITUTION_STRING syscall.Errno = 14094 - 
ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN syscall.Errno = 14095 - ERROR_UNMAPPED_SUBSTITUTION_STRING syscall.Errno = 14096 - ERROR_SXS_ASSEMBLY_NOT_LOCKED syscall.Errno = 14097 - ERROR_SXS_COMPONENT_STORE_CORRUPT syscall.Errno = 14098 - ERROR_ADVANCED_INSTALLER_FAILED syscall.Errno = 14099 - ERROR_XML_ENCODING_MISMATCH syscall.Errno = 14100 - ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT syscall.Errno = 14101 - ERROR_SXS_IDENTITIES_DIFFERENT syscall.Errno = 14102 - ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT syscall.Errno = 14103 - ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY syscall.Errno = 14104 - ERROR_SXS_MANIFEST_TOO_BIG syscall.Errno = 14105 - ERROR_SXS_SETTING_NOT_REGISTERED syscall.Errno = 14106 - ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE syscall.Errno = 14107 - ERROR_SMI_PRIMITIVE_INSTALLER_FAILED syscall.Errno = 14108 - ERROR_GENERIC_COMMAND_FAILED syscall.Errno = 14109 - ERROR_SXS_FILE_HASH_MISSING syscall.Errno = 14110 - ERROR_EVT_INVALID_CHANNEL_PATH syscall.Errno = 15000 - ERROR_EVT_INVALID_QUERY syscall.Errno = 15001 - ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND syscall.Errno = 15002 - ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND syscall.Errno = 15003 - ERROR_EVT_INVALID_PUBLISHER_NAME syscall.Errno = 15004 - ERROR_EVT_INVALID_EVENT_DATA syscall.Errno = 15005 - ERROR_EVT_CHANNEL_NOT_FOUND syscall.Errno = 15007 - ERROR_EVT_MALFORMED_XML_TEXT syscall.Errno = 15008 - ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL syscall.Errno = 15009 - ERROR_EVT_CONFIGURATION_ERROR syscall.Errno = 15010 - ERROR_EVT_QUERY_RESULT_STALE syscall.Errno = 15011 - ERROR_EVT_QUERY_RESULT_INVALID_POSITION syscall.Errno = 15012 - ERROR_EVT_NON_VALIDATING_MSXML syscall.Errno = 15013 - ERROR_EVT_FILTER_ALREADYSCOPED syscall.Errno = 15014 - ERROR_EVT_FILTER_NOTELTSET syscall.Errno = 15015 - ERROR_EVT_FILTER_INVARG syscall.Errno = 15016 - ERROR_EVT_FILTER_INVTEST syscall.Errno = 15017 - ERROR_EVT_FILTER_INVTYPE syscall.Errno = 15018 - ERROR_EVT_FILTER_PARSEERR syscall.Errno = 15019 - ERROR_EVT_FILTER_UNSUPPORTEDOP syscall.Errno = 15020 - ERROR_EVT_FILTER_UNEXPECTEDTOKEN syscall.Errno = 15021 - ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL syscall.Errno = 15022 - ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE syscall.Errno = 15023 - ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE syscall.Errno = 15024 - ERROR_EVT_CHANNEL_CANNOT_ACTIVATE syscall.Errno = 15025 - ERROR_EVT_FILTER_TOO_COMPLEX syscall.Errno = 15026 - ERROR_EVT_MESSAGE_NOT_FOUND syscall.Errno = 15027 - ERROR_EVT_MESSAGE_ID_NOT_FOUND syscall.Errno = 15028 - ERROR_EVT_UNRESOLVED_VALUE_INSERT syscall.Errno = 15029 - ERROR_EVT_UNRESOLVED_PARAMETER_INSERT syscall.Errno = 15030 - ERROR_EVT_MAX_INSERTS_REACHED syscall.Errno = 15031 - ERROR_EVT_EVENT_DEFINITION_NOT_FOUND syscall.Errno = 15032 - ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND syscall.Errno = 15033 - ERROR_EVT_VERSION_TOO_OLD syscall.Errno = 15034 - ERROR_EVT_VERSION_TOO_NEW syscall.Errno = 15035 - ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY syscall.Errno = 15036 - ERROR_EVT_PUBLISHER_DISABLED syscall.Errno = 15037 - ERROR_EVT_FILTER_OUT_OF_RANGE syscall.Errno = 15038 - ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE syscall.Errno = 15080 - ERROR_EC_LOG_DISABLED syscall.Errno = 15081 - ERROR_EC_CIRCULAR_FORWARDING syscall.Errno = 15082 - ERROR_EC_CREDSTORE_FULL syscall.Errno = 15083 - ERROR_EC_CRED_NOT_FOUND syscall.Errno = 15084 - ERROR_EC_NO_ACTIVE_CHANNEL syscall.Errno = 15085 - ERROR_MUI_FILE_NOT_FOUND syscall.Errno = 15100 - ERROR_MUI_INVALID_FILE syscall.Errno = 15101 - ERROR_MUI_INVALID_RC_CONFIG syscall.Errno = 15102 - 
ERROR_MUI_INVALID_LOCALE_NAME syscall.Errno = 15103 - ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME syscall.Errno = 15104 - ERROR_MUI_FILE_NOT_LOADED syscall.Errno = 15105 - ERROR_RESOURCE_ENUM_USER_STOP syscall.Errno = 15106 - ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED syscall.Errno = 15107 - ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME syscall.Errno = 15108 - ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE syscall.Errno = 15110 - ERROR_MRM_INVALID_PRICONFIG syscall.Errno = 15111 - ERROR_MRM_INVALID_FILE_TYPE syscall.Errno = 15112 - ERROR_MRM_UNKNOWN_QUALIFIER syscall.Errno = 15113 - ERROR_MRM_INVALID_QUALIFIER_VALUE syscall.Errno = 15114 - ERROR_MRM_NO_CANDIDATE syscall.Errno = 15115 - ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE syscall.Errno = 15116 - ERROR_MRM_RESOURCE_TYPE_MISMATCH syscall.Errno = 15117 - ERROR_MRM_DUPLICATE_MAP_NAME syscall.Errno = 15118 - ERROR_MRM_DUPLICATE_ENTRY syscall.Errno = 15119 - ERROR_MRM_INVALID_RESOURCE_IDENTIFIER syscall.Errno = 15120 - ERROR_MRM_FILEPATH_TOO_LONG syscall.Errno = 15121 - ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE syscall.Errno = 15122 - ERROR_MRM_INVALID_PRI_FILE syscall.Errno = 15126 - ERROR_MRM_NAMED_RESOURCE_NOT_FOUND syscall.Errno = 15127 - ERROR_MRM_MAP_NOT_FOUND syscall.Errno = 15135 - ERROR_MRM_UNSUPPORTED_PROFILE_TYPE syscall.Errno = 15136 - ERROR_MRM_INVALID_QUALIFIER_OPERATOR syscall.Errno = 15137 - ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE syscall.Errno = 15138 - ERROR_MRM_AUTOMERGE_ENABLED syscall.Errno = 15139 - ERROR_MRM_TOO_MANY_RESOURCES syscall.Errno = 15140 - ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE syscall.Errno = 15141 - ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE syscall.Errno = 15142 - ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD syscall.Errno = 15143 - ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST syscall.Errno = 15144 - ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT syscall.Errno = 15145 - ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE syscall.Errno = 15146 - ERROR_MRM_GENERATION_COUNT_MISMATCH syscall.Errno = 15147 - ERROR_PRI_MERGE_VERSION_MISMATCH syscall.Errno = 15148 - ERROR_PRI_MERGE_MISSING_SCHEMA syscall.Errno = 15149 - ERROR_PRI_MERGE_LOAD_FILE_FAILED syscall.Errno = 15150 - ERROR_PRI_MERGE_ADD_FILE_FAILED syscall.Errno = 15151 - ERROR_PRI_MERGE_WRITE_FILE_FAILED syscall.Errno = 15152 - ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED syscall.Errno = 15153 - ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED syscall.Errno = 15154 - ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED syscall.Errno = 15155 - ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED syscall.Errno = 15156 - ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED syscall.Errno = 15157 - ERROR_PRI_MERGE_INVALID_FILE_NAME syscall.Errno = 15158 - ERROR_MRM_PACKAGE_NOT_FOUND syscall.Errno = 15159 - ERROR_MCA_INVALID_CAPABILITIES_STRING syscall.Errno = 15200 - ERROR_MCA_INVALID_VCP_VERSION syscall.Errno = 15201 - ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION syscall.Errno = 15202 - ERROR_MCA_MCCS_VERSION_MISMATCH syscall.Errno = 15203 - ERROR_MCA_UNSUPPORTED_MCCS_VERSION syscall.Errno = 15204 - ERROR_MCA_INTERNAL_ERROR syscall.Errno = 15205 - ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED syscall.Errno = 15206 - ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE syscall.Errno = 15207 - ERROR_AMBIGUOUS_SYSTEM_DEVICE syscall.Errno = 15250 - ERROR_SYSTEM_DEVICE_NOT_FOUND syscall.Errno = 15299 - ERROR_HASH_NOT_SUPPORTED syscall.Errno = 15300 - ERROR_HASH_NOT_PRESENT syscall.Errno = 15301 - ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED syscall.Errno = 15321 - ERROR_GPIO_CLIENT_INFORMATION_INVALID 
syscall.Errno = 15322 - ERROR_GPIO_VERSION_NOT_SUPPORTED syscall.Errno = 15323 - ERROR_GPIO_INVALID_REGISTRATION_PACKET syscall.Errno = 15324 - ERROR_GPIO_OPERATION_DENIED syscall.Errno = 15325 - ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE syscall.Errno = 15326 - ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED syscall.Errno = 15327 - ERROR_CANNOT_SWITCH_RUNLEVEL syscall.Errno = 15400 - ERROR_INVALID_RUNLEVEL_SETTING syscall.Errno = 15401 - ERROR_RUNLEVEL_SWITCH_TIMEOUT syscall.Errno = 15402 - ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT syscall.Errno = 15403 - ERROR_RUNLEVEL_SWITCH_IN_PROGRESS syscall.Errno = 15404 - ERROR_SERVICES_FAILED_AUTOSTART syscall.Errno = 15405 - ERROR_COM_TASK_STOP_PENDING syscall.Errno = 15501 - ERROR_INSTALL_OPEN_PACKAGE_FAILED syscall.Errno = 15600 - ERROR_INSTALL_PACKAGE_NOT_FOUND syscall.Errno = 15601 - ERROR_INSTALL_INVALID_PACKAGE syscall.Errno = 15602 - ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED syscall.Errno = 15603 - ERROR_INSTALL_OUT_OF_DISK_SPACE syscall.Errno = 15604 - ERROR_INSTALL_NETWORK_FAILURE syscall.Errno = 15605 - ERROR_INSTALL_REGISTRATION_FAILURE syscall.Errno = 15606 - ERROR_INSTALL_DEREGISTRATION_FAILURE syscall.Errno = 15607 - ERROR_INSTALL_CANCEL syscall.Errno = 15608 - ERROR_INSTALL_FAILED syscall.Errno = 15609 - ERROR_REMOVE_FAILED syscall.Errno = 15610 - ERROR_PACKAGE_ALREADY_EXISTS syscall.Errno = 15611 - ERROR_NEEDS_REMEDIATION syscall.Errno = 15612 - ERROR_INSTALL_PREREQUISITE_FAILED syscall.Errno = 15613 - ERROR_PACKAGE_REPOSITORY_CORRUPTED syscall.Errno = 15614 - ERROR_INSTALL_POLICY_FAILURE syscall.Errno = 15615 - ERROR_PACKAGE_UPDATING syscall.Errno = 15616 - ERROR_DEPLOYMENT_BLOCKED_BY_POLICY syscall.Errno = 15617 - ERROR_PACKAGES_IN_USE syscall.Errno = 15618 - ERROR_RECOVERY_FILE_CORRUPT syscall.Errno = 15619 - ERROR_INVALID_STAGED_SIGNATURE syscall.Errno = 15620 - ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED syscall.Errno = 15621 - ERROR_INSTALL_PACKAGE_DOWNGRADE syscall.Errno = 15622 - ERROR_SYSTEM_NEEDS_REMEDIATION syscall.Errno = 15623 - ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN syscall.Errno = 15624 - ERROR_RESILIENCY_FILE_CORRUPT syscall.Errno = 15625 - ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING syscall.Errno = 15626 - ERROR_PACKAGE_MOVE_FAILED syscall.Errno = 15627 - ERROR_INSTALL_VOLUME_NOT_EMPTY syscall.Errno = 15628 - ERROR_INSTALL_VOLUME_OFFLINE syscall.Errno = 15629 - ERROR_INSTALL_VOLUME_CORRUPT syscall.Errno = 15630 - ERROR_NEEDS_REGISTRATION syscall.Errno = 15631 - ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE syscall.Errno = 15632 - ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED syscall.Errno = 15633 - ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE syscall.Errno = 15634 - ERROR_PACKAGE_NOT_SUPPORTED_ON_FILESYSTEM syscall.Errno = 15635 - ERROR_PACKAGE_MOVE_BLOCKED_BY_STREAMING syscall.Errno = 15636 - ERROR_INSTALL_OPTIONAL_PACKAGE_APPLICATIONID_NOT_UNIQUE syscall.Errno = 15637 - ERROR_PACKAGE_STAGING_ONHOLD syscall.Errno = 15638 - ERROR_INSTALL_INVALID_RELATED_SET_UPDATE syscall.Errno = 15639 - ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_FULLTRUST_CAPABILITY syscall.Errno = 15640 - ERROR_DEPLOYMENT_BLOCKED_BY_USER_LOG_OFF syscall.Errno = 15641 - ERROR_PROVISION_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_PROVISIONED syscall.Errno = 15642 - ERROR_PACKAGES_REPUTATION_CHECK_FAILED syscall.Errno = 15643 - ERROR_PACKAGES_REPUTATION_CHECK_TIMEDOUT syscall.Errno = 15644 - ERROR_DEPLOYMENT_OPTION_NOT_SUPPORTED syscall.Errno = 15645 - ERROR_APPINSTALLER_ACTIVATION_BLOCKED syscall.Errno = 15646 - ERROR_REGISTRATION_FROM_REMOTE_DRIVE_NOT_SUPPORTED 
syscall.Errno = 15647 - APPMODEL_ERROR_NO_PACKAGE syscall.Errno = 15700 - APPMODEL_ERROR_PACKAGE_RUNTIME_CORRUPT syscall.Errno = 15701 - APPMODEL_ERROR_PACKAGE_IDENTITY_CORRUPT syscall.Errno = 15702 - APPMODEL_ERROR_NO_APPLICATION syscall.Errno = 15703 - APPMODEL_ERROR_DYNAMIC_PROPERTY_READ_FAILED syscall.Errno = 15704 - APPMODEL_ERROR_DYNAMIC_PROPERTY_INVALID syscall.Errno = 15705 - APPMODEL_ERROR_PACKAGE_NOT_AVAILABLE syscall.Errno = 15706 - ERROR_STATE_LOAD_STORE_FAILED syscall.Errno = 15800 - ERROR_STATE_GET_VERSION_FAILED syscall.Errno = 15801 - ERROR_STATE_SET_VERSION_FAILED syscall.Errno = 15802 - ERROR_STATE_STRUCTURED_RESET_FAILED syscall.Errno = 15803 - ERROR_STATE_OPEN_CONTAINER_FAILED syscall.Errno = 15804 - ERROR_STATE_CREATE_CONTAINER_FAILED syscall.Errno = 15805 - ERROR_STATE_DELETE_CONTAINER_FAILED syscall.Errno = 15806 - ERROR_STATE_READ_SETTING_FAILED syscall.Errno = 15807 - ERROR_STATE_WRITE_SETTING_FAILED syscall.Errno = 15808 - ERROR_STATE_DELETE_SETTING_FAILED syscall.Errno = 15809 - ERROR_STATE_QUERY_SETTING_FAILED syscall.Errno = 15810 - ERROR_STATE_READ_COMPOSITE_SETTING_FAILED syscall.Errno = 15811 - ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED syscall.Errno = 15812 - ERROR_STATE_ENUMERATE_CONTAINER_FAILED syscall.Errno = 15813 - ERROR_STATE_ENUMERATE_SETTINGS_FAILED syscall.Errno = 15814 - ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15815 - ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED syscall.Errno = 15816 - ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15817 - ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED syscall.Errno = 15818 - ERROR_API_UNAVAILABLE syscall.Errno = 15841 - STORE_ERROR_UNLICENSED syscall.Errno = 15861 - STORE_ERROR_UNLICENSED_USER syscall.Errno = 15862 - STORE_ERROR_PENDING_COM_TRANSACTION syscall.Errno = 15863 - STORE_ERROR_LICENSE_REVOKED syscall.Errno = 15864 - SEVERITY_SUCCESS syscall.Errno = 0 - SEVERITY_ERROR syscall.Errno = 1 - FACILITY_NT_BIT = 0x10000000 - E_NOT_SET = ERROR_NOT_FOUND - E_NOT_VALID_STATE = ERROR_INVALID_STATE - E_NOT_SUFFICIENT_BUFFER = ERROR_INSUFFICIENT_BUFFER - E_TIME_CRITICAL_THREAD = ERROR_TIME_CRITICAL_THREAD - NOERROR syscall.Errno = 0 - E_UNEXPECTED Handle = 0x8000FFFF - E_NOTIMPL Handle = 0x80004001 - E_OUTOFMEMORY Handle = 0x8007000E - E_INVALIDARG Handle = 0x80070057 - E_NOINTERFACE Handle = 0x80004002 - E_POINTER Handle = 0x80004003 - E_HANDLE Handle = 0x80070006 - E_ABORT Handle = 0x80004004 - E_FAIL Handle = 0x80004005 - E_ACCESSDENIED Handle = 0x80070005 - E_PENDING Handle = 0x8000000A - E_BOUNDS Handle = 0x8000000B - E_CHANGED_STATE Handle = 0x8000000C - E_ILLEGAL_STATE_CHANGE Handle = 0x8000000D - E_ILLEGAL_METHOD_CALL Handle = 0x8000000E - RO_E_METADATA_NAME_NOT_FOUND Handle = 0x8000000F - RO_E_METADATA_NAME_IS_NAMESPACE Handle = 0x80000010 - RO_E_METADATA_INVALID_TYPE_FORMAT Handle = 0x80000011 - RO_E_INVALID_METADATA_FILE Handle = 0x80000012 - RO_E_CLOSED Handle = 0x80000013 - RO_E_EXCLUSIVE_WRITE Handle = 0x80000014 - RO_E_CHANGE_NOTIFICATION_IN_PROGRESS Handle = 0x80000015 - RO_E_ERROR_STRING_NOT_FOUND Handle = 0x80000016 - E_STRING_NOT_NULL_TERMINATED Handle = 0x80000017 - E_ILLEGAL_DELEGATE_ASSIGNMENT Handle = 0x80000018 - E_ASYNC_OPERATION_NOT_STARTED Handle = 0x80000019 - E_APPLICATION_EXITING Handle = 0x8000001A - E_APPLICATION_VIEW_EXITING Handle = 0x8000001B - RO_E_MUST_BE_AGILE Handle = 0x8000001C - RO_E_UNSUPPORTED_FROM_MTA Handle = 0x8000001D - RO_E_COMMITTED Handle = 0x8000001E - RO_E_BLOCKED_CROSS_ASTA_CALL Handle = 0x8000001F - 
RO_E_CANNOT_ACTIVATE_FULL_TRUST_SERVER Handle = 0x80000020 - RO_E_CANNOT_ACTIVATE_UNIVERSAL_APPLICATION_SERVER Handle = 0x80000021 - CO_E_INIT_TLS Handle = 0x80004006 - CO_E_INIT_SHARED_ALLOCATOR Handle = 0x80004007 - CO_E_INIT_MEMORY_ALLOCATOR Handle = 0x80004008 - CO_E_INIT_CLASS_CACHE Handle = 0x80004009 - CO_E_INIT_RPC_CHANNEL Handle = 0x8000400A - CO_E_INIT_TLS_SET_CHANNEL_CONTROL Handle = 0x8000400B - CO_E_INIT_TLS_CHANNEL_CONTROL Handle = 0x8000400C - CO_E_INIT_UNACCEPTED_USER_ALLOCATOR Handle = 0x8000400D - CO_E_INIT_SCM_MUTEX_EXISTS Handle = 0x8000400E - CO_E_INIT_SCM_FILE_MAPPING_EXISTS Handle = 0x8000400F - CO_E_INIT_SCM_MAP_VIEW_OF_FILE Handle = 0x80004010 - CO_E_INIT_SCM_EXEC_FAILURE Handle = 0x80004011 - CO_E_INIT_ONLY_SINGLE_THREADED Handle = 0x80004012 - CO_E_CANT_REMOTE Handle = 0x80004013 - CO_E_BAD_SERVER_NAME Handle = 0x80004014 - CO_E_WRONG_SERVER_IDENTITY Handle = 0x80004015 - CO_E_OLE1DDE_DISABLED Handle = 0x80004016 - CO_E_RUNAS_SYNTAX Handle = 0x80004017 - CO_E_CREATEPROCESS_FAILURE Handle = 0x80004018 - CO_E_RUNAS_CREATEPROCESS_FAILURE Handle = 0x80004019 - CO_E_RUNAS_LOGON_FAILURE Handle = 0x8000401A - CO_E_LAUNCH_PERMSSION_DENIED Handle = 0x8000401B - CO_E_START_SERVICE_FAILURE Handle = 0x8000401C - CO_E_REMOTE_COMMUNICATION_FAILURE Handle = 0x8000401D - CO_E_SERVER_START_TIMEOUT Handle = 0x8000401E - CO_E_CLSREG_INCONSISTENT Handle = 0x8000401F - CO_E_IIDREG_INCONSISTENT Handle = 0x80004020 - CO_E_NOT_SUPPORTED Handle = 0x80004021 - CO_E_RELOAD_DLL Handle = 0x80004022 - CO_E_MSI_ERROR Handle = 0x80004023 - CO_E_ATTEMPT_TO_CREATE_OUTSIDE_CLIENT_CONTEXT Handle = 0x80004024 - CO_E_SERVER_PAUSED Handle = 0x80004025 - CO_E_SERVER_NOT_PAUSED Handle = 0x80004026 - CO_E_CLASS_DISABLED Handle = 0x80004027 - CO_E_CLRNOTAVAILABLE Handle = 0x80004028 - CO_E_ASYNC_WORK_REJECTED Handle = 0x80004029 - CO_E_SERVER_INIT_TIMEOUT Handle = 0x8000402A - CO_E_NO_SECCTX_IN_ACTIVATE Handle = 0x8000402B - CO_E_TRACKER_CONFIG Handle = 0x80004030 - CO_E_THREADPOOL_CONFIG Handle = 0x80004031 - CO_E_SXS_CONFIG Handle = 0x80004032 - CO_E_MALFORMED_SPN Handle = 0x80004033 - CO_E_UNREVOKED_REGISTRATION_ON_APARTMENT_SHUTDOWN Handle = 0x80004034 - CO_E_PREMATURE_STUB_RUNDOWN Handle = 0x80004035 - S_OK Handle = 0 - S_FALSE Handle = 1 - OLE_E_FIRST Handle = 0x80040000 - OLE_E_LAST Handle = 0x800400FF - OLE_S_FIRST Handle = 0x00040000 - OLE_S_LAST Handle = 0x000400FF - OLE_E_OLEVERB Handle = 0x80040000 - OLE_E_ADVF Handle = 0x80040001 - OLE_E_ENUM_NOMORE Handle = 0x80040002 - OLE_E_ADVISENOTSUPPORTED Handle = 0x80040003 - OLE_E_NOCONNECTION Handle = 0x80040004 - OLE_E_NOTRUNNING Handle = 0x80040005 - OLE_E_NOCACHE Handle = 0x80040006 - OLE_E_BLANK Handle = 0x80040007 - OLE_E_CLASSDIFF Handle = 0x80040008 - OLE_E_CANT_GETMONIKER Handle = 0x80040009 - OLE_E_CANT_BINDTOSOURCE Handle = 0x8004000A - OLE_E_STATIC Handle = 0x8004000B - OLE_E_PROMPTSAVECANCELLED Handle = 0x8004000C - OLE_E_INVALIDRECT Handle = 0x8004000D - OLE_E_WRONGCOMPOBJ Handle = 0x8004000E - OLE_E_INVALIDHWND Handle = 0x8004000F - OLE_E_NOT_INPLACEACTIVE Handle = 0x80040010 - OLE_E_CANTCONVERT Handle = 0x80040011 - OLE_E_NOSTORAGE Handle = 0x80040012 - DV_E_FORMATETC Handle = 0x80040064 - DV_E_DVTARGETDEVICE Handle = 0x80040065 - DV_E_STGMEDIUM Handle = 0x80040066 - DV_E_STATDATA Handle = 0x80040067 - DV_E_LINDEX Handle = 0x80040068 - DV_E_TYMED Handle = 0x80040069 - DV_E_CLIPFORMAT Handle = 0x8004006A - DV_E_DVASPECT Handle = 0x8004006B - DV_E_DVTARGETDEVICE_SIZE Handle = 0x8004006C - DV_E_NOIVIEWOBJECT Handle = 0x8004006D - 
[diff truncated: deletion lines for several thousand machine-generated Windows error-code constants (`Handle` and `syscall.Errno` values, DRAGDROP_E_FIRST through TPM_E_INAPPROPRIATE_ENC and beyond), apparently from the vendored golang.org/x/sys/windows zerrors_windows.go]
TPM_E_MIGRATEFAIL Handle = 0x8028000F - TPM_E_INVALID_PCR_INFO Handle = 0x80280010 - TPM_E_NOSPACE Handle = 0x80280011 - TPM_E_NOSRK Handle = 0x80280012 - TPM_E_NOTSEALED_BLOB Handle = 0x80280013 - TPM_E_OWNER_SET Handle = 0x80280014 - TPM_E_RESOURCES Handle = 0x80280015 - TPM_E_SHORTRANDOM Handle = 0x80280016 - TPM_E_SIZE Handle = 0x80280017 - TPM_E_WRONGPCRVAL Handle = 0x80280018 - TPM_E_BAD_PARAM_SIZE Handle = 0x80280019 - TPM_E_SHA_THREAD Handle = 0x8028001A - TPM_E_SHA_ERROR Handle = 0x8028001B - TPM_E_FAILEDSELFTEST Handle = 0x8028001C - TPM_E_AUTH2FAIL Handle = 0x8028001D - TPM_E_BADTAG Handle = 0x8028001E - TPM_E_IOERROR Handle = 0x8028001F - TPM_E_ENCRYPT_ERROR Handle = 0x80280020 - TPM_E_DECRYPT_ERROR Handle = 0x80280021 - TPM_E_INVALID_AUTHHANDLE Handle = 0x80280022 - TPM_E_NO_ENDORSEMENT Handle = 0x80280023 - TPM_E_INVALID_KEYUSAGE Handle = 0x80280024 - TPM_E_WRONG_ENTITYTYPE Handle = 0x80280025 - TPM_E_INVALID_POSTINIT Handle = 0x80280026 - TPM_E_INAPPROPRIATE_SIG Handle = 0x80280027 - TPM_E_BAD_KEY_PROPERTY Handle = 0x80280028 - TPM_E_BAD_MIGRATION Handle = 0x80280029 - TPM_E_BAD_SCHEME Handle = 0x8028002A - TPM_E_BAD_DATASIZE Handle = 0x8028002B - TPM_E_BAD_MODE Handle = 0x8028002C - TPM_E_BAD_PRESENCE Handle = 0x8028002D - TPM_E_BAD_VERSION Handle = 0x8028002E - TPM_E_NO_WRAP_TRANSPORT Handle = 0x8028002F - TPM_E_AUDITFAIL_UNSUCCESSFUL Handle = 0x80280030 - TPM_E_AUDITFAIL_SUCCESSFUL Handle = 0x80280031 - TPM_E_NOTRESETABLE Handle = 0x80280032 - TPM_E_NOTLOCAL Handle = 0x80280033 - TPM_E_BAD_TYPE Handle = 0x80280034 - TPM_E_INVALID_RESOURCE Handle = 0x80280035 - TPM_E_NOTFIPS Handle = 0x80280036 - TPM_E_INVALID_FAMILY Handle = 0x80280037 - TPM_E_NO_NV_PERMISSION Handle = 0x80280038 - TPM_E_REQUIRES_SIGN Handle = 0x80280039 - TPM_E_KEY_NOTSUPPORTED Handle = 0x8028003A - TPM_E_AUTH_CONFLICT Handle = 0x8028003B - TPM_E_AREA_LOCKED Handle = 0x8028003C - TPM_E_BAD_LOCALITY Handle = 0x8028003D - TPM_E_READ_ONLY Handle = 0x8028003E - TPM_E_PER_NOWRITE Handle = 0x8028003F - TPM_E_FAMILYCOUNT Handle = 0x80280040 - TPM_E_WRITE_LOCKED Handle = 0x80280041 - TPM_E_BAD_ATTRIBUTES Handle = 0x80280042 - TPM_E_INVALID_STRUCTURE Handle = 0x80280043 - TPM_E_KEY_OWNER_CONTROL Handle = 0x80280044 - TPM_E_BAD_COUNTER Handle = 0x80280045 - TPM_E_NOT_FULLWRITE Handle = 0x80280046 - TPM_E_CONTEXT_GAP Handle = 0x80280047 - TPM_E_MAXNVWRITES Handle = 0x80280048 - TPM_E_NOOPERATOR Handle = 0x80280049 - TPM_E_RESOURCEMISSING Handle = 0x8028004A - TPM_E_DELEGATE_LOCK Handle = 0x8028004B - TPM_E_DELEGATE_FAMILY Handle = 0x8028004C - TPM_E_DELEGATE_ADMIN Handle = 0x8028004D - TPM_E_TRANSPORT_NOTEXCLUSIVE Handle = 0x8028004E - TPM_E_OWNER_CONTROL Handle = 0x8028004F - TPM_E_DAA_RESOURCES Handle = 0x80280050 - TPM_E_DAA_INPUT_DATA0 Handle = 0x80280051 - TPM_E_DAA_INPUT_DATA1 Handle = 0x80280052 - TPM_E_DAA_ISSUER_SETTINGS Handle = 0x80280053 - TPM_E_DAA_TPM_SETTINGS Handle = 0x80280054 - TPM_E_DAA_STAGE Handle = 0x80280055 - TPM_E_DAA_ISSUER_VALIDITY Handle = 0x80280056 - TPM_E_DAA_WRONG_W Handle = 0x80280057 - TPM_E_BAD_HANDLE Handle = 0x80280058 - TPM_E_BAD_DELEGATE Handle = 0x80280059 - TPM_E_BADCONTEXT Handle = 0x8028005A - TPM_E_TOOMANYCONTEXTS Handle = 0x8028005B - TPM_E_MA_TICKET_SIGNATURE Handle = 0x8028005C - TPM_E_MA_DESTINATION Handle = 0x8028005D - TPM_E_MA_SOURCE Handle = 0x8028005E - TPM_E_MA_AUTHORITY Handle = 0x8028005F - TPM_E_PERMANENTEK Handle = 0x80280061 - TPM_E_BAD_SIGNATURE Handle = 0x80280062 - TPM_E_NOCONTEXTSPACE Handle = 0x80280063 - TPM_20_E_ASYMMETRIC Handle = 0x80280081 - 
TPM_20_E_ATTRIBUTES Handle = 0x80280082 - TPM_20_E_HASH Handle = 0x80280083 - TPM_20_E_VALUE Handle = 0x80280084 - TPM_20_E_HIERARCHY Handle = 0x80280085 - TPM_20_E_KEY_SIZE Handle = 0x80280087 - TPM_20_E_MGF Handle = 0x80280088 - TPM_20_E_MODE Handle = 0x80280089 - TPM_20_E_TYPE Handle = 0x8028008A - TPM_20_E_HANDLE Handle = 0x8028008B - TPM_20_E_KDF Handle = 0x8028008C - TPM_20_E_RANGE Handle = 0x8028008D - TPM_20_E_AUTH_FAIL Handle = 0x8028008E - TPM_20_E_NONCE Handle = 0x8028008F - TPM_20_E_PP Handle = 0x80280090 - TPM_20_E_SCHEME Handle = 0x80280092 - TPM_20_E_SIZE Handle = 0x80280095 - TPM_20_E_SYMMETRIC Handle = 0x80280096 - TPM_20_E_TAG Handle = 0x80280097 - TPM_20_E_SELECTOR Handle = 0x80280098 - TPM_20_E_INSUFFICIENT Handle = 0x8028009A - TPM_20_E_SIGNATURE Handle = 0x8028009B - TPM_20_E_KEY Handle = 0x8028009C - TPM_20_E_POLICY_FAIL Handle = 0x8028009D - TPM_20_E_INTEGRITY Handle = 0x8028009F - TPM_20_E_TICKET Handle = 0x802800A0 - TPM_20_E_RESERVED_BITS Handle = 0x802800A1 - TPM_20_E_BAD_AUTH Handle = 0x802800A2 - TPM_20_E_EXPIRED Handle = 0x802800A3 - TPM_20_E_POLICY_CC Handle = 0x802800A4 - TPM_20_E_BINDING Handle = 0x802800A5 - TPM_20_E_CURVE Handle = 0x802800A6 - TPM_20_E_ECC_POINT Handle = 0x802800A7 - TPM_20_E_INITIALIZE Handle = 0x80280100 - TPM_20_E_FAILURE Handle = 0x80280101 - TPM_20_E_SEQUENCE Handle = 0x80280103 - TPM_20_E_PRIVATE Handle = 0x8028010B - TPM_20_E_HMAC Handle = 0x80280119 - TPM_20_E_DISABLED Handle = 0x80280120 - TPM_20_E_EXCLUSIVE Handle = 0x80280121 - TPM_20_E_ECC_CURVE Handle = 0x80280123 - TPM_20_E_AUTH_TYPE Handle = 0x80280124 - TPM_20_E_AUTH_MISSING Handle = 0x80280125 - TPM_20_E_POLICY Handle = 0x80280126 - TPM_20_E_PCR Handle = 0x80280127 - TPM_20_E_PCR_CHANGED Handle = 0x80280128 - TPM_20_E_UPGRADE Handle = 0x8028012D - TPM_20_E_TOO_MANY_CONTEXTS Handle = 0x8028012E - TPM_20_E_AUTH_UNAVAILABLE Handle = 0x8028012F - TPM_20_E_REBOOT Handle = 0x80280130 - TPM_20_E_UNBALANCED Handle = 0x80280131 - TPM_20_E_COMMAND_SIZE Handle = 0x80280142 - TPM_20_E_COMMAND_CODE Handle = 0x80280143 - TPM_20_E_AUTHSIZE Handle = 0x80280144 - TPM_20_E_AUTH_CONTEXT Handle = 0x80280145 - TPM_20_E_NV_RANGE Handle = 0x80280146 - TPM_20_E_NV_SIZE Handle = 0x80280147 - TPM_20_E_NV_LOCKED Handle = 0x80280148 - TPM_20_E_NV_AUTHORIZATION Handle = 0x80280149 - TPM_20_E_NV_UNINITIALIZED Handle = 0x8028014A - TPM_20_E_NV_SPACE Handle = 0x8028014B - TPM_20_E_NV_DEFINED Handle = 0x8028014C - TPM_20_E_BAD_CONTEXT Handle = 0x80280150 - TPM_20_E_CPHASH Handle = 0x80280151 - TPM_20_E_PARENT Handle = 0x80280152 - TPM_20_E_NEEDS_TEST Handle = 0x80280153 - TPM_20_E_NO_RESULT Handle = 0x80280154 - TPM_20_E_SENSITIVE Handle = 0x80280155 - TPM_E_COMMAND_BLOCKED Handle = 0x80280400 - TPM_E_INVALID_HANDLE Handle = 0x80280401 - TPM_E_DUPLICATE_VHANDLE Handle = 0x80280402 - TPM_E_EMBEDDED_COMMAND_BLOCKED Handle = 0x80280403 - TPM_E_EMBEDDED_COMMAND_UNSUPPORTED Handle = 0x80280404 - TPM_E_RETRY Handle = 0x80280800 - TPM_E_NEEDS_SELFTEST Handle = 0x80280801 - TPM_E_DOING_SELFTEST Handle = 0x80280802 - TPM_E_DEFEND_LOCK_RUNNING Handle = 0x80280803 - TPM_20_E_CONTEXT_GAP Handle = 0x80280901 - TPM_20_E_OBJECT_MEMORY Handle = 0x80280902 - TPM_20_E_SESSION_MEMORY Handle = 0x80280903 - TPM_20_E_MEMORY Handle = 0x80280904 - TPM_20_E_SESSION_HANDLES Handle = 0x80280905 - TPM_20_E_OBJECT_HANDLES Handle = 0x80280906 - TPM_20_E_LOCALITY Handle = 0x80280907 - TPM_20_E_YIELDED Handle = 0x80280908 - TPM_20_E_CANCELED Handle = 0x80280909 - TPM_20_E_TESTING Handle = 0x8028090A - TPM_20_E_NV_RATE Handle = 
0x80280920 - TPM_20_E_LOCKOUT Handle = 0x80280921 - TPM_20_E_RETRY Handle = 0x80280922 - TPM_20_E_NV_UNAVAILABLE Handle = 0x80280923 - TBS_E_INTERNAL_ERROR Handle = 0x80284001 - TBS_E_BAD_PARAMETER Handle = 0x80284002 - TBS_E_INVALID_OUTPUT_POINTER Handle = 0x80284003 - TBS_E_INVALID_CONTEXT Handle = 0x80284004 - TBS_E_INSUFFICIENT_BUFFER Handle = 0x80284005 - TBS_E_IOERROR Handle = 0x80284006 - TBS_E_INVALID_CONTEXT_PARAM Handle = 0x80284007 - TBS_E_SERVICE_NOT_RUNNING Handle = 0x80284008 - TBS_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80284009 - TBS_E_TOO_MANY_RESOURCES Handle = 0x8028400A - TBS_E_SERVICE_START_PENDING Handle = 0x8028400B - TBS_E_PPI_NOT_SUPPORTED Handle = 0x8028400C - TBS_E_COMMAND_CANCELED Handle = 0x8028400D - TBS_E_BUFFER_TOO_LARGE Handle = 0x8028400E - TBS_E_TPM_NOT_FOUND Handle = 0x8028400F - TBS_E_SERVICE_DISABLED Handle = 0x80284010 - TBS_E_NO_EVENT_LOG Handle = 0x80284011 - TBS_E_ACCESS_DENIED Handle = 0x80284012 - TBS_E_PROVISIONING_NOT_ALLOWED Handle = 0x80284013 - TBS_E_PPI_FUNCTION_UNSUPPORTED Handle = 0x80284014 - TBS_E_OWNERAUTH_NOT_FOUND Handle = 0x80284015 - TBS_E_PROVISIONING_INCOMPLETE Handle = 0x80284016 - TPMAPI_E_INVALID_STATE Handle = 0x80290100 - TPMAPI_E_NOT_ENOUGH_DATA Handle = 0x80290101 - TPMAPI_E_TOO_MUCH_DATA Handle = 0x80290102 - TPMAPI_E_INVALID_OUTPUT_POINTER Handle = 0x80290103 - TPMAPI_E_INVALID_PARAMETER Handle = 0x80290104 - TPMAPI_E_OUT_OF_MEMORY Handle = 0x80290105 - TPMAPI_E_BUFFER_TOO_SMALL Handle = 0x80290106 - TPMAPI_E_INTERNAL_ERROR Handle = 0x80290107 - TPMAPI_E_ACCESS_DENIED Handle = 0x80290108 - TPMAPI_E_AUTHORIZATION_FAILED Handle = 0x80290109 - TPMAPI_E_INVALID_CONTEXT_HANDLE Handle = 0x8029010A - TPMAPI_E_TBS_COMMUNICATION_ERROR Handle = 0x8029010B - TPMAPI_E_TPM_COMMAND_ERROR Handle = 0x8029010C - TPMAPI_E_MESSAGE_TOO_LARGE Handle = 0x8029010D - TPMAPI_E_INVALID_ENCODING Handle = 0x8029010E - TPMAPI_E_INVALID_KEY_SIZE Handle = 0x8029010F - TPMAPI_E_ENCRYPTION_FAILED Handle = 0x80290110 - TPMAPI_E_INVALID_KEY_PARAMS Handle = 0x80290111 - TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB Handle = 0x80290112 - TPMAPI_E_INVALID_PCR_INDEX Handle = 0x80290113 - TPMAPI_E_INVALID_DELEGATE_BLOB Handle = 0x80290114 - TPMAPI_E_INVALID_CONTEXT_PARAMS Handle = 0x80290115 - TPMAPI_E_INVALID_KEY_BLOB Handle = 0x80290116 - TPMAPI_E_INVALID_PCR_DATA Handle = 0x80290117 - TPMAPI_E_INVALID_OWNER_AUTH Handle = 0x80290118 - TPMAPI_E_FIPS_RNG_CHECK_FAILED Handle = 0x80290119 - TPMAPI_E_EMPTY_TCG_LOG Handle = 0x8029011A - TPMAPI_E_INVALID_TCG_LOG_ENTRY Handle = 0x8029011B - TPMAPI_E_TCG_SEPARATOR_ABSENT Handle = 0x8029011C - TPMAPI_E_TCG_INVALID_DIGEST_ENTRY Handle = 0x8029011D - TPMAPI_E_POLICY_DENIES_OPERATION Handle = 0x8029011E - TPMAPI_E_NV_BITS_NOT_DEFINED Handle = 0x8029011F - TPMAPI_E_NV_BITS_NOT_READY Handle = 0x80290120 - TPMAPI_E_SEALING_KEY_NOT_AVAILABLE Handle = 0x80290121 - TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND Handle = 0x80290122 - TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE Handle = 0x80290123 - TPMAPI_E_OWNER_AUTH_NOT_NULL Handle = 0x80290124 - TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL Handle = 0x80290125 - TPMAPI_E_AUTHORIZATION_REVOKED Handle = 0x80290126 - TPMAPI_E_MALFORMED_AUTHORIZATION_KEY Handle = 0x80290127 - TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED Handle = 0x80290128 - TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE Handle = 0x80290129 - TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY Handle = 0x8029012A - TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER Handle = 0x8029012B - TPMAPI_E_SEALING_KEY_CHANGED Handle = 0x8029012C - TBSIMP_E_BUFFER_TOO_SMALL Handle = 
0x80290200 - TBSIMP_E_CLEANUP_FAILED Handle = 0x80290201 - TBSIMP_E_INVALID_CONTEXT_HANDLE Handle = 0x80290202 - TBSIMP_E_INVALID_CONTEXT_PARAM Handle = 0x80290203 - TBSIMP_E_TPM_ERROR Handle = 0x80290204 - TBSIMP_E_HASH_BAD_KEY Handle = 0x80290205 - TBSIMP_E_DUPLICATE_VHANDLE Handle = 0x80290206 - TBSIMP_E_INVALID_OUTPUT_POINTER Handle = 0x80290207 - TBSIMP_E_INVALID_PARAMETER Handle = 0x80290208 - TBSIMP_E_RPC_INIT_FAILED Handle = 0x80290209 - TBSIMP_E_SCHEDULER_NOT_RUNNING Handle = 0x8029020A - TBSIMP_E_COMMAND_CANCELED Handle = 0x8029020B - TBSIMP_E_OUT_OF_MEMORY Handle = 0x8029020C - TBSIMP_E_LIST_NO_MORE_ITEMS Handle = 0x8029020D - TBSIMP_E_LIST_NOT_FOUND Handle = 0x8029020E - TBSIMP_E_NOT_ENOUGH_SPACE Handle = 0x8029020F - TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS Handle = 0x80290210 - TBSIMP_E_COMMAND_FAILED Handle = 0x80290211 - TBSIMP_E_UNKNOWN_ORDINAL Handle = 0x80290212 - TBSIMP_E_RESOURCE_EXPIRED Handle = 0x80290213 - TBSIMP_E_INVALID_RESOURCE Handle = 0x80290214 - TBSIMP_E_NOTHING_TO_UNLOAD Handle = 0x80290215 - TBSIMP_E_HASH_TABLE_FULL Handle = 0x80290216 - TBSIMP_E_TOO_MANY_TBS_CONTEXTS Handle = 0x80290217 - TBSIMP_E_TOO_MANY_RESOURCES Handle = 0x80290218 - TBSIMP_E_PPI_NOT_SUPPORTED Handle = 0x80290219 - TBSIMP_E_TPM_INCOMPATIBLE Handle = 0x8029021A - TBSIMP_E_NO_EVENT_LOG Handle = 0x8029021B - TPM_E_PPI_ACPI_FAILURE Handle = 0x80290300 - TPM_E_PPI_USER_ABORT Handle = 0x80290301 - TPM_E_PPI_BIOS_FAILURE Handle = 0x80290302 - TPM_E_PPI_NOT_SUPPORTED Handle = 0x80290303 - TPM_E_PPI_BLOCKED_IN_BIOS Handle = 0x80290304 - TPM_E_PCP_ERROR_MASK Handle = 0x80290400 - TPM_E_PCP_DEVICE_NOT_READY Handle = 0x80290401 - TPM_E_PCP_INVALID_HANDLE Handle = 0x80290402 - TPM_E_PCP_INVALID_PARAMETER Handle = 0x80290403 - TPM_E_PCP_FLAG_NOT_SUPPORTED Handle = 0x80290404 - TPM_E_PCP_NOT_SUPPORTED Handle = 0x80290405 - TPM_E_PCP_BUFFER_TOO_SMALL Handle = 0x80290406 - TPM_E_PCP_INTERNAL_ERROR Handle = 0x80290407 - TPM_E_PCP_AUTHENTICATION_FAILED Handle = 0x80290408 - TPM_E_PCP_AUTHENTICATION_IGNORED Handle = 0x80290409 - TPM_E_PCP_POLICY_NOT_FOUND Handle = 0x8029040A - TPM_E_PCP_PROFILE_NOT_FOUND Handle = 0x8029040B - TPM_E_PCP_VALIDATION_FAILED Handle = 0x8029040C - TPM_E_PCP_WRONG_PARENT Handle = 0x8029040E - TPM_E_KEY_NOT_LOADED Handle = 0x8029040F - TPM_E_NO_KEY_CERTIFICATION Handle = 0x80290410 - TPM_E_KEY_NOT_FINALIZED Handle = 0x80290411 - TPM_E_ATTESTATION_CHALLENGE_NOT_SET Handle = 0x80290412 - TPM_E_NOT_PCR_BOUND Handle = 0x80290413 - TPM_E_KEY_ALREADY_FINALIZED Handle = 0x80290414 - TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED Handle = 0x80290415 - TPM_E_KEY_USAGE_POLICY_INVALID Handle = 0x80290416 - TPM_E_SOFT_KEY_ERROR Handle = 0x80290417 - TPM_E_KEY_NOT_AUTHENTICATED Handle = 0x80290418 - TPM_E_PCP_KEY_NOT_AIK Handle = 0x80290419 - TPM_E_KEY_NOT_SIGNING_KEY Handle = 0x8029041A - TPM_E_LOCKED_OUT Handle = 0x8029041B - TPM_E_CLAIM_TYPE_NOT_SUPPORTED Handle = 0x8029041C - TPM_E_VERSION_NOT_SUPPORTED Handle = 0x8029041D - TPM_E_BUFFER_LENGTH_MISMATCH Handle = 0x8029041E - TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED Handle = 0x8029041F - TPM_E_PCP_TICKET_MISSING Handle = 0x80290420 - TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED Handle = 0x80290421 - TPM_E_PCP_KEY_HANDLE_INVALIDATED Handle = 0x80290422 - TPM_E_PCP_UNSUPPORTED_PSS_SALT Handle = 0x40290423 - TPM_E_ZERO_EXHAUST_ENABLED Handle = 0x80290500 - PLA_E_DCS_NOT_FOUND Handle = 0x80300002 - PLA_E_DCS_IN_USE Handle = 0x803000AA - PLA_E_TOO_MANY_FOLDERS Handle = 0x80300045 - PLA_E_NO_MIN_DISK Handle = 0x80300070 - PLA_E_DCS_ALREADY_EXISTS Handle = 0x803000B7 - 
PLA_S_PROPERTY_IGNORED Handle = 0x00300100 - PLA_E_PROPERTY_CONFLICT Handle = 0x80300101 - PLA_E_DCS_SINGLETON_REQUIRED Handle = 0x80300102 - PLA_E_CREDENTIALS_REQUIRED Handle = 0x80300103 - PLA_E_DCS_NOT_RUNNING Handle = 0x80300104 - PLA_E_CONFLICT_INCL_EXCL_API Handle = 0x80300105 - PLA_E_NETWORK_EXE_NOT_VALID Handle = 0x80300106 - PLA_E_EXE_ALREADY_CONFIGURED Handle = 0x80300107 - PLA_E_EXE_PATH_NOT_VALID Handle = 0x80300108 - PLA_E_DC_ALREADY_EXISTS Handle = 0x80300109 - PLA_E_DCS_START_WAIT_TIMEOUT Handle = 0x8030010A - PLA_E_DC_START_WAIT_TIMEOUT Handle = 0x8030010B - PLA_E_REPORT_WAIT_TIMEOUT Handle = 0x8030010C - PLA_E_NO_DUPLICATES Handle = 0x8030010D - PLA_E_EXE_FULL_PATH_REQUIRED Handle = 0x8030010E - PLA_E_INVALID_SESSION_NAME Handle = 0x8030010F - PLA_E_PLA_CHANNEL_NOT_ENABLED Handle = 0x80300110 - PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED Handle = 0x80300111 - PLA_E_RULES_MANAGER_FAILED Handle = 0x80300112 - PLA_E_CABAPI_FAILURE Handle = 0x80300113 - FVE_E_LOCKED_VOLUME Handle = 0x80310000 - FVE_E_NOT_ENCRYPTED Handle = 0x80310001 - FVE_E_NO_TPM_BIOS Handle = 0x80310002 - FVE_E_NO_MBR_METRIC Handle = 0x80310003 - FVE_E_NO_BOOTSECTOR_METRIC Handle = 0x80310004 - FVE_E_NO_BOOTMGR_METRIC Handle = 0x80310005 - FVE_E_WRONG_BOOTMGR Handle = 0x80310006 - FVE_E_SECURE_KEY_REQUIRED Handle = 0x80310007 - FVE_E_NOT_ACTIVATED Handle = 0x80310008 - FVE_E_ACTION_NOT_ALLOWED Handle = 0x80310009 - FVE_E_AD_SCHEMA_NOT_INSTALLED Handle = 0x8031000A - FVE_E_AD_INVALID_DATATYPE Handle = 0x8031000B - FVE_E_AD_INVALID_DATASIZE Handle = 0x8031000C - FVE_E_AD_NO_VALUES Handle = 0x8031000D - FVE_E_AD_ATTR_NOT_SET Handle = 0x8031000E - FVE_E_AD_GUID_NOT_FOUND Handle = 0x8031000F - FVE_E_BAD_INFORMATION Handle = 0x80310010 - FVE_E_TOO_SMALL Handle = 0x80310011 - FVE_E_SYSTEM_VOLUME Handle = 0x80310012 - FVE_E_FAILED_WRONG_FS Handle = 0x80310013 - FVE_E_BAD_PARTITION_SIZE Handle = 0x80310014 - FVE_E_NOT_SUPPORTED Handle = 0x80310015 - FVE_E_BAD_DATA Handle = 0x80310016 - FVE_E_VOLUME_NOT_BOUND Handle = 0x80310017 - FVE_E_TPM_NOT_OWNED Handle = 0x80310018 - FVE_E_NOT_DATA_VOLUME Handle = 0x80310019 - FVE_E_AD_INSUFFICIENT_BUFFER Handle = 0x8031001A - FVE_E_CONV_READ Handle = 0x8031001B - FVE_E_CONV_WRITE Handle = 0x8031001C - FVE_E_KEY_REQUIRED Handle = 0x8031001D - FVE_E_CLUSTERING_NOT_SUPPORTED Handle = 0x8031001E - FVE_E_VOLUME_BOUND_ALREADY Handle = 0x8031001F - FVE_E_OS_NOT_PROTECTED Handle = 0x80310020 - FVE_E_PROTECTION_DISABLED Handle = 0x80310021 - FVE_E_RECOVERY_KEY_REQUIRED Handle = 0x80310022 - FVE_E_FOREIGN_VOLUME Handle = 0x80310023 - FVE_E_OVERLAPPED_UPDATE Handle = 0x80310024 - FVE_E_TPM_SRK_AUTH_NOT_ZERO Handle = 0x80310025 - FVE_E_FAILED_SECTOR_SIZE Handle = 0x80310026 - FVE_E_FAILED_AUTHENTICATION Handle = 0x80310027 - FVE_E_NOT_OS_VOLUME Handle = 0x80310028 - FVE_E_AUTOUNLOCK_ENABLED Handle = 0x80310029 - FVE_E_WRONG_BOOTSECTOR Handle = 0x8031002A - FVE_E_WRONG_SYSTEM_FS Handle = 0x8031002B - FVE_E_POLICY_PASSWORD_REQUIRED Handle = 0x8031002C - FVE_E_CANNOT_SET_FVEK_ENCRYPTED Handle = 0x8031002D - FVE_E_CANNOT_ENCRYPT_NO_KEY Handle = 0x8031002E - FVE_E_BOOTABLE_CDDVD Handle = 0x80310030 - FVE_E_PROTECTOR_EXISTS Handle = 0x80310031 - FVE_E_RELATIVE_PATH Handle = 0x80310032 - FVE_E_PROTECTOR_NOT_FOUND Handle = 0x80310033 - FVE_E_INVALID_KEY_FORMAT Handle = 0x80310034 - FVE_E_INVALID_PASSWORD_FORMAT Handle = 0x80310035 - FVE_E_FIPS_RNG_CHECK_FAILED Handle = 0x80310036 - FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD Handle = 0x80310037 - FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT Handle = 0x80310038 - 
FVE_E_NOT_DECRYPTED Handle = 0x80310039 - FVE_E_INVALID_PROTECTOR_TYPE Handle = 0x8031003A - FVE_E_NO_PROTECTORS_TO_TEST Handle = 0x8031003B - FVE_E_KEYFILE_NOT_FOUND Handle = 0x8031003C - FVE_E_KEYFILE_INVALID Handle = 0x8031003D - FVE_E_KEYFILE_NO_VMK Handle = 0x8031003E - FVE_E_TPM_DISABLED Handle = 0x8031003F - FVE_E_NOT_ALLOWED_IN_SAFE_MODE Handle = 0x80310040 - FVE_E_TPM_INVALID_PCR Handle = 0x80310041 - FVE_E_TPM_NO_VMK Handle = 0x80310042 - FVE_E_PIN_INVALID Handle = 0x80310043 - FVE_E_AUTH_INVALID_APPLICATION Handle = 0x80310044 - FVE_E_AUTH_INVALID_CONFIG Handle = 0x80310045 - FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED Handle = 0x80310046 - FVE_E_FS_NOT_EXTENDED Handle = 0x80310047 - FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED Handle = 0x80310048 - FVE_E_NO_LICENSE Handle = 0x80310049 - FVE_E_NOT_ON_STACK Handle = 0x8031004A - FVE_E_FS_MOUNTED Handle = 0x8031004B - FVE_E_TOKEN_NOT_IMPERSONATED Handle = 0x8031004C - FVE_E_DRY_RUN_FAILED Handle = 0x8031004D - FVE_E_REBOOT_REQUIRED Handle = 0x8031004E - FVE_E_DEBUGGER_ENABLED Handle = 0x8031004F - FVE_E_RAW_ACCESS Handle = 0x80310050 - FVE_E_RAW_BLOCKED Handle = 0x80310051 - FVE_E_BCD_APPLICATIONS_PATH_INCORRECT Handle = 0x80310052 - FVE_E_NOT_ALLOWED_IN_VERSION Handle = 0x80310053 - FVE_E_NO_AUTOUNLOCK_MASTER_KEY Handle = 0x80310054 - FVE_E_MOR_FAILED Handle = 0x80310055 - FVE_E_HIDDEN_VOLUME Handle = 0x80310056 - FVE_E_TRANSIENT_STATE Handle = 0x80310057 - FVE_E_PUBKEY_NOT_ALLOWED Handle = 0x80310058 - FVE_E_VOLUME_HANDLE_OPEN Handle = 0x80310059 - FVE_E_NO_FEATURE_LICENSE Handle = 0x8031005A - FVE_E_INVALID_STARTUP_OPTIONS Handle = 0x8031005B - FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED Handle = 0x8031005C - FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED Handle = 0x8031005D - FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED Handle = 0x8031005E - FVE_E_POLICY_RECOVERY_KEY_REQUIRED Handle = 0x8031005F - FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED Handle = 0x80310060 - FVE_E_POLICY_STARTUP_PIN_REQUIRED Handle = 0x80310061 - FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED Handle = 0x80310062 - FVE_E_POLICY_STARTUP_KEY_REQUIRED Handle = 0x80310063 - FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED Handle = 0x80310064 - FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED Handle = 0x80310065 - FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED Handle = 0x80310066 - FVE_E_POLICY_STARTUP_TPM_REQUIRED Handle = 0x80310067 - FVE_E_POLICY_INVALID_PIN_LENGTH Handle = 0x80310068 - FVE_E_KEY_PROTECTOR_NOT_SUPPORTED Handle = 0x80310069 - FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006A - FVE_E_POLICY_PASSPHRASE_REQUIRED Handle = 0x8031006B - FVE_E_FIPS_PREVENTS_PASSPHRASE Handle = 0x8031006C - FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED Handle = 0x8031006D - FVE_E_INVALID_BITLOCKER_OID Handle = 0x8031006E - FVE_E_VOLUME_TOO_SMALL Handle = 0x8031006F - FVE_E_DV_NOT_SUPPORTED_ON_FS Handle = 0x80310070 - FVE_E_DV_NOT_ALLOWED_BY_GP Handle = 0x80310071 - FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED Handle = 0x80310072 - FVE_E_POLICY_USER_CERTIFICATE_REQUIRED Handle = 0x80310073 - FVE_E_POLICY_USER_CERT_MUST_BE_HW Handle = 0x80310074 - FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310075 - FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED Handle = 0x80310076 - FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED Handle = 0x80310077 - FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED Handle = 0x80310078 - FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED Handle = 0x80310079 - FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH Handle = 0x80310080 - FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE Handle = 0x80310081 - FVE_E_RECOVERY_PARTITION Handle = 
0x80310082 - FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON Handle = 0x80310083 - FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON Handle = 0x80310084 - FVE_E_NON_BITLOCKER_OID Handle = 0x80310085 - FVE_E_POLICY_PROHIBITS_SELFSIGNED Handle = 0x80310086 - FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED Handle = 0x80310087 - FVE_E_CONV_RECOVERY_FAILED Handle = 0x80310088 - FVE_E_VIRTUALIZED_SPACE_TOO_BIG Handle = 0x80310089 - FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON Handle = 0x80310090 - FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON Handle = 0x80310091 - FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON Handle = 0x80310092 - FVE_E_NON_BITLOCKER_KU Handle = 0x80310093 - FVE_E_PRIVATEKEY_AUTH_FAILED Handle = 0x80310094 - FVE_E_REMOVAL_OF_DRA_FAILED Handle = 0x80310095 - FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME Handle = 0x80310096 - FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME Handle = 0x80310097 - FVE_E_FIPS_HASH_KDF_NOT_ALLOWED Handle = 0x80310098 - FVE_E_ENH_PIN_INVALID Handle = 0x80310099 - FVE_E_INVALID_PIN_CHARS Handle = 0x8031009A - FVE_E_INVALID_DATUM_TYPE Handle = 0x8031009B - FVE_E_EFI_ONLY Handle = 0x8031009C - FVE_E_MULTIPLE_NKP_CERTS Handle = 0x8031009D - FVE_E_REMOVAL_OF_NKP_FAILED Handle = 0x8031009E - FVE_E_INVALID_NKP_CERT Handle = 0x8031009F - FVE_E_NO_EXISTING_PIN Handle = 0x803100A0 - FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH Handle = 0x803100A1 - FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100A2 - FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED Handle = 0x803100A3 - FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII Handle = 0x803100A4 - FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A5 - FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE Handle = 0x803100A6 - FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE Handle = 0x803100A7 - FVE_E_NO_EXISTING_PASSPHRASE Handle = 0x803100A8 - FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH Handle = 0x803100A9 - FVE_E_PASSPHRASE_TOO_LONG Handle = 0x803100AA - FVE_E_NO_PASSPHRASE_WITH_TPM Handle = 0x803100AB - FVE_E_NO_TPM_WITH_PASSPHRASE Handle = 0x803100AC - FVE_E_NOT_ALLOWED_ON_CSV_STACK Handle = 0x803100AD - FVE_E_NOT_ALLOWED_ON_CLUSTER Handle = 0x803100AE - FVE_E_EDRIVE_NO_FAILOVER_TO_SW Handle = 0x803100AF - FVE_E_EDRIVE_BAND_IN_USE Handle = 0x803100B0 - FVE_E_EDRIVE_DISALLOWED_BY_GP Handle = 0x803100B1 - FVE_E_EDRIVE_INCOMPATIBLE_VOLUME Handle = 0x803100B2 - FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING Handle = 0x803100B3 - FVE_E_EDRIVE_DV_NOT_SUPPORTED Handle = 0x803100B4 - FVE_E_NO_PREBOOT_KEYBOARD_DETECTED Handle = 0x803100B5 - FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED Handle = 0x803100B6 - FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE Handle = 0x803100B7 - FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE Handle = 0x803100B8 - FVE_E_WIPE_CANCEL_NOT_APPLICABLE Handle = 0x803100B9 - FVE_E_SECUREBOOT_DISABLED Handle = 0x803100BA - FVE_E_SECUREBOOT_CONFIGURATION_INVALID Handle = 0x803100BB - FVE_E_EDRIVE_DRY_RUN_FAILED Handle = 0x803100BC - FVE_E_SHADOW_COPY_PRESENT Handle = 0x803100BD - FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS Handle = 0x803100BE - FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE Handle = 0x803100BF - FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED Handle = 0x803100C0 - FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED Handle = 0x803100C1 - FVE_E_LIVEID_ACCOUNT_SUSPENDED Handle = 0x803100C2 - FVE_E_LIVEID_ACCOUNT_BLOCKED Handle = 0x803100C3 - FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES Handle = 0x803100C4 - FVE_E_DE_FIXED_DATA_NOT_SUPPORTED Handle = 0x803100C5 - FVE_E_DE_HARDWARE_NOT_COMPLIANT Handle = 0x803100C6 - 
FVE_E_DE_WINRE_NOT_CONFIGURED Handle = 0x803100C7 - FVE_E_DE_PROTECTION_SUSPENDED Handle = 0x803100C8 - FVE_E_DE_OS_VOLUME_NOT_PROTECTED Handle = 0x803100C9 - FVE_E_DE_DEVICE_LOCKEDOUT Handle = 0x803100CA - FVE_E_DE_PROTECTION_NOT_YET_ENABLED Handle = 0x803100CB - FVE_E_INVALID_PIN_CHARS_DETAILED Handle = 0x803100CC - FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE Handle = 0x803100CD - FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH Handle = 0x803100CE - FVE_E_BUFFER_TOO_LARGE Handle = 0x803100CF - FVE_E_NO_SUCH_CAPABILITY_ON_TARGET Handle = 0x803100D0 - FVE_E_DE_PREVENTED_FOR_OS Handle = 0x803100D1 - FVE_E_DE_VOLUME_OPTED_OUT Handle = 0x803100D2 - FVE_E_DE_VOLUME_NOT_SUPPORTED Handle = 0x803100D3 - FVE_E_EOW_NOT_SUPPORTED_IN_VERSION Handle = 0x803100D4 - FVE_E_ADBACKUP_NOT_ENABLED Handle = 0x803100D5 - FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT Handle = 0x803100D6 - FVE_E_NOT_DE_VOLUME Handle = 0x803100D7 - FVE_E_PROTECTION_CANNOT_BE_DISABLED Handle = 0x803100D8 - FVE_E_OSV_KSR_NOT_ALLOWED Handle = 0x803100D9 - FWP_E_CALLOUT_NOT_FOUND Handle = 0x80320001 - FWP_E_CONDITION_NOT_FOUND Handle = 0x80320002 - FWP_E_FILTER_NOT_FOUND Handle = 0x80320003 - FWP_E_LAYER_NOT_FOUND Handle = 0x80320004 - FWP_E_PROVIDER_NOT_FOUND Handle = 0x80320005 - FWP_E_PROVIDER_CONTEXT_NOT_FOUND Handle = 0x80320006 - FWP_E_SUBLAYER_NOT_FOUND Handle = 0x80320007 - FWP_E_NOT_FOUND Handle = 0x80320008 - FWP_E_ALREADY_EXISTS Handle = 0x80320009 - FWP_E_IN_USE Handle = 0x8032000A - FWP_E_DYNAMIC_SESSION_IN_PROGRESS Handle = 0x8032000B - FWP_E_WRONG_SESSION Handle = 0x8032000C - FWP_E_NO_TXN_IN_PROGRESS Handle = 0x8032000D - FWP_E_TXN_IN_PROGRESS Handle = 0x8032000E - FWP_E_TXN_ABORTED Handle = 0x8032000F - FWP_E_SESSION_ABORTED Handle = 0x80320010 - FWP_E_INCOMPATIBLE_TXN Handle = 0x80320011 - FWP_E_TIMEOUT Handle = 0x80320012 - FWP_E_NET_EVENTS_DISABLED Handle = 0x80320013 - FWP_E_INCOMPATIBLE_LAYER Handle = 0x80320014 - FWP_E_KM_CLIENTS_ONLY Handle = 0x80320015 - FWP_E_LIFETIME_MISMATCH Handle = 0x80320016 - FWP_E_BUILTIN_OBJECT Handle = 0x80320017 - FWP_E_TOO_MANY_CALLOUTS Handle = 0x80320018 - FWP_E_NOTIFICATION_DROPPED Handle = 0x80320019 - FWP_E_TRAFFIC_MISMATCH Handle = 0x8032001A - FWP_E_INCOMPATIBLE_SA_STATE Handle = 0x8032001B - FWP_E_NULL_POINTER Handle = 0x8032001C - FWP_E_INVALID_ENUMERATOR Handle = 0x8032001D - FWP_E_INVALID_FLAGS Handle = 0x8032001E - FWP_E_INVALID_NET_MASK Handle = 0x8032001F - FWP_E_INVALID_RANGE Handle = 0x80320020 - FWP_E_INVALID_INTERVAL Handle = 0x80320021 - FWP_E_ZERO_LENGTH_ARRAY Handle = 0x80320022 - FWP_E_NULL_DISPLAY_NAME Handle = 0x80320023 - FWP_E_INVALID_ACTION_TYPE Handle = 0x80320024 - FWP_E_INVALID_WEIGHT Handle = 0x80320025 - FWP_E_MATCH_TYPE_MISMATCH Handle = 0x80320026 - FWP_E_TYPE_MISMATCH Handle = 0x80320027 - FWP_E_OUT_OF_BOUNDS Handle = 0x80320028 - FWP_E_RESERVED Handle = 0x80320029 - FWP_E_DUPLICATE_CONDITION Handle = 0x8032002A - FWP_E_DUPLICATE_KEYMOD Handle = 0x8032002B - FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002C - FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER Handle = 0x8032002D - FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER Handle = 0x8032002E - FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT Handle = 0x8032002F - FWP_E_INCOMPATIBLE_AUTH_METHOD Handle = 0x80320030 - FWP_E_INCOMPATIBLE_DH_GROUP Handle = 0x80320031 - FWP_E_EM_NOT_SUPPORTED Handle = 0x80320032 - FWP_E_NEVER_MATCH Handle = 0x80320033 - FWP_E_PROVIDER_CONTEXT_MISMATCH Handle = 0x80320034 - FWP_E_INVALID_PARAMETER Handle = 0x80320035 - FWP_E_TOO_MANY_SUBLAYERS Handle = 0x80320036 - FWP_E_CALLOUT_NOTIFICATION_FAILED Handle 
= 0x80320037 - FWP_E_INVALID_AUTH_TRANSFORM Handle = 0x80320038 - FWP_E_INVALID_CIPHER_TRANSFORM Handle = 0x80320039 - FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM Handle = 0x8032003A - FWP_E_INVALID_TRANSFORM_COMBINATION Handle = 0x8032003B - FWP_E_DUPLICATE_AUTH_METHOD Handle = 0x8032003C - FWP_E_INVALID_TUNNEL_ENDPOINT Handle = 0x8032003D - FWP_E_L2_DRIVER_NOT_READY Handle = 0x8032003E - FWP_E_KEY_DICTATOR_ALREADY_REGISTERED Handle = 0x8032003F - FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL Handle = 0x80320040 - FWP_E_CONNECTIONS_DISABLED Handle = 0x80320041 - FWP_E_INVALID_DNS_NAME Handle = 0x80320042 - FWP_E_STILL_ON Handle = 0x80320043 - FWP_E_IKEEXT_NOT_RUNNING Handle = 0x80320044 - FWP_E_DROP_NOICMP Handle = 0x80320104 - WS_S_ASYNC Handle = 0x003D0000 - WS_S_END Handle = 0x003D0001 - WS_E_INVALID_FORMAT Handle = 0x803D0000 - WS_E_OBJECT_FAULTED Handle = 0x803D0001 - WS_E_NUMERIC_OVERFLOW Handle = 0x803D0002 - WS_E_INVALID_OPERATION Handle = 0x803D0003 - WS_E_OPERATION_ABORTED Handle = 0x803D0004 - WS_E_ENDPOINT_ACCESS_DENIED Handle = 0x803D0005 - WS_E_OPERATION_TIMED_OUT Handle = 0x803D0006 - WS_E_OPERATION_ABANDONED Handle = 0x803D0007 - WS_E_QUOTA_EXCEEDED Handle = 0x803D0008 - WS_E_NO_TRANSLATION_AVAILABLE Handle = 0x803D0009 - WS_E_SECURITY_VERIFICATION_FAILURE Handle = 0x803D000A - WS_E_ADDRESS_IN_USE Handle = 0x803D000B - WS_E_ADDRESS_NOT_AVAILABLE Handle = 0x803D000C - WS_E_ENDPOINT_NOT_FOUND Handle = 0x803D000D - WS_E_ENDPOINT_NOT_AVAILABLE Handle = 0x803D000E - WS_E_ENDPOINT_FAILURE Handle = 0x803D000F - WS_E_ENDPOINT_UNREACHABLE Handle = 0x803D0010 - WS_E_ENDPOINT_ACTION_NOT_SUPPORTED Handle = 0x803D0011 - WS_E_ENDPOINT_TOO_BUSY Handle = 0x803D0012 - WS_E_ENDPOINT_FAULT_RECEIVED Handle = 0x803D0013 - WS_E_ENDPOINT_DISCONNECTED Handle = 0x803D0014 - WS_E_PROXY_FAILURE Handle = 0x803D0015 - WS_E_PROXY_ACCESS_DENIED Handle = 0x803D0016 - WS_E_NOT_SUPPORTED Handle = 0x803D0017 - WS_E_PROXY_REQUIRES_BASIC_AUTH Handle = 0x803D0018 - WS_E_PROXY_REQUIRES_DIGEST_AUTH Handle = 0x803D0019 - WS_E_PROXY_REQUIRES_NTLM_AUTH Handle = 0x803D001A - WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001B - WS_E_SERVER_REQUIRES_BASIC_AUTH Handle = 0x803D001C - WS_E_SERVER_REQUIRES_DIGEST_AUTH Handle = 0x803D001D - WS_E_SERVER_REQUIRES_NTLM_AUTH Handle = 0x803D001E - WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH Handle = 0x803D001F - WS_E_INVALID_ENDPOINT_URL Handle = 0x803D0020 - WS_E_OTHER Handle = 0x803D0021 - WS_E_SECURITY_TOKEN_EXPIRED Handle = 0x803D0022 - WS_E_SECURITY_SYSTEM_FAILURE Handle = 0x803D0023 - ERROR_NDIS_INTERFACE_CLOSING syscall.Errno = 0x80340002 - ERROR_NDIS_BAD_VERSION syscall.Errno = 0x80340004 - ERROR_NDIS_BAD_CHARACTERISTICS syscall.Errno = 0x80340005 - ERROR_NDIS_ADAPTER_NOT_FOUND syscall.Errno = 0x80340006 - ERROR_NDIS_OPEN_FAILED syscall.Errno = 0x80340007 - ERROR_NDIS_DEVICE_FAILED syscall.Errno = 0x80340008 - ERROR_NDIS_MULTICAST_FULL syscall.Errno = 0x80340009 - ERROR_NDIS_MULTICAST_EXISTS syscall.Errno = 0x8034000A - ERROR_NDIS_MULTICAST_NOT_FOUND syscall.Errno = 0x8034000B - ERROR_NDIS_REQUEST_ABORTED syscall.Errno = 0x8034000C - ERROR_NDIS_RESET_IN_PROGRESS syscall.Errno = 0x8034000D - ERROR_NDIS_NOT_SUPPORTED syscall.Errno = 0x803400BB - ERROR_NDIS_INVALID_PACKET syscall.Errno = 0x8034000F - ERROR_NDIS_ADAPTER_NOT_READY syscall.Errno = 0x80340011 - ERROR_NDIS_INVALID_LENGTH syscall.Errno = 0x80340014 - ERROR_NDIS_INVALID_DATA syscall.Errno = 0x80340015 - ERROR_NDIS_BUFFER_TOO_SHORT syscall.Errno = 0x80340016 - ERROR_NDIS_INVALID_OID syscall.Errno = 0x80340017 - 
ERROR_NDIS_ADAPTER_REMOVED syscall.Errno = 0x80340018 - ERROR_NDIS_UNSUPPORTED_MEDIA syscall.Errno = 0x80340019 - ERROR_NDIS_GROUP_ADDRESS_IN_USE syscall.Errno = 0x8034001A - ERROR_NDIS_FILE_NOT_FOUND syscall.Errno = 0x8034001B - ERROR_NDIS_ERROR_READING_FILE syscall.Errno = 0x8034001C - ERROR_NDIS_ALREADY_MAPPED syscall.Errno = 0x8034001D - ERROR_NDIS_RESOURCE_CONFLICT syscall.Errno = 0x8034001E - ERROR_NDIS_MEDIA_DISCONNECTED syscall.Errno = 0x8034001F - ERROR_NDIS_INVALID_ADDRESS syscall.Errno = 0x80340022 - ERROR_NDIS_INVALID_DEVICE_REQUEST syscall.Errno = 0x80340010 - ERROR_NDIS_PAUSED syscall.Errno = 0x8034002A - ERROR_NDIS_INTERFACE_NOT_FOUND syscall.Errno = 0x8034002B - ERROR_NDIS_UNSUPPORTED_REVISION syscall.Errno = 0x8034002C - ERROR_NDIS_INVALID_PORT syscall.Errno = 0x8034002D - ERROR_NDIS_INVALID_PORT_STATE syscall.Errno = 0x8034002E - ERROR_NDIS_LOW_POWER_STATE syscall.Errno = 0x8034002F - ERROR_NDIS_REINIT_REQUIRED syscall.Errno = 0x80340030 - ERROR_NDIS_NO_QUEUES syscall.Errno = 0x80340031 - ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED syscall.Errno = 0x80342000 - ERROR_NDIS_DOT11_MEDIA_IN_USE syscall.Errno = 0x80342001 - ERROR_NDIS_DOT11_POWER_STATE_INVALID syscall.Errno = 0x80342002 - ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL syscall.Errno = 0x80342003 - ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL syscall.Errno = 0x80342004 - ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342005 - ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE syscall.Errno = 0x80342006 - ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED syscall.Errno = 0x80342007 - ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED syscall.Errno = 0x80342008 - ERROR_NDIS_INDICATION_REQUIRED syscall.Errno = 0x00340001 - ERROR_NDIS_OFFLOAD_POLICY syscall.Errno = 0xC034100F - ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED syscall.Errno = 0xC0341012 - ERROR_NDIS_OFFLOAD_PATH_REJECTED syscall.Errno = 0xC0341013 - ERROR_HV_INVALID_HYPERCALL_CODE syscall.Errno = 0xC0350002 - ERROR_HV_INVALID_HYPERCALL_INPUT syscall.Errno = 0xC0350003 - ERROR_HV_INVALID_ALIGNMENT syscall.Errno = 0xC0350004 - ERROR_HV_INVALID_PARAMETER syscall.Errno = 0xC0350005 - ERROR_HV_ACCESS_DENIED syscall.Errno = 0xC0350006 - ERROR_HV_INVALID_PARTITION_STATE syscall.Errno = 0xC0350007 - ERROR_HV_OPERATION_DENIED syscall.Errno = 0xC0350008 - ERROR_HV_UNKNOWN_PROPERTY syscall.Errno = 0xC0350009 - ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE syscall.Errno = 0xC035000A - ERROR_HV_INSUFFICIENT_MEMORY syscall.Errno = 0xC035000B - ERROR_HV_PARTITION_TOO_DEEP syscall.Errno = 0xC035000C - ERROR_HV_INVALID_PARTITION_ID syscall.Errno = 0xC035000D - ERROR_HV_INVALID_VP_INDEX syscall.Errno = 0xC035000E - ERROR_HV_INVALID_PORT_ID syscall.Errno = 0xC0350011 - ERROR_HV_INVALID_CONNECTION_ID syscall.Errno = 0xC0350012 - ERROR_HV_INSUFFICIENT_BUFFERS syscall.Errno = 0xC0350013 - ERROR_HV_NOT_ACKNOWLEDGED syscall.Errno = 0xC0350014 - ERROR_HV_INVALID_VP_STATE syscall.Errno = 0xC0350015 - ERROR_HV_ACKNOWLEDGED syscall.Errno = 0xC0350016 - ERROR_HV_INVALID_SAVE_RESTORE_STATE syscall.Errno = 0xC0350017 - ERROR_HV_INVALID_SYNIC_STATE syscall.Errno = 0xC0350018 - ERROR_HV_OBJECT_IN_USE syscall.Errno = 0xC0350019 - ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO syscall.Errno = 0xC035001A - ERROR_HV_NO_DATA syscall.Errno = 0xC035001B - ERROR_HV_INACTIVE syscall.Errno = 0xC035001C - ERROR_HV_NO_RESOURCES syscall.Errno = 0xC035001D - ERROR_HV_FEATURE_UNAVAILABLE syscall.Errno = 0xC035001E - ERROR_HV_INSUFFICIENT_BUFFER syscall.Errno = 0xC0350033 - ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS syscall.Errno = 0xC0350038 - 
ERROR_HV_CPUID_FEATURE_VALIDATION syscall.Errno = 0xC035003C - ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION syscall.Errno = 0xC035003D - ERROR_HV_PROCESSOR_STARTUP_TIMEOUT syscall.Errno = 0xC035003E - ERROR_HV_SMX_ENABLED syscall.Errno = 0xC035003F - ERROR_HV_INVALID_LP_INDEX syscall.Errno = 0xC0350041 - ERROR_HV_INVALID_REGISTER_VALUE syscall.Errno = 0xC0350050 - ERROR_HV_INVALID_VTL_STATE syscall.Errno = 0xC0350051 - ERROR_HV_NX_NOT_DETECTED syscall.Errno = 0xC0350055 - ERROR_HV_INVALID_DEVICE_ID syscall.Errno = 0xC0350057 - ERROR_HV_INVALID_DEVICE_STATE syscall.Errno = 0xC0350058 - ERROR_HV_PENDING_PAGE_REQUESTS syscall.Errno = 0x00350059 - ERROR_HV_PAGE_REQUEST_INVALID syscall.Errno = 0xC0350060 - ERROR_HV_INVALID_CPU_GROUP_ID syscall.Errno = 0xC035006F - ERROR_HV_INVALID_CPU_GROUP_STATE syscall.Errno = 0xC0350070 - ERROR_HV_OPERATION_FAILED syscall.Errno = 0xC0350071 - ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE syscall.Errno = 0xC0350072 - ERROR_HV_INSUFFICIENT_ROOT_MEMORY syscall.Errno = 0xC0350073 - ERROR_HV_NOT_PRESENT syscall.Errno = 0xC0351000 - ERROR_VID_DUPLICATE_HANDLER syscall.Errno = 0xC0370001 - ERROR_VID_TOO_MANY_HANDLERS syscall.Errno = 0xC0370002 - ERROR_VID_QUEUE_FULL syscall.Errno = 0xC0370003 - ERROR_VID_HANDLER_NOT_PRESENT syscall.Errno = 0xC0370004 - ERROR_VID_INVALID_OBJECT_NAME syscall.Errno = 0xC0370005 - ERROR_VID_PARTITION_NAME_TOO_LONG syscall.Errno = 0xC0370006 - ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG syscall.Errno = 0xC0370007 - ERROR_VID_PARTITION_ALREADY_EXISTS syscall.Errno = 0xC0370008 - ERROR_VID_PARTITION_DOES_NOT_EXIST syscall.Errno = 0xC0370009 - ERROR_VID_PARTITION_NAME_NOT_FOUND syscall.Errno = 0xC037000A - ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS syscall.Errno = 0xC037000B - ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT syscall.Errno = 0xC037000C - ERROR_VID_MB_STILL_REFERENCED syscall.Errno = 0xC037000D - ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED syscall.Errno = 0xC037000E - ERROR_VID_INVALID_NUMA_SETTINGS syscall.Errno = 0xC037000F - ERROR_VID_INVALID_NUMA_NODE_INDEX syscall.Errno = 0xC0370010 - ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED syscall.Errno = 0xC0370011 - ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE syscall.Errno = 0xC0370012 - ERROR_VID_PAGE_RANGE_OVERFLOW syscall.Errno = 0xC0370013 - ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE syscall.Errno = 0xC0370014 - ERROR_VID_INVALID_GPA_RANGE_HANDLE syscall.Errno = 0xC0370015 - ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE syscall.Errno = 0xC0370016 - ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED syscall.Errno = 0xC0370017 - ERROR_VID_INVALID_PPM_HANDLE syscall.Errno = 0xC0370018 - ERROR_VID_MBPS_ARE_LOCKED syscall.Errno = 0xC0370019 - ERROR_VID_MESSAGE_QUEUE_CLOSED syscall.Errno = 0xC037001A - ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED syscall.Errno = 0xC037001B - ERROR_VID_STOP_PENDING syscall.Errno = 0xC037001C - ERROR_VID_INVALID_PROCESSOR_STATE syscall.Errno = 0xC037001D - ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT syscall.Errno = 0xC037001E - ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED syscall.Errno = 0xC037001F - ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET syscall.Errno = 0xC0370020 - ERROR_VID_MMIO_RANGE_DESTROYED syscall.Errno = 0xC0370021 - ERROR_VID_INVALID_CHILD_GPA_PAGE_SET syscall.Errno = 0xC0370022 - ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED syscall.Errno = 0xC0370023 - ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL syscall.Errno = 0xC0370024 - ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE syscall.Errno = 0xC0370025 - ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT syscall.Errno = 0xC0370026 - ERROR_VID_SAVED_STATE_CORRUPT 
syscall.Errno = 0xC0370027 - ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM syscall.Errno = 0xC0370028 - ERROR_VID_SAVED_STATE_INCOMPATIBLE syscall.Errno = 0xC0370029 - ERROR_VID_VTL_ACCESS_DENIED syscall.Errno = 0xC037002A - ERROR_VMCOMPUTE_TERMINATED_DURING_START syscall.Errno = 0xC0370100 - ERROR_VMCOMPUTE_IMAGE_MISMATCH syscall.Errno = 0xC0370101 - ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED syscall.Errno = 0xC0370102 - ERROR_VMCOMPUTE_OPERATION_PENDING syscall.Errno = 0xC0370103 - ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS syscall.Errno = 0xC0370104 - ERROR_VMCOMPUTE_INVALID_STATE syscall.Errno = 0xC0370105 - ERROR_VMCOMPUTE_UNEXPECTED_EXIT syscall.Errno = 0xC0370106 - ERROR_VMCOMPUTE_TERMINATED syscall.Errno = 0xC0370107 - ERROR_VMCOMPUTE_CONNECT_FAILED syscall.Errno = 0xC0370108 - ERROR_VMCOMPUTE_TIMEOUT syscall.Errno = 0xC0370109 - ERROR_VMCOMPUTE_CONNECTION_CLOSED syscall.Errno = 0xC037010A - ERROR_VMCOMPUTE_UNKNOWN_MESSAGE syscall.Errno = 0xC037010B - ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION syscall.Errno = 0xC037010C - ERROR_VMCOMPUTE_INVALID_JSON syscall.Errno = 0xC037010D - ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND syscall.Errno = 0xC037010E - ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS syscall.Errno = 0xC037010F - ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED syscall.Errno = 0xC0370110 - ERROR_VMCOMPUTE_PROTOCOL_ERROR syscall.Errno = 0xC0370111 - ERROR_VMCOMPUTE_INVALID_LAYER syscall.Errno = 0xC0370112 - ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED syscall.Errno = 0xC0370113 - HCS_E_TERMINATED_DURING_START Handle = 0x80370100 - HCS_E_IMAGE_MISMATCH Handle = 0x80370101 - HCS_E_HYPERV_NOT_INSTALLED Handle = 0x80370102 - HCS_E_INVALID_STATE Handle = 0x80370105 - HCS_E_UNEXPECTED_EXIT Handle = 0x80370106 - HCS_E_TERMINATED Handle = 0x80370107 - HCS_E_CONNECT_FAILED Handle = 0x80370108 - HCS_E_CONNECTION_TIMEOUT Handle = 0x80370109 - HCS_E_CONNECTION_CLOSED Handle = 0x8037010A - HCS_E_UNKNOWN_MESSAGE Handle = 0x8037010B - HCS_E_UNSUPPORTED_PROTOCOL_VERSION Handle = 0x8037010C - HCS_E_INVALID_JSON Handle = 0x8037010D - HCS_E_SYSTEM_NOT_FOUND Handle = 0x8037010E - HCS_E_SYSTEM_ALREADY_EXISTS Handle = 0x8037010F - HCS_E_SYSTEM_ALREADY_STOPPED Handle = 0x80370110 - HCS_E_PROTOCOL_ERROR Handle = 0x80370111 - HCS_E_INVALID_LAYER Handle = 0x80370112 - HCS_E_WINDOWS_INSIDER_REQUIRED Handle = 0x80370113 - HCS_E_SERVICE_NOT_AVAILABLE Handle = 0x80370114 - HCS_E_OPERATION_NOT_STARTED Handle = 0x80370115 - HCS_E_OPERATION_ALREADY_STARTED Handle = 0x80370116 - HCS_E_OPERATION_PENDING Handle = 0x80370117 - HCS_E_OPERATION_TIMEOUT Handle = 0x80370118 - HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET Handle = 0x80370119 - HCS_E_OPERATION_RESULT_ALLOCATION_FAILED Handle = 0x8037011A - HCS_E_ACCESS_DENIED Handle = 0x8037011B - HCS_E_GUEST_CRITICAL_ERROR Handle = 0x8037011C - ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND syscall.Errno = 0xC0370200 - ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED syscall.Errno = 0x80370001 - WHV_E_UNKNOWN_CAPABILITY Handle = 0x80370300 - WHV_E_INSUFFICIENT_BUFFER Handle = 0x80370301 - WHV_E_UNKNOWN_PROPERTY Handle = 0x80370302 - WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG Handle = 0x80370303 - WHV_E_INVALID_PARTITION_CONFIG Handle = 0x80370304 - WHV_E_GPA_RANGE_NOT_FOUND Handle = 0x80370305 - WHV_E_VP_ALREADY_EXISTS Handle = 0x80370306 - WHV_E_VP_DOES_NOT_EXIST Handle = 0x80370307 - WHV_E_INVALID_VP_STATE Handle = 0x80370308 - WHV_E_INVALID_VP_REGISTER_NAME Handle = 0x80370309 - ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND syscall.Errno = 0xC0370400 - ERROR_VSMB_SAVED_STATE_CORRUPT syscall.Errno = 0xC0370401 - 
ERROR_VOLMGR_INCOMPLETE_REGENERATION syscall.Errno = 0x80380001 - ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION syscall.Errno = 0x80380002 - ERROR_VOLMGR_DATABASE_FULL syscall.Errno = 0xC0380001 - ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED syscall.Errno = 0xC0380002 - ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC syscall.Errno = 0xC0380003 - ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED syscall.Errno = 0xC0380004 - ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME syscall.Errno = 0xC0380005 - ERROR_VOLMGR_DISK_DUPLICATE syscall.Errno = 0xC0380006 - ERROR_VOLMGR_DISK_DYNAMIC syscall.Errno = 0xC0380007 - ERROR_VOLMGR_DISK_ID_INVALID syscall.Errno = 0xC0380008 - ERROR_VOLMGR_DISK_INVALID syscall.Errno = 0xC0380009 - ERROR_VOLMGR_DISK_LAST_VOTER syscall.Errno = 0xC038000A - ERROR_VOLMGR_DISK_LAYOUT_INVALID syscall.Errno = 0xC038000B - ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS syscall.Errno = 0xC038000C - ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED syscall.Errno = 0xC038000D - ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL syscall.Errno = 0xC038000E - ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS syscall.Errno = 0xC038000F - ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS syscall.Errno = 0xC0380010 - ERROR_VOLMGR_DISK_MISSING syscall.Errno = 0xC0380011 - ERROR_VOLMGR_DISK_NOT_EMPTY syscall.Errno = 0xC0380012 - ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE syscall.Errno = 0xC0380013 - ERROR_VOLMGR_DISK_REVECTORING_FAILED syscall.Errno = 0xC0380014 - ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID syscall.Errno = 0xC0380015 - ERROR_VOLMGR_DISK_SET_NOT_CONTAINED syscall.Errno = 0xC0380016 - ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS syscall.Errno = 0xC0380017 - ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES syscall.Errno = 0xC0380018 - ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED syscall.Errno = 0xC0380019 - ERROR_VOLMGR_EXTENT_ALREADY_USED syscall.Errno = 0xC038001A - ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS syscall.Errno = 0xC038001B - ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION syscall.Errno = 0xC038001C - ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED syscall.Errno = 0xC038001D - ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION syscall.Errno = 0xC038001E - ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH syscall.Errno = 0xC038001F - ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED syscall.Errno = 0xC0380020 - ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID syscall.Errno = 0xC0380021 - ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS syscall.Errno = 0xC0380022 - ERROR_VOLMGR_MEMBER_IN_SYNC syscall.Errno = 0xC0380023 - ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE syscall.Errno = 0xC0380024 - ERROR_VOLMGR_MEMBER_INDEX_INVALID syscall.Errno = 0xC0380025 - ERROR_VOLMGR_MEMBER_MISSING syscall.Errno = 0xC0380026 - ERROR_VOLMGR_MEMBER_NOT_DETACHED syscall.Errno = 0xC0380027 - ERROR_VOLMGR_MEMBER_REGENERATING syscall.Errno = 0xC0380028 - ERROR_VOLMGR_ALL_DISKS_FAILED syscall.Errno = 0xC0380029 - ERROR_VOLMGR_NO_REGISTERED_USERS syscall.Errno = 0xC038002A - ERROR_VOLMGR_NO_SUCH_USER syscall.Errno = 0xC038002B - ERROR_VOLMGR_NOTIFICATION_RESET syscall.Errno = 0xC038002C - ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID syscall.Errno = 0xC038002D - ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID syscall.Errno = 0xC038002E - ERROR_VOLMGR_PACK_DUPLICATE syscall.Errno = 0xC038002F - ERROR_VOLMGR_PACK_ID_INVALID syscall.Errno = 0xC0380030 - ERROR_VOLMGR_PACK_INVALID syscall.Errno = 0xC0380031 - ERROR_VOLMGR_PACK_NAME_INVALID syscall.Errno = 0xC0380032 - ERROR_VOLMGR_PACK_OFFLINE syscall.Errno = 0xC0380033 - ERROR_VOLMGR_PACK_HAS_QUORUM syscall.Errno = 0xC0380034 - ERROR_VOLMGR_PACK_WITHOUT_QUORUM 
syscall.Errno = 0xC0380035
-	[... remainder of the generated Windows error-constant table deleted with this vendored file: ERROR_VOLMGR_*, ERROR_BCD_*, ERROR_VHD_*/ERROR_VIRTDISK_*, HCN_E_*, SDIAG_*, WPN_E_*, E_MBN_*, PEER_E_*, UI_E_*, E_BLUETOOTH_ATT_*, STATEREPOSITORY_*, ERROR_SPACES_*, ERROR_TIERING_*, ERROR_CLIP_*, DXGI_*, D3D10/D3D11/D3D12_*, D2DERR_*, DWRITE_E_*, WINCODEC_ERR_*, MILERR_*, UCEERR_*, MILAVERR_*, MILEFFECTSERR_*, DWMERR_*, DCOMPOSITION_*, ONL_E_*, E_SYNCENGINE_*, EAS_E_*, WEB_E_*, HTTP_E_STATUS_*, INPUT_E_*, INET_E_*, WEP_E_*, ERROR_SVHDX_*, WININET_E_*, SQLITE_E_*, and UTC_E_* syscall.Errno and Handle values ...]
-	UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED Handle = 0x87C5105B
-	WINML_ERR_INVALID_DEVICE Handle = 0x88900001
-	WINML_ERR_INVALID_BINDING Handle = 0x88900002
-	WINML_ERR_VALUE_NOTFOUND Handle = 0x88900003
-	WINML_ERR_SIZE_MISMATCH Handle = 0x88900004
-)
diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
deleted file mode 100644
index 6048ac6..0000000
--- a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT.
-
-package windows
-
-type KNOWNFOLDERID GUID
-
-var (
-	FOLDERID_NetworkFolder = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}}
-	[... the remaining generated FOLDERID_* = &KNOWNFOLDERID{...} GUID definitions (FOLDERID_ComputerFolder through FOLDERID_LocalDownloads) deleted with this file ...]
-	FOLDERID_RecordedCalls = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec,
-	FOLDERID_AppDataProgramData = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}}
-)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
deleted file mode 100644
index 2aa4fa6..0000000
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ /dev/null
@@ -1,4051 +0,0 @@
-// Code generated by 'go generate'; DO NOT EDIT.
-
-package windows
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-var _ unsafe.Pointer
-
-// Do the interface allocations only once for common
-// Errno values.
-const (
-	errnoERROR_IO_PENDING = 997
-)
-
-var (
-	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
-)
-
-// errnoErr returns common boxed Errno values, to prevent
-// allocations at runtime.
-func errnoErr(e syscall.Errno) error {
-	switch e {
-	case 0:
-		return nil
-	case errnoERROR_IO_PENDING:
-		return errERROR_IO_PENDING
-	}
-	// TODO: add more here, after collecting data on the common
-	// error values see on Windows. (perhaps when running
-	// all.bat?)
-	return e
-}
-
-var (
-	modadvapi32 = NewLazySystemDLL("advapi32.dll")
-	modkernel32 = NewLazySystemDLL("kernel32.dll")
-	modshell32  = NewLazySystemDLL("shell32.dll")
-	moduserenv  = NewLazySystemDLL("userenv.dll")
-	modmswsock  = NewLazySystemDLL("mswsock.dll")
-	modcrypt32  = NewLazySystemDLL("crypt32.dll")
-	moduser32   = NewLazySystemDLL("user32.dll")
-	modole32    = NewLazySystemDLL("ole32.dll")
-	modntdll    = NewLazySystemDLL("ntdll.dll")
-	modpsapi    = NewLazySystemDLL("psapi.dll")
-	modws2_32   = NewLazySystemDLL("ws2_32.dll")
-	moddnsapi   = NewLazySystemDLL("dnsapi.dll")
-	modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
-	modsecur32  = NewLazySystemDLL("secur32.dll")
-	modnetapi32 = NewLazySystemDLL("netapi32.dll")
-	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
-
-	procRegisterEventSourceW  = modadvapi32.NewProc("RegisterEventSourceW")
-	procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource")
[... ~250 further deleted proc declarations, procReportEventW through procConvertSecurityDescriptorToStringSecurityDescriptorW, omitted ...]
procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") - procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") - procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") - procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") - procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") - procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") - procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") - procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") - procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") - procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") - procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") - procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") - procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") - procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") - procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") - procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") -) - -func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeregisterEventSource(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CloseServiceHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { - r0, _, e1 := 
syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeleteService(service Handle) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { - r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { - r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), 
uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { - r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) { - r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier))) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func GetLastError() (lasterr error) { - r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) - if r0 != 0 { - lasterr = syscall.Errno(r0) - } - return -} - -func LoadLibrary(libname string) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibrary(_p0) -} - -func _LoadLibrary(libname *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, 
uintptr(unsafe.Pointer(libname)), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(libname) - if err != nil { - return - } - return _LoadLibraryEx(_p0, zero, flags) -} - -func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FreeLibrary(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(procname) - if err != nil { - return - } - return _GetProcAddress(module, _p0) -} - -func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { - r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) - proc = uintptr(r0) - if proc == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVersion() (ver uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) - ver = uint32(r0) - if ver == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { - var _p0 *uint16 - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ExitProcess(exitcode uint32) { - syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) - return -} - -func IsWow64Process(handle Handle, isWow64 *bool) (err error) { - var _p0 uint32 - if *isWow64 { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) - *isWow64 = _p0 != 0 - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateFile(name *uint16, access uint32, mode 
uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) { - var _p0 uint32 - if wait { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { - r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) - newlowoffset = uint32(r0) - if newlowoffset == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CloseHandle(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetStdHandle(stdhandle uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetStdHandle(stdhandle uint32, handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func findNextFile1(handle Handle, data *win32finddata1) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 
2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindClose(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetCurrentDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { - r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RemoveDirectory(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeleteFile(path *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MoveFile(from *uint16, to *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func UnlockFileEx(file 
Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetComputerName(buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetEndOfFile(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetSystemTimeAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) - return -} - -func GetSystemTimePreciseAsFileTime(time *Filetime) { - syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) - return -} - -func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) - rc = uint32(r0) - if rc == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CancelIo(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CancelIoEx(s Handle, o *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - 
err = syscall.EINVAL - } - } - return -} - -func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { - var _p0 uint32 - if inheritHandles { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { - r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) { - r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func TerminateProcess(handle Handle, exitcode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetStartupInfo(startupInfo *StartupInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, 
dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { - var _p0 uint32 - if bInheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { - r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { - var _p0 uint32 - if waitAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0) - event = uint32(r0) - if event == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileType(filehandle Handle) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptReleaseContext(provhandle Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { - r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetEnvironmentStrings() (envs *uint16, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) - envs = (*uint16)(unsafe.Pointer(r0)) - if envs == 
nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FreeEnvironmentStrings(envs *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) { - var _p0 uint32 - if inheritExisting { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DestroyEnvironmentBlock(block *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getTickCount64() (ms uint64) { - r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0) - ms = uint64(r0) - return -} - -func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileAttributes(name *uint16) (attrs uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - attrs = uint32(r0) - if attrs == INVALID_FILE_ATTRIBUTES { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetFileAttributes(name *uint16, attrs uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { - r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCommandLine() (cmd *uint16) { - r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) - cmd = (*uint16)(unsafe.Pointer(r0)) - return -} - -func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { - r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), 
uintptr(unsafe.Pointer(argc)), 0) - argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) - if argv == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func LocalFree(hmem Handle) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) - handle = Handle(r0) - if handle != 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FlushFileBuffers(handle Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) - addr = uintptr(r0) - if addr == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func UnmapViewOfFile(addr uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FlushViewOfFile(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } 
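[Editor's note, not part of the patch] The removed hunks in this region are, by all appearances, the mkwinsyscall-generated wrappers from the vendored golang.org/x/sys/windows package (zsyscall_windows.go), dropped as part of this release's dependency bumps. Every wrapper follows one template: resolve the proc lazily, push the arguments through syscall.Syscall*, widen Go bools to a 0/1 uint32 (the _p0 locals above), and translate the API's failure sentinel into the GetLastError value (e1) or syscall.EINVAL when no code was set. A minimal hand-written sketch of that template, modeled on the OpenEventW wrapper that appears later in this file (openEvent and main here are illustrative only; the generated code additionally routes e1 through a cached errnoErr helper):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    	"unsafe"
    )

    var (
    	modkernel32   = syscall.NewLazyDLL("kernel32.dll")
    	procOpenEvent = modkernel32.NewProc("OpenEventW")
    )

    // openEvent mirrors the generated wrapper shape: bool -> 0/1 BOOL,
    // NULL handle -> error from GetLastError, EINVAL as the fallback.
    func openEvent(access uint32, inherit bool, name *uint16) (syscall.Handle, error) {
    	var p0 uint32
    	if inherit {
    		p0 = 1 // Windows BOOL: any nonzero value is TRUE
    	}
    	r0, _, e1 := syscall.Syscall(procOpenEvent.Addr(), 3,
    		uintptr(access), uintptr(p0), uintptr(unsafe.Pointer(name)))
    	if r0 == 0 { // a NULL handle signals failure for this API
    		if e1 != 0 {
    			return 0, e1
    		}
    		return 0, syscall.EINVAL
    	}
    	return syscall.Handle(r0), nil
    }

    func main() {
    	name, _ := syscall.UTF16PtrFromString("Global\\example-event")
    	h, err := openEvent(0x1F0003 /* EVENT_ALL_ACCESS */, false, name)
    	fmt.Println(h, err)
    }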
- return -} - -func VirtualLock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualUnlock(addr uintptr, length uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { - r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) - value = uintptr(r0) - if value == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { - r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - var _p0 uint32 - if watchSubTree { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { - r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) - store = Handle(r0) - if store == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func 
CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { - r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertCloseStore(store Handle, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { - r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertFreeCertificateChain(ctx *CertChainContext) { - syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - return -} - -func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { - r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) - context = (*CertContext)(unsafe.Pointer(r0)) - if context == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertFreeCertificateContext(ctx *CertContext) (err error) { - r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { - r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegCloseKey(key Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, 
uintptr(key), 0, 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { - r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) - if r0 != 0 { - regerrno = syscall.Errno(r0) - } - return -} - -func GetCurrentProcessId() (pid uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) - pid = uint32(r0) - return -} - -func GetConsoleMode(console Handle, mode *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetConsoleMode(console Handle, mode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { - r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { - r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), 
uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) { - r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { - r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) - if r1&0xff == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCurrentThreadId() (id uint32) { - r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) - id = uint32(r0) - return -} - -func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 
0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ResetEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func PulseEvent(event Handle) (err error) { - r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if initialOwner { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ReleaseMutex(mutex Handle) (err error) { - r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { - var _p0 uint32 - if alertable { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0) - ret = uint32(r0) - return -} - -func CreateJobObject(jobAttr 
*SecurityAttributes, name *uint16) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AssignProcessToJobObject(job Handle, process Handle) (err error) { - r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func TerminateJobObject(job Handle, exitCode uint32) (err error) { - r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetErrorMode(mode uint32) (ret uint32) { - r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0) - ret = uint32(r0) - return -} - -func ResumeThread(thread Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0) - ret = uint32(r0) - if ret == 0xffffffff { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetPriorityClass(process Handle, priorityClass uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetPriorityClass(process Handle) (ret uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0) - ret = uint32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) { - r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0) - ret = int(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessId(process Handle) (id uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0) - id = uint32(r0) - if id == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) { - var _p0 uint32 - if inheritHandle { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId)) - handle = Handle(r0) - if handle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessPriorityBoost(process Handle, disable bool) (err error) { - var _p0 uint32 - if disable { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := 
syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindVolumeClose(findVolume Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { - r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetDriveType(rootPathName *uint16) (driveType uint32) { - r0, _, _ := 
syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) - driveType = uint32(r0) - return -} - -func GetLogicalDrives() (drivesBitMask uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) - drivesBitMask = uint32(r0) - if drivesBitMask == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func QueryDosDevice(deviceName *uint16, 
targetPath *uint16, max uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { - r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) { - r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0) - ret = int32(r0) - if ret == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func ExitWindowsEx(flags uint32, reason uint32) (err error) { - r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { - var _p0 uint32 - if forceAppsClosed { - _p0 = 1 - } else { - _p0 = 0 - } - var _p1 uint32 - if rebootAfterShutdown { - _p1 = 1 - } else { - _p1 = 0 - } - r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { - r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { - r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) { - r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax)) - chars = int32(r0) - return -} - -func coCreateGuid(pguid *GUID) (ret error) { - r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0) 
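[Editor's note, not part of the patch] Three failure conventions are interleaved in this file, and the wrapper bodies differ accordingly: kernel-style calls (CreatePipe, SetFileTime, ...) treat a zero or sentinel return as failure and fetch the code via GetLastError; LSTATUS/HRESULT-style calls (RegOpenKeyEx, RegCloseKey, clsidFromString, coCreateGuid) return the error code itself, so a nonzero result converts straight to syscall.Errno with no GetLastError involved; the Winsock wrappers further below compare r1 against socket_error, defined upstream as uintptr(^uint32(0)), i.e. SOCKET_ERROR. A compact sketch of the three branches, assuming those conventions (classify is a made-up helper for illustration, not upstream code):

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    )

    // socketError matches the generated file's socket_error constant:
    // SOCKET_ERROR (-1) as it comes back through syscall.Syscall.
    const socketError = uintptr(^uint32(0))

    // classify is a hypothetical helper showing how each convention
    // turns a raw (r1, e1) pair into a Go error.
    func classify(r1 uintptr, e1 syscall.Errno, style string) error {
    	switch style {
    	case "kernel": // e.g. CreatePipe: r1 == 0 means failure
    		if r1 == 0 {
    			if e1 != 0 {
    				return e1
    			}
    			return syscall.EINVAL
    		}
    	case "status": // e.g. RegCloseKey: r1 IS the error code
    		if r1 != 0 {
    			return syscall.Errno(r1)
    		}
    	case "winsock": // e.g. closesocket: SOCKET_ERROR means failure
    		if r1 == socketError {
    			if e1 != 0 {
    				return e1
    			}
    			return syscall.EINVAL
    		}
    	}
    	return nil
    }

    func main() {
    	// 5 == ERROR_ACCESS_DENIED; a status-style API returning it
    	// surfaces directly as that Errno.
    	fmt.Println(classify(5, 0, "status"))
    }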
- if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func CoTaskMemFree(address unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0) - return -} - -func rtlGetVersion(info *OsVersionInfoEx) (ret error) { - r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0) - if r0 != 0 { - ret = syscall.Errno(r0) - } - return -} - -func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { - syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) - return -} - -func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { - var _p0 *uint32 - if len(processIds) > 0 { - _p0 = &processIds[0] - } - r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(processIds)), uintptr(unsafe.Pointer(bytesReturned))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { - r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func WSACleanup() (err error) { - r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { - r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), 
uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { - r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) - handle = Handle(r0) - if handle == InvalidHandle { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) { - var _p0 *byte - if len(buf) > 0 { - _p0 = &buf[0] - } - r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen))) - n = int32(r0) - if n == -1 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { - r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { - r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { - r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { - r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func listen(s Handle, backlog int32) (err error) { - 
r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func shutdown(s Handle, how int32) (err error) { - r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Closesocket(s Handle) (err error) { - r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { - r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { - syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) - return -} - -func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { - r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), 
uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) - if r1 == socket_error { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetHostByName(name string) (h *Hostent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetHostByName(_p0) -} - -func _GetHostByName(name *byte) (h *Hostent, err error) { - r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - h = (*Hostent)(unsafe.Pointer(r0)) - if h == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetServByName(name string, proto string) (s *Servent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - var _p1 *byte - _p1, err = syscall.BytePtrFromString(proto) - if err != nil { - return - } - return _GetServByName(_p0, _p1) -} - -func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { - r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) - s = (*Servent)(unsafe.Pointer(r0)) - if s == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func Ntohs(netshort uint16) (u uint16) { - r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) - u = uint16(r0) - return -} - -func GetProtoByName(name string) (p *Protoent, err error) { - var _p0 *byte - _p0, err = syscall.BytePtrFromString(name) - if err != nil { - return - } - return _GetProtoByName(_p0) -} - -func _GetProtoByName(name *byte) (p *Protoent, err error) { - r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) - p = (*Protoent)(unsafe.Pointer(r0)) - if p == nil { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - var _p0 *uint16 - _p0, status = syscall.UTF16PtrFromString(name) - if status != nil { - return - } - return _DnsQuery(_p0, qtype, options, extra, qrs, pr) -} - -func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { - r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) - if r0 != 0 { - status = syscall.Errno(r0) - } - return -} - -func DnsRecordListFree(rl *DNSRecord, freetype uint32) { - syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) - return -} - -func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { - r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) - same = r0 != 0 - return -} - -func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { - r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) - if r0 != 0 { - sockerr = syscall.Errno(r0) - } - return -} - -func FreeAddrInfoW(addrinfo 
*AddrinfoW) {
-	syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
-	return
-}
-
-func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
-	if r0 != 0 {
-		errcode = syscall.Errno(r0)
-	}
-	return
-}
-
-func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
-	r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
-	if r0 != 0 {
-		errcode = syscall.Errno(r0)
-	}
-	return
-}
-
-func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
-	r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
-	n = int32(r0)
-	if n == -1 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
-	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
-	if r0 != 0 {
-		errcode = syscall.Errno(r0)
-	}
-	return
-}
-
-func GetACP() (acp uint32) {
-	r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
-	acp = uint32(r0)
-	return
-}
-
-func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
-	r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
-	nwrite = int32(r0)
-	if nwrite == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
-	if r1&0xff == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
-	if r1&0xff == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
-	r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
-	if r0 != 0 {
-		neterr = syscall.Errno(r0)
-	}
-	return
-}
-
-func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
-	if r0 != 0 {
-		neterr = syscall.Errno(r0)
-	}
-	return
-}
-
-func NetApiBufferFree(buf *byte) (neterr error) {
-	r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
-	if r0 != 0 {
-		neterr = syscall.Errno(r0)
-	}
-	return
-}
-
-func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
-	r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
-	r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func GetLengthSid(sid *SID) (len uint32) {
-	r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
-	len = uint32(r0)
-	return
-}
-
-func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
-	r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
-	r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
-	r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
-	isWellKnown = r0 != 0
-	return
-}
-
-func FreeSid(sid *SID) (err error) {
-	r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
-	if r1 != 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
-	r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
-	isEqual = r0 != 0
-	return
-}
-
-func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
-	r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
-	authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
-	return
-}
-
-func getSidSubAuthorityCount(sid *SID) (count *uint8) {
-	r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
-	count = (*uint8)(unsafe.Pointer(r0))
-	return
-}
-
-func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
-	r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
-	subAuthority = (*uint32)(unsafe.Pointer(r0))
-	return
-}
-
-func isValidSid(sid *SID) (isValid bool) {
-	r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
-	isValid = r0 != 0
-	return
-}
-
-func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
-	r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
-	r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) {
-	var _p0 uint32
-	if openAsSelf {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func ImpersonateSelf(impersonationlevel uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func RevertToSelf() (err error) {
-	r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func SetThreadToken(thread *Handle, token Token) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
-	r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) {
-	var _p0 uint32
-	if disableAllPrivileges {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) {
-	var _p0 uint32
-	if resetToDefault {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
-	r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
-	len = uint32(r0)
-	if len == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
-	len = uint32(r0)
-	if len == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
-	r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
-	len = uint32(r0)
-	if len == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func WTSQueryUserToken(session uint32, token *Token) (err error) {
-	r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func WTSFreeMemory(ptr uintptr) {
-	syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
-	return
-}
-
-func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
-	r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
-
-func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) {
-	syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
-	return
-}
-
-func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
-	var _p0 *uint16
-	_p0, ret = syscall.UTF16PtrFromString(objectName)
-	if ret != nil {
-		return
-	}
-	return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd)
-}
-
-func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
-	r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
-
-func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
-	var _p0 *uint16
-	_p0, ret = syscall.UTF16PtrFromString(objectName)
-	if ret != nil {
-		return
-	}
-	return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl)
-}
-
-func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
-	r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
-
-func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
-	r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
-
-func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) {
-	var _p0 uint32
-	if *daclPresent {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	var _p1 uint32
-	if *daclDefaulted {
-		_p1 = 1
-	} else {
-		_p1 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
-	*daclPresent = _p0 != 0
-	*daclDefaulted = _p1 != 0
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) {
-	var _p0 uint32
-	if *saclPresent {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	var _p1 uint32
-	if *saclDefaulted {
-		_p1 = 1
-	} else {
-		_p1 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
-	*saclPresent = _p0 != 0
-	*saclDefaulted = _p1 != 0
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) {
-	var _p0 uint32
-	if *ownerDefaulted {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
-	*ownerDefaulted = _p0 != 0
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) {
-	var _p0 uint32
-	if *groupDefaulted {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
-	*groupDefaulted = _p0 != 0
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
-	r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
-	len = uint32(r0)
-	return
-}
-
-func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
-	r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
-
-func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
-	r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
-	isValid = r0 != 0
-	return
-}
-
-func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
-	r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) {
-	var _p0 uint32
-	if daclPresent {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	var _p1 uint32
-	if daclDefaulted {
-		_p1 = 1
-	} else {
-		_p1 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) {
-	var _p0 uint32
-	if saclPresent {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	var _p1 uint32
-	if saclDefaulted {
-		_p1 = 1
-	} else {
-		_p1 = 0
-	}
-	r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) {
-	var _p0 uint32
-	if ownerDefaulted {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) {
-	var _p0 uint32
-	if groupDefaulted {
-		_p0 = 1
-	} else {
-		_p0 = 0
-	}
-	r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
-	syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
-	return
-}
-
-func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
-	var _p0 *uint16
-	_p0, err = syscall.UTF16PtrFromString(str)
-	if err != nil {
-		return
-	}
-	return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
-}
-
-func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0)
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
-	r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
-	if r1 == 0 {
-		if e1 != 0 {
-			err = errnoErr(e1)
-		} else {
-			err = syscall.EINVAL
-		}
-	}
-	return
-}
-
-func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
-	r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
-	if r0 != 0 {
-		ret = syscall.Errno(r0)
-	}
-	return
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
deleted file mode 100644
index 0ed28c9..0000000
--- a/vendor/modules.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-# github.com/BenB196/crashplan-ffs-go-pkg v0.1.7
-## explicit
-github.com/BenB196/crashplan-ffs-go-pkg
-# github.com/BenB196/ip-api-go-pkg v0.0.8
-## explicit
-github.com/BenB196/ip-api-go-pkg
-# github.com/VictoriaMetrics/fastcache v1.5.7
-## explicit
-github.com/VictoriaMetrics/fastcache
-# github.com/beorn7/perks v1.0.1
-github.com/beorn7/perks/quantile
-# github.com/cespare/xxhash/v2 v2.1.1
-github.com/cespare/xxhash/v2
-# github.com/golang/protobuf v1.3.2
-github.com/golang/protobuf/proto
-github.com/golang/protobuf/ptypes
-github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
-github.com/golang/protobuf/ptypes/timestamp
-# github.com/golang/snappy v0.0.1
-github.com/golang/snappy
-# github.com/mailru/easyjson v0.7.1
-github.com/mailru/easyjson
-github.com/mailru/easyjson/buffer
-github.com/mailru/easyjson/jlexer
-github.com/mailru/easyjson/jwriter
-# github.com/matttproud/golang_protobuf_extensions v1.0.1
-github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/olivere/elastic/v7 v7.0.14
-## explicit
-github.com/olivere/elastic/v7
-github.com/olivere/elastic/v7/config
-github.com/olivere/elastic/v7/uritemplates
-# github.com/pkg/errors v0.9.1
-github.com/pkg/errors
-# github.com/prometheus/client_golang v1.5.1
-## explicit
-github.com/prometheus/client_golang/prometheus
-github.com/prometheus/client_golang/prometheus/internal
-github.com/prometheus/client_golang/prometheus/promauto
-github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.2.0
-github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.9.1
-github.com/prometheus/common/expfmt
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
-github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.0.8
-github.com/prometheus/procfs
-github.com/prometheus/procfs/internal/fs
-github.com/prometheus/procfs/internal/util
-# golang.org/x/net v0.0.0-20200202094626-16171245cfb2
-## explicit
-golang.org/x/net/publicsuffix
-# golang.org/x/sys v0.0.0-20200122134326-e047566fdf82
-golang.org/x/sys/windows