internal: pod: auto update kubelet pod prefix

Read the default /var/lib/kubelet/config.yaml and update the pod cgroup
prefix to match the configured cgroup driver: "kubepods" for cgroupfs,
"kubepods.slice" for systemd.

Signed-off-by: Tonghao Zhang <tonghao@bamaicloud.com>
Tonghao Zhang 2025-07-23 03:05:18 -04:00
parent 4ee04759e3
commit ccaa483a4e
95 changed files with 23267 additions and 5 deletions

go.mod (5 lines changed)

@@ -31,6 +31,8 @@ require (
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
 	k8s.io/api v0.31.3
 	k8s.io/cri-client v0.31.3
+	k8s.io/kubelet v0.29.0
+	sigs.k8s.io/yaml v1.5.0
 )
require (
@@ -69,6 +71,7 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/native v1.1.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.17.9 // indirect
@@ -91,6 +94,7 @@ require (
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/rivo/uniseg v0.4.3 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
+	github.com/spf13/cobra v1.8.1 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/tklauser/go-sysconf v0.3.12 // indirect
 	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
@@ -109,6 +113,7 @@ require (
 	go.opentelemetry.io/otel/sdk v1.29.0 // indirect
 	go.opentelemetry.io/otel/trace v1.29.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
 	golang.org/x/arch v0.12.0 // indirect
 	golang.org/x/crypto v0.29.0 // indirect
 	golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 // indirect

go.sum (14 lines changed)

@@ -135,6 +135,8 @@ github.com/hashicorp/go-plugin v1.6.1 h1:P7MR2UP6gNKGPp+y7EZw2kOiq4IR9WiqLvp0XOs
 github.com/hashicorp/go-plugin v1.6.1/go.mod h1:XPHFku2tFo3o3QKFgSYo+cghcUhw1NA1hZyMK0PWAw0=
 github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE=
 github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY=
 github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -239,6 +241,8 @@ github.com/shirou/gopsutil v2.21.11+incompatible h1:lOGOyCG67a5dv2hq5Z1BLDUqqKp3
 github.com/shirou/gopsutil v2.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -313,6 +317,10 @@ go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeX
 go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.3 h1:bXOww4E/J3f66rav3pX3m8w6jDE4knZjGOw8b5Y6iNE=
+go.yaml.in/yaml/v3 v3.0.3/go.mod h1:tBHosrYAkRZjRAOREWbDnBXUf08JOwYq++0QNwQiWzI=
 golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
 golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -411,6 +419,8 @@ k8s.io/cri-client v0.31.3 h1:9ZwddaNJomqkTBYQqSmB+Ccns3beY4HyYDwmRtWTCJM=
 k8s.io/cri-client v0.31.3/go.mod h1:klbWiYkOatOQOkXOYZMZMGSTM8q9eC/efsYGuXcgPes=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kubelet v0.29.0 h1:SX5hlznTBcGIrS1scaf8r8p6m3e475KMifwt9i12iOk=
+k8s.io/kubelet v0.29.0/go.mod h1:kvKS2+Bz2tgDOG1S1q0TH2z1DasNuVF+8p6Aw7xvKkI=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A=
 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
@@ -418,5 +428,5 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm
 sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
 sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
-sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.5.0 h1:M10b2U7aEUY6hRtU870n2VTPgR5RZiL/I6Lcc2F4NUQ=
+sigs.k8s.io/yaml v1.5.0/go.mod h1:wZs27Rbxoai4C0f8/9urLZtZtF3avA3gKvGyPdDqTO4=


@@ -158,8 +158,9 @@ func containerInitPidInContainerd(containerID string) (int, error) {
 func containerCgroupSuffix(containerID string, pod *corev1.Pod) string {
 	if pod.Status.QOSClass == corev1.PodQOSGuaranteed {
-		return fmt.Sprintf("/kubepods/pod%s/%s", pod.UID, containerID)
+		return fmt.Sprintf("/%s/pod%s/%s", kubeletPodCgroupPrefix, pod.UID, containerID)
 	}
-	return fmt.Sprintf("/kubepods/%s/pod%s/%s", strings.ToLower(string(pod.Status.QOSClass)), pod.UID, containerID)
+	return fmt.Sprintf("/%s/%s/pod%s/%s", kubeletPodCgroupPrefix,
+		strings.ToLower(string(pod.Status.QOSClass)), pod.UID, containerID)
 }
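With the prefix now a variable, the resulting suffix depends on the cgroup driver. A minimal standalone sketch of the patched logic (the `cgroupSuffix` helper, pod UID, and container ID below are invented for illustration; the real code is `containerCgroupSuffix` above):

```go
package main

import (
	"fmt"
	"strings"
)

// cgroupSuffix mirrors the patched containerCgroupSuffix logic:
// guaranteed pods sit directly under the prefix, other QoS classes
// get an extra per-class directory.
func cgroupSuffix(prefix, qosClass, uid, containerID string) string {
	if qosClass == "Guaranteed" {
		return fmt.Sprintf("/%s/pod%s/%s", prefix, uid, containerID)
	}
	return fmt.Sprintf("/%s/%s/pod%s/%s", prefix, strings.ToLower(qosClass), uid, containerID)
}

func main() {
	// cgroupfs driver: prefix stays "kubepods" (the old hard-coded value).
	fmt.Println(cgroupSuffix("kubepods", "Burstable", "1234", "abcd"))
	// -> /kubepods/burstable/pod1234/abcd

	// systemd driver: prefix becomes "kubepods.slice".
	fmt.Println(cgroupSuffix("kubepods.slice", "Guaranteed", "1234", "abcd"))
	// -> /kubepods.slice/pod1234/abcd
}
```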


@@ -32,10 +32,13 @@ import (
 	"huatuo-bamai/internal/utils/procfsutil"

 	corev1 "k8s.io/api/core/v1"
+	kubeletconfig "k8s.io/kubelet/config/v1beta1"
+	"sigs.k8s.io/yaml"
 )

 const (
-	kubeletReqTimeout = 5 * time.Second
+	kubeletReqTimeout      = 5 * time.Second
+	kubeletDefaultConfPath = "/var/lib/kubelet/config.yaml"
 )

 var (
@@ -45,6 +48,7 @@ var (
 	kubeletHttpsClient *http.Client
 	kubeletTimeTicker  *time.Ticker
 	kubeletDoneCancel  context.CancelFunc
+	kubeletPodCgroupPrefix = "kubepods"
 )

 type PodContainerInitCtx struct {
@@ -118,6 +122,8 @@ func ContainerPodMgrInit(ctx *PodContainerInitCtx) error {
 		ctx.podClientCertPath, ctx.podClientCertKey = s[0], s[1]
 	}

+	_ = kubeletCgroupDriver()
+
 	err := kubeletPodListPortUpdate(ctx)
 	if !errors.Is(err, syscall.ECONNREFUSED) {
 		return err
@@ -133,6 +139,7 @@
 			case <-t.C:
 				if err := kubeletPodListPortUpdate(ctx); err == nil {
 					log.Infof("kubelet is running now")
+					_ = kubeletCgroupDriver()
 					ContainerPodMgrClose()
 					break
 				}
@@ -382,3 +389,22 @@ func isRuningPod(pod *corev1.Pod) bool {
 	return true
 }
+
+func kubeletCgroupDriver() error {
+	data, err := os.ReadFile(kubeletDefaultConfPath)
+	if err != nil {
+		return fmt.Errorf("read %s: %w", kubeletDefaultConfPath, err)
+	}
+
+	var config kubeletconfig.KubeletConfiguration
+	if err := yaml.Unmarshal(data, &config); err != nil {
+		return err
+	}
+
+	if config.CgroupDriver == "systemd" {
+		kubeletPodCgroupPrefix = "kubepods.slice"
+	}
+	return nil
+}
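The new function only inspects the `cgroupDriver` field of the kubelet configuration. A minimal sketch of that parsing step, with an inline fragment standing in for the real /var/lib/kubelet/config.yaml (the fragment itself is illustrative, not taken from any node):

```go
package main

import (
	"fmt"

	kubeletconfig "k8s.io/kubelet/config/v1beta1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Stand-in for /var/lib/kubelet/config.yaml; only the field that
	// kubeletCgroupDriver() reads is shown.
	data := []byte(`
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
`)

	var config kubeletconfig.KubeletConfiguration
	if err := yaml.Unmarshal(data, &config); err != nil {
		panic(err)
	}
	// "systemd" here is what flips the prefix to "kubepods.slice".
	fmt.Println(config.CgroupDriver)
}
```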

vendor/github.com/inconshreveable/mousetrap/LICENSE (generated, vendored; new file, 201 lines)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022 Alan Shreve (@inconshreveable)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/inconshreveable/mousetrap/README.md (generated, vendored; new file, 23 lines)

@@ -0,0 +1,23 @@
# mousetrap
mousetrap is a tiny library that answers a single question.
On a Windows machine, was the process invoked by someone double clicking on
the executable file while browsing in explorer?
### Motivation
Windows developers unfamiliar with command line tools will often "double-click"
the executable for a tool. Because most CLI tools print the help and then exit
when invoked without arguments, this is often very frustrating for those users.
mousetrap provides a way to detect these invocations so that you can provide
more helpful behavior and instructions on how to run the CLI tool. To see what
this looks like, both from an organizational and a technical perspective, see
https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
### The interface
The library exposes a single interface:
func StartedByExplorer() (bool)
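Not part of the vendored file, but for context, a short sketch of how a CLI might use this interface (the message and timeout below are invented):

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/inconshreveable/mousetrap"
)

func main() {
	// If the binary was double-clicked from explorer.exe, keep the console
	// window open long enough for the user to read the hint.
	if mousetrap.StartedByExplorer() {
		fmt.Println("This is a command line tool; please run it from a terminal.")
		time.Sleep(5 * time.Second)
		os.Exit(1)
	}
	fmt.Println("running normally")
}
```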


@@ -0,0 +1,16 @@
//go:build !windows
// +build !windows
package mousetrap
// StartedByExplorer returns true if the program was invoked by the user
// double-clicking on the executable from explorer.exe
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
//
// On non-Windows platforms, it always returns false.
func StartedByExplorer() bool {
return false
}


@@ -0,0 +1,42 @@
package mousetrap
import (
"syscall"
"unsafe"
)
func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
if err != nil {
return nil, err
}
defer syscall.CloseHandle(snapshot)
var procEntry syscall.ProcessEntry32
procEntry.Size = uint32(unsafe.Sizeof(procEntry))
if err = syscall.Process32First(snapshot, &procEntry); err != nil {
return nil, err
}
for {
if procEntry.ProcessID == uint32(pid) {
return &procEntry, nil
}
err = syscall.Process32Next(snapshot, &procEntry)
if err != nil {
return nil, err
}
}
}
// StartedByExplorer returns true if the program was invoked by the user double-clicking
// on the executable from explorer.exe
//
// It is conservative and returns false if any of the internal calls fail.
// It does not guarantee that the program was run from a terminal. It only can tell you
// whether it was launched from explorer.exe
func StartedByExplorer() bool {
pe, err := getProcessEntry(syscall.Getppid())
if err != nil {
return false
}
return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
}

vendor/github.com/spf13/cobra/.gitignore (generated, vendored; new file, 39 lines)

@@ -0,0 +1,39 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
# swap
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
# session
Session.vim
# temporary
.netrwhist
*~
# auto-generated tag files
tags
*.exe
cobra.test
bin
.idea/
*.iml

vendor/github.com/spf13/cobra/.golangci.yml (generated, vendored; new file, 57 lines)

@@ -0,0 +1,57 @@
# Copyright 2013-2023 The Cobra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
run:
deadline: 5m
linters:
disable-all: true
enable:
#- bodyclose
# - deadcode ! deprecated since v1.49.0; replaced by 'unused'
#- depguard
#- dogsled
#- dupl
- errcheck
#- exhaustive
#- funlen
#- gochecknoinits
- goconst
- gocritic
#- gocyclo
- gofmt
- goimports
#- gomnd
#- goprintffuncname
- gosec
- gosimple
- govet
- ineffassign
#- lll
- misspell
#- nakedret
#- noctx
- nolintlint
#- rowserrcheck
#- scopelint
- staticcheck
#- structcheck ! deprecated since v1.49.0; replaced by 'unused'
- stylecheck
#- typecheck
- unconvert
#- unparam
- unused
# - varcheck ! deprecated since v1.49.0; replaced by 'unused'
#- whitespace
fast: false

vendor/github.com/spf13/cobra/.mailmap (generated, vendored; new file, 3 lines)

@@ -0,0 +1,3 @@
Steve Francia <steve.francia@gmail.com>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>

vendor/github.com/spf13/cobra/CONDUCT.md (generated, vendored; new file, 37 lines)

@@ -0,0 +1,37 @@
## Cobra User Contract
### Versioning
Cobra will follow a steady release cadence. Non breaking changes will be released as minor versions quarterly. Patch bug releases are at the discretion of the maintainers. Users can expect security patch fixes to be released within relatively short order of a CVE becoming known. For more information on security patch fixes see the CVE section below. Releases will follow [Semantic Versioning](https://semver.org/). Users tracking the Master branch should expect unpredictable breaking changes as the project continues to move forward. For stability, it is highly recommended to use a release.
### Backward Compatibility
We will maintain two major releases in a moving window. The N-1 release will only receive bug fixes and security updates and will be dropped once N+1 is released.
### Deprecation
Deprecation of Go versions or dependent packages will only occur in major releases. To reduce the change of this taking users by surprise, any large deprecation will be preceded by an announcement in the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) and an Issue on Github.
### CVE
Maintainers will make every effort to release security patches in the case of a medium to high severity CVE directly impacting the library. The speed in which these patches reach a release is up to the discretion of the maintainers. A low severity CVE may be a lower priority than a high severity one.
### Communication
Cobra maintainers will use GitHub issues and the [#cobra slack channel](https://gophers.slack.com/archives/CD3LP1199) as the primary means of communication with the community. This is to foster open communication with all users and contributors.
### Breaking Changes
Breaking changes are generally allowed in the master branch, as this is the branch used to develop the next release of Cobra.
There may be times, however, when master is closed for breaking changes. This is likely to happen as we near the release of a new version.
Breaking changes are not allowed in release branches, as these represent minor versions that have already been released. These version have consumers who expect the APIs, behaviors, etc, to remain stable during the lifetime of the patch stream for the minor release.
Examples of breaking changes include:
- Removing or renaming exported constant, variable, type, or function.
- Updating the version of critical libraries such as `spf13/pflag`, `spf13/viper` etc...
- Some version updates may be acceptable for picking up bug fixes, but maintainers must exercise caution when reviewing.
There may, at times, need to be exceptions where breaking changes are allowed in release branches. These are at the discretion of the project's maintainers, and must be carefully considered before merging.
### CI Testing
Maintainers will ensure the Cobra test suite utilizes the current supported versions of Golang.
### Disclaimer
Changes to this document and the contents therein are at the discretion of the maintainers.
None of the contents of this document are legally binding in any way to the maintainers or the users.

vendor/github.com/spf13/cobra/CONTRIBUTING.md (generated, vendored; new file, 50 lines)

@@ -0,0 +1,50 @@
# Contributing to Cobra
Thank you so much for contributing to Cobra. We appreciate your time and help.
Here are some guidelines to help you get started.
## Code of Conduct
Be kind and respectful to the members of the community. Take time to educate
others who are seeking help. Harassment of any kind will not be tolerated.
## Questions
If you have questions regarding Cobra, feel free to ask it in the community
[#cobra Slack channel][cobra-slack]
## Filing a bug or feature
1. Before filing an issue, please check the existing issues to see if a
similar one was already opened. If there is one already opened, feel free
to comment on it.
1. If you believe you've found a bug, please provide detailed steps of
reproduction, the version of Cobra and anything else you believe will be
useful to help troubleshoot it (e.g. OS environment, environment variables,
etc...). Also state the current behavior vs. the expected behavior.
1. If you'd like to see a feature or an enhancement please open an issue with
a clear title and description of what the feature is and why it would be
beneficial to the project and its users.
## Submitting changes
1. CLA: Upon submitting a Pull Request (PR), contributors will be prompted to
sign a CLA. Please sign the CLA :slightly_smiling_face:
1. Tests: If you are submitting code, please ensure you have adequate tests
for the feature. Tests can be run via `go test ./...` or `make test`.
1. Since this is golang project, ensure the new code is properly formatted to
ensure code consistency. Run `make all`.
### Quick steps to contribute
1. Fork the project.
1. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
1. Create your feature branch (`git checkout -b my-new-feature`)
1. Make changes and run tests (`make test`)
1. Add them to staging (`git add .`)
1. Commit your changes (`git commit -m 'Add some feature'`)
1. Push to the branch (`git push origin my-new-feature`)
1. Create new pull request
<!-- Links -->
[cobra-slack]: https://gophers.slack.com/archives/CD3LP1199

vendor/github.com/spf13/cobra/LICENSE.txt (generated, vendored; new file, 174 lines)

@@ -0,0 +1,174 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

vendor/github.com/spf13/cobra/MAINTAINERS (generated, vendored; new file, 13 lines)

@@ -0,0 +1,13 @@
maintainers:
- spf13
- johnSchnake
- jpmcb
- marckhouzam
inactive:
- anthonyfok
- bep
- bogem
- broady
- eparis
- jharshman
- wfernandes

vendor/github.com/spf13/cobra/Makefile (generated, vendored; new file, 35 lines)

@@ -0,0 +1,35 @@
BIN="./bin"
SRC=$(shell find . -name "*.go")
ifeq (, $(shell which golangci-lint))
$(warning "could not find golangci-lint in $(PATH), run: curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh")
endif
.PHONY: fmt lint test install_deps clean
default: all
all: fmt test
fmt:
$(info ******************** checking formatting ********************)
@test -z $(shell gofmt -l $(SRC)) || (gofmt -d $(SRC); exit 1)
lint:
$(info ******************** running lint tools ********************)
golangci-lint run -v
test: install_deps
$(info ******************** running tests ********************)
go test -v ./...
richtest: install_deps
$(info ******************** running tests with kyoh86/richgo ********************)
richgo test -v ./...
install_deps:
$(info ******************** downloading dependencies ********************)
go get -v ./...
clean:
rm -rf $(BIN)

vendor/github.com/spf13/cobra/README.md (generated, vendored; new file, 112 lines)

@@ -0,0 +1,112 @@
![cobra logo](assets/CobraMain.png)
Cobra is a library for creating powerful modern CLI applications.
Cobra is used in many Go projects such as [Kubernetes](https://kubernetes.io/),
[Hugo](https://gohugo.io), and [GitHub CLI](https://github.com/cli/cli) to
name a few. [This list](site/content/projects_using_cobra.md) contains a more extensive list of projects using Cobra.
[![](https://img.shields.io/github/actions/workflow/status/spf13/cobra/test.yml?branch=main&longCache=true&label=Test&logo=github%20actions&logoColor=fff)](https://github.com/spf13/cobra/actions?query=workflow%3ATest)
[![Go Reference](https://pkg.go.dev/badge/github.com/spf13/cobra.svg)](https://pkg.go.dev/github.com/spf13/cobra)
[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cobra)](https://goreportcard.com/report/github.com/spf13/cobra)
[![Slack](https://img.shields.io/badge/Slack-cobra-brightgreen)](https://gophers.slack.com/archives/CD3LP1199)
# Overview
Cobra is a library providing a simple interface to create powerful modern CLI
interfaces similar to git & go tools.
Cobra provides:
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
* Fully POSIX-compliant flags (including short & long versions)
* Nested subcommands
* Global, local and cascading flags
* Intelligent suggestions (`app srver`... did you mean `app server`?)
* Automatic help generation for commands and flags
* Grouping help for subcommands
* Automatic help flag recognition of `-h`, `--help`, etc.
* Automatically generated shell autocomplete for your application (bash, zsh, fish, powershell)
* Automatically generated man pages for your application
* Command aliases so you can change things without breaking them
* The flexibility to define your own help, usage, etc.
* Optional seamless integration with [viper](https://github.com/spf13/viper) for 12-factor apps
# Concepts
Cobra is built on a structure of commands, arguments & flags.
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
The best applications read like sentences when used, and as a result, users
intuitively know how to interact with them.
The pattern to follow is
`APPNAME VERB NOUN --ADJECTIVE`
or
`APPNAME COMMAND ARG --FLAG`.
A few good real world examples may better illustrate this point.
In the following example, 'server' is a command, and 'port' is a flag:
hugo server --port=1313
In this command we are telling Git to clone the url bare.
git clone URL --bare
## Commands
Command is the central point of the application. Each interaction that
the application supports will be contained in a Command. A command can
have children commands and optionally run an action.
In the example above, 'server' is the command.
[More about cobra.Command](https://pkg.go.dev/github.com/spf13/cobra#Command)
## Flags
A flag is a way to modify the behavior of a command. Cobra supports
fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
A Cobra command can define flags that persist through to children commands
and flags that are only available to that command.
In the example above, 'port' is the flag.
Flag functionality is provided by the [pflag
library](https://github.com/spf13/pflag), a fork of the flag standard library
which maintains the same interface while adding POSIX compliance.
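(Not part of the vendored README.) A minimal sketch of the command/flag pattern described above, modeled on the `hugo server --port` example; the app and flag names are invented:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var port int

	// "server" is the command, "port" is its flag.
	serverCmd := &cobra.Command{
		Use:   "server",
		Short: "Start the server",
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Printf("serving on port %d\n", port)
		},
	}
	serverCmd.Flags().IntVar(&port, "port", 1313, "port to listen on")

	rootCmd := &cobra.Command{Use: "app"}
	rootCmd.AddCommand(serverCmd)

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```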
# Installing
Using Cobra is easy. First, use `go get` to install the latest version
of the library.
```
go get -u github.com/spf13/cobra@latest
```
Next, include Cobra in your application:
```go
import "github.com/spf13/cobra"
```
# Usage
`cobra-cli` is a command line program to generate cobra applications and command files.
It will bootstrap your application scaffolding to rapidly
develop a Cobra-based application. It is the easiest way to incorporate Cobra into your application.
It can be installed by running:
```
go install github.com/spf13/cobra-cli@latest
```
For complete details on using the Cobra-CLI generator, please read [The Cobra Generator README](https://github.com/spf13/cobra-cli/blob/main/README.md)
For complete details on using the Cobra library, please read the [The Cobra User Guide](site/content/user_guide.md).
# License
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](LICENSE.txt)

vendor/github.com/spf13/cobra/active_help.go (generated, vendored; new file, 60 lines)

@@ -0,0 +1,60 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"fmt"
"os"
)
const (
activeHelpMarker = "_activeHelp_ "
// The below values should not be changed: programs will be using them explicitly
// in their user documentation, and users will be using them explicitly.
activeHelpEnvVarSuffix = "ACTIVE_HELP"
activeHelpGlobalEnvVar = configEnvVarGlobalPrefix + "_" + activeHelpEnvVarSuffix
activeHelpGlobalDisable = "0"
)
// AppendActiveHelp adds the specified string to the specified array to be used as ActiveHelp.
// Such strings will be processed by the completion script and will be shown as ActiveHelp
// to the user.
// The array parameter should be the array that will contain the completions.
// This function can be called multiple times before and/or after completions are added to
// the array. Each time this function is called with the same array, the new
// ActiveHelp line will be shown below the previous ones when completion is triggered.
func AppendActiveHelp(compArray []string, activeHelpStr string) []string {
return append(compArray, fmt.Sprintf("%s%s", activeHelpMarker, activeHelpStr))
}
// GetActiveHelpConfig returns the value of the ActiveHelp environment variable
// <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the root command in upper
// case, with all non-ASCII-alphanumeric characters replaced by `_`.
// It will always return "0" if the global environment variable COBRA_ACTIVE_HELP
// is set to "0".
func GetActiveHelpConfig(cmd *Command) string {
activeHelpCfg := os.Getenv(activeHelpGlobalEnvVar)
if activeHelpCfg != activeHelpGlobalDisable {
activeHelpCfg = os.Getenv(activeHelpEnvVar(cmd.Root().Name()))
}
return activeHelpCfg
}
// activeHelpEnvVar returns the name of the program-specific ActiveHelp environment
// variable. It has the format <PROGRAM>_ACTIVE_HELP where <PROGRAM> is the name of the
// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
func activeHelpEnvVar(name string) string {
return configEnvVar(name, activeHelpEnvVarSuffix)
}
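(Not part of the vendored file.) A hedged usage sketch of AppendActiveHelp, attaching an ActiveHelp hint to a command's shell completions via `ValidArgsFunction`; the command and strings are invented:

```go
package main

import "github.com/spf13/cobra"

func main() {
	deploy := &cobra.Command{
		Use:  "deploy [env]",
		Args: cobra.ExactArgs(1),
		Run:  func(cmd *cobra.Command, args []string) {},
	}
	// Completions plus an ActiveHelp line rendered by the completion script.
	deploy.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
		comps := []string{"staging", "production"}
		comps = cobra.AppendActiveHelp(comps, "Pick the environment to deploy to")
		return comps, cobra.ShellCompDirectiveNoFileComp
	}

	root := &cobra.Command{Use: "app"}
	root.AddCommand(deploy)
	_ = root.Execute()
}
```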

vendor/github.com/spf13/cobra/args.go (generated, vendored; new file, 131 lines)

@@ -0,0 +1,131 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"fmt"
"strings"
)
type PositionalArgs func(cmd *Command, args []string) error
// legacyArgs validation has the following behaviour:
// - root commands with no subcommands can take arbitrary arguments
// - root commands with subcommands will do subcommand validity checking
// - subcommands will always accept arbitrary arguments
func legacyArgs(cmd *Command, args []string) error {
// no subcommand, always take args
if !cmd.HasSubCommands() {
return nil
}
// root command with subcommands, do subcommand checking.
if !cmd.HasParent() && len(args) > 0 {
return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
return nil
}
// NoArgs returns an error if any args are included.
func NoArgs(cmd *Command, args []string) error {
if len(args) > 0 {
return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
}
return nil
}
// OnlyValidArgs returns an error if there are any positional args that are not in
// the `ValidArgs` field of `Command`
func OnlyValidArgs(cmd *Command, args []string) error {
if len(cmd.ValidArgs) > 0 {
// Remove any description that may be included in ValidArgs.
// A description is following a tab character.
validArgs := make([]string, 0, len(cmd.ValidArgs))
for _, v := range cmd.ValidArgs {
validArgs = append(validArgs, strings.SplitN(v, "\t", 2)[0])
}
for _, v := range args {
if !stringInSlice(v, validArgs) {
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
}
}
}
return nil
}
// ArbitraryArgs never returns an error.
func ArbitraryArgs(cmd *Command, args []string) error {
return nil
}
// MinimumNArgs returns an error if there is not at least N args.
func MinimumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < n {
return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
}
return nil
}
}
// MaximumNArgs returns an error if there are more than N args.
func MaximumNArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) > n {
return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
}
return nil
}
}
// ExactArgs returns an error if there are not exactly n args.
func ExactArgs(n int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) != n {
return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
}
return nil
}
}
// RangeArgs returns an error if the number of args is not within the expected range.
func RangeArgs(min int, max int) PositionalArgs {
return func(cmd *Command, args []string) error {
if len(args) < min || len(args) > max {
return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
}
return nil
}
}
// MatchAll allows combining several PositionalArgs to work in concert.
func MatchAll(pargs ...PositionalArgs) PositionalArgs {
return func(cmd *Command, args []string) error {
for _, parg := range pargs {
if err := parg(cmd, args); err != nil {
return err
}
}
return nil
}
}
// ExactValidArgs returns an error if there are not exactly N positional args OR
// there are any positional args that are not in the `ValidArgs` field of `Command`
//
// Deprecated: use MatchAll(ExactArgs(n), OnlyValidArgs) instead
func ExactValidArgs(n int) PositionalArgs {
return MatchAll(ExactArgs(n), OnlyValidArgs)
}
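(Not part of the vendored file.) A brief usage sketch combining the validators defined above; the command name and arguments are invented:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:       "pick [fruit]",
		ValidArgs: []string{"apple", "banana"},
		// Exactly one argument, and it must be one of ValidArgs.
		Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("picked", args[0])
		},
	}
	_ = cmd.Execute()
}
```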

vendor/github.com/spf13/cobra/bash_completions.go (generated, vendored; new file, 709 lines)

@@ -0,0 +1,709 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"io"
"os"
"sort"
"strings"
"github.com/spf13/pflag"
)
// Annotations for Bash completion.
const (
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
BashCompCustom = "cobra_annotation_bash_completion_custom"
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
)
func writePreamble(buf io.StringWriter, name string) {
WriteStringAndCheck(buf, fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`
__%[1]s_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE:-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
# _init_completion. This is a very minimal version of that function.
__%[1]s_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref "$@" cur prev words cword
}
__%[1]s_index_of_word()
{
local w word=$1
shift
index=0
for w in "$@"; do
[[ $w = "$word" ]] && return
index=$((index+1))
done
index=-1
}
__%[1]s_contains_word()
{
local w word=$1; shift
for w in "$@"; do
[[ $w = "$word" ]] && return
done
return 1
}
__%[1]s_handle_go_custom_completion()
{
__%[1]s_debug "${FUNCNAME[0]}: cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}"
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
local out requestComp lastParam lastChar comp directive args
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows handling aliases
args=("${words[@]:1}")
# Disable ActiveHelp which is not supported for bash completion v1
requestComp="%[8]s=0 ${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
__%[1]s_debug "${FUNCNAME[0]}: lastParam ${lastParam}, lastChar ${lastChar}"
if [ -z "${cur}" ] && [ "${lastChar}" != "=" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "${FUNCNAME[0]}: Adding extra empty parameter"
requestComp="${requestComp} \"\""
fi
__%[1]s_debug "${FUNCNAME[0]}: calling ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval "${requestComp}" 2>/dev/null)
# Extract the directive integer at the very end of the output following a colon (:)
directive=${out##*:}
# Remove the directive
out=${out%%:*}
if [ "${directive}" = "${out}" ]; then
# There is not directive specified
directive=0
fi
__%[1]s_debug "${FUNCNAME[0]}: the completion directive is: ${directive}"
__%[1]s_debug "${FUNCNAME[0]}: the completions are: ${out}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
# Error code. No completion.
__%[1]s_debug "${FUNCNAME[0]}: received error from custom completion go code"
return
else
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__%[1]s_debug "${FUNCNAME[0]}: activating no space"
compopt -o nospace
fi
fi
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
if [[ $(type -t compopt) = "builtin" ]]; then
__%[1]s_debug "${FUNCNAME[0]}: activating no file completion"
compopt +o default
fi
fi
fi
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local fullFilter filter filteringCmd
# Do not use quotes around the $out variable or else newline
# characters will be kept.
for filter in ${out}; do
fullFilter+="$filter|"
done
filteringCmd="_filedir $fullFilter"
__%[1]s_debug "File filtering command: $filteringCmd"
$filteringCmd
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
local subdir
# Use printf to strip any trailing newline
subdir=$(printf "%%s" "${out}")
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
__%[1]s_handle_subdirs_in_dir_flag "$subdir"
else
__%[1]s_debug "Listing directories in ."
_filedir -d
fi
else
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${out}" -- "$cur")
fi
}
__%[1]s_handle_reply()
{
__%[1]s_debug "${FUNCNAME[0]}"
local comp
case $cur in
-*)
if [[ $(type -t compopt) = "builtin" ]]; then
compopt -o nospace
fi
local allflags
if [ ${#must_have_one_flag[@]} -ne 0 ]; then
allflags=("${must_have_one_flag[@]}")
else
allflags=("${flags[*]} ${two_word_flags[*]}")
fi
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${allflags[*]}" -- "$cur")
if [[ $(type -t compopt) = "builtin" ]]; then
[[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
fi
# complete after --flag=abc
if [[ $cur == *=* ]]; then
if [[ $(type -t compopt) = "builtin" ]]; then
compopt +o nospace
fi
local index flag
flag="${cur%%=*}"
__%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
COMPREPLY=()
if [[ ${index} -ge 0 ]]; then
PREFIX=""
cur="${cur#*=}"
${flags_completion[${index}]}
if [ -n "${ZSH_VERSION:-}" ]; then
# zsh completion needs --flag= prefix
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
fi
fi
fi
if [[ -z "${flag_parsing_disabled}" ]]; then
# If flag parsing is enabled, we have completed the flags and can return.
# If flag parsing is disabled, we may not know all (or any) of the flags, so we fallthrough
# to possibly call handle_go_custom_completion.
return 0;
fi
;;
esac
# check if we are handling a flag with special work handling
local index
__%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
if [[ ${index} -ge 0 ]]; then
${flags_completion[${index}]}
return
fi
# we are parsing a flag and don't have a special handler, no completion
if [[ ${cur} != "${words[cword]}" ]]; then
return
fi
local completions
completions=("${commands[@]}")
if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
completions+=("${must_have_one_noun[@]}")
elif [[ -n "${has_completion_function}" ]]; then
# if a go completion function is provided, defer to that function
__%[1]s_handle_go_custom_completion
fi
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
completions+=("${must_have_one_flag[@]}")
fi
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${completions[*]}" -- "$cur")
if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
while IFS='' read -r comp; do
COMPREPLY+=("$comp")
done < <(compgen -W "${noun_aliases[*]}" -- "$cur")
fi
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
if declare -F __%[1]s_custom_func >/dev/null; then
# try command name qualified custom func
__%[1]s_custom_func
else
# otherwise fall back to unqualified for compatibility
declare -F __custom_func >/dev/null && __custom_func
fi
fi
# available in bash-completion >= 2, not always present on macOS
if declare -F __ltrim_colon_completions >/dev/null; then
__ltrim_colon_completions "$cur"
fi
# If there is only 1 completion and it is a flag with an = it will be completed
# but we don't want a space after the =
if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
compopt -o nospace
fi
}
# The arguments should be in the form "ext1|ext2|extn"
__%[1]s_handle_filename_extension_flag()
{
local ext="$1"
_filedir "@(${ext})"
}
__%[1]s_handle_subdirs_in_dir_flag()
{
local dir="$1"
pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
}
__%[1]s_handle_flag()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
# if a command required a flag, and we found it, unset must_have_one_flag()
local flagname=${words[c]}
local flagvalue=""
# if the word contained an =
if [[ ${words[c]} == *"="* ]]; then
flagvalue=${flagname#*=} # everything after the = is the flag value
flagname=${flagname%%=*} # strip everything after the =
flagname="${flagname}=" # but put the = back
fi
__%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
must_have_one_flag=()
fi
# if you set a flag which only applies to this command, don't show subcommands
if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
commands=()
fi
# keep flag value with flagname as flaghash
# flaghash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
if [ -n "${flagvalue}" ] ; then
flaghash[${flagname}]=${flagvalue}
elif [ -n "${words[ $((c+1)) ]}" ] ; then
flaghash[${flagname}]=${words[ $((c+1)) ]}
else
flaghash[${flagname}]="true" # pad "true" for bool flag
fi
fi
# skip the argument to a two word flag
if [[ ${words[c]} != *"="* ]] && __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
__%[1]s_debug "${FUNCNAME[0]}: found a flag ${words[c]}, skip the next argument"
c=$((c+1))
# if we are looking for a flags value, don't show commands
if [[ $c -eq $cword ]]; then
commands=()
fi
fi
c=$((c+1))
}
__%[1]s_handle_noun()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
must_have_one_noun=()
elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
must_have_one_noun=()
fi
nouns+=("${words[c]}")
c=$((c+1))
}
__%[1]s_handle_command()
{
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
local next_command
if [[ -n ${last_command} ]]; then
next_command="_${last_command}_${words[c]//:/__}"
else
if [[ $c -eq 0 ]]; then
next_command="_%[1]s_root_command"
else
next_command="_${words[c]//:/__}"
fi
fi
c=$((c+1))
__%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
declare -F "$next_command" >/dev/null && $next_command
}
__%[1]s_handle_word()
{
if [[ $c -ge $cword ]]; then
__%[1]s_handle_reply
return
fi
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
if [[ "${words[c]}" == -* ]]; then
__%[1]s_handle_flag
elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
__%[1]s_handle_command
elif [[ $c -eq 0 ]]; then
__%[1]s_handle_command
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
# aliashash variable is an associative array which is only supported in bash > 3.
if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then
words[c]=${aliashash[${words[c]}]}
__%[1]s_handle_command
else
__%[1]s_handle_noun
fi
else
__%[1]s_handle_noun
fi
__%[1]s_handle_word
}
`, name, ShellCompNoDescRequestCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, activeHelpEnvVar(name)))
}
func writePostscript(buf io.StringWriter, name string) {
name = strings.ReplaceAll(name, ":", "__")
WriteStringAndCheck(buf, fmt.Sprintf("__start_%s()\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`{
local cur prev words cword split
declare -A flaghash 2>/dev/null || :
declare -A aliashash 2>/dev/null || :
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -s || return
else
__%[1]s_init_completion -n "=" || return
fi
local c=0
local flag_parsing_disabled=
local flags=()
local two_word_flags=()
local local_nonpersistent_flags=()
local flags_with_completion=()
local flags_completion=()
local commands=("%[1]s")
local command_aliases=()
local must_have_one_flag=()
local must_have_one_noun=()
local has_completion_function=""
local last_command=""
local nouns=()
local noun_aliases=()
__%[1]s_handle_word
}
`, name))
WriteStringAndCheck(buf, fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%s %s
else
complete -o default -o nospace -F __start_%s %s
fi
`, name, name, name, name))
WriteStringAndCheck(buf, "# ex: ts=4 sw=4 et filetype=sh\n")
}
func writeCommands(buf io.StringWriter, cmd *Command) {
WriteStringAndCheck(buf, " commands=()\n")
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
}
WriteStringAndCheck(buf, fmt.Sprintf(" commands+=(%q)\n", c.Name()))
writeCmdAliases(buf, c)
}
WriteStringAndCheck(buf, "\n")
}
func writeFlagHandler(buf io.StringWriter, name string, annotations map[string][]string, cmd *Command) {
for key, value := range annotations {
switch key {
case BashCompFilenameExt:
WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) > 0 {
ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
} else {
ext = "_filedir"
}
WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
case BashCompCustom:
WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
if len(value) > 0 {
handlers := strings.Join(value, "; ")
WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
} else {
WriteStringAndCheck(buf, " flags_completion+=(:)\n")
}
case BashCompSubdirsInDir:
WriteStringAndCheck(buf, fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
var ext string
if len(value) == 1 {
ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
} else {
ext = "_filedir -d"
}
WriteStringAndCheck(buf, fmt.Sprintf(" flags_completion+=(%q)\n", ext))
}
}
}
const cbn = "\")\n"
func writeShortFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
name := flag.Shorthand
format := " "
if len(flag.NoOptDefVal) == 0 {
format += "two_word_"
}
format += "flags+=(\"-%s" + cbn
WriteStringAndCheck(buf, fmt.Sprintf(format, name))
writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
}
func writeFlag(buf io.StringWriter, flag *pflag.Flag, cmd *Command) {
name := flag.Name
format := " flags+=(\"--%s"
if len(flag.NoOptDefVal) == 0 {
format += "="
}
format += cbn
WriteStringAndCheck(buf, fmt.Sprintf(format, name))
if len(flag.NoOptDefVal) == 0 {
format = " two_word_flags+=(\"--%s" + cbn
WriteStringAndCheck(buf, fmt.Sprintf(format, name))
}
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
}
func writeLocalNonPersistentFlag(buf io.StringWriter, flag *pflag.Flag) {
name := flag.Name
format := " local_nonpersistent_flags+=(\"--%[1]s" + cbn
if len(flag.NoOptDefVal) == 0 {
format += " local_nonpersistent_flags+=(\"--%[1]s=" + cbn
}
WriteStringAndCheck(buf, fmt.Sprintf(format, name))
if len(flag.Shorthand) > 0 {
WriteStringAndCheck(buf, fmt.Sprintf(" local_nonpersistent_flags+=(\"-%s\")\n", flag.Shorthand))
}
}
// prepareCustomAnnotationsForFlags sets up annotations for go completions for registered flags
func prepareCustomAnnotationsForFlags(cmd *Command) {
flagCompletionMutex.RLock()
defer flagCompletionMutex.RUnlock()
for flag := range flagCompletionFunctions {
// Make sure the completion script calls the __*_go_custom_completion function for
// every registered flag. We need to do this here (and not when the flag was registered
// for completion) so that we can know the root command name for the prefix
// of __<prefix>_go_custom_completion
if flag.Annotations == nil {
flag.Annotations = map[string][]string{}
}
flag.Annotations[BashCompCustom] = []string{fmt.Sprintf("__%[1]s_handle_go_custom_completion", cmd.Root().Name())}
}
}
func writeFlags(buf io.StringWriter, cmd *Command) {
prepareCustomAnnotationsForFlags(cmd)
WriteStringAndCheck(buf, ` flags=()
two_word_flags=()
local_nonpersistent_flags=()
flags_with_completion=()
flags_completion=()
`)
if cmd.DisableFlagParsing {
WriteStringAndCheck(buf, " flag_parsing_disabled=1\n")
}
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag, cmd)
}
// localNonPersistentFlags are used to stop the completion of subcommands when one is set
// if TraverseChildren is true we should allow to complete subcommands
if localNonPersistentFlags.Lookup(flag.Name) != nil && !cmd.Root().TraverseChildren {
writeLocalNonPersistentFlag(buf, flag)
}
})
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
writeFlag(buf, flag, cmd)
if len(flag.Shorthand) > 0 {
writeShortFlag(buf, flag, cmd)
}
})
WriteStringAndCheck(buf, "\n")
}
func writeRequiredFlag(buf io.StringWriter, cmd *Command) {
WriteStringAndCheck(buf, " must_have_one_flag=()\n")
flags := cmd.NonInheritedFlags()
flags.VisitAll(func(flag *pflag.Flag) {
if nonCompletableFlag(flag) {
return
}
if _, ok := flag.Annotations[BashCompOneRequiredFlag]; ok {
format := " must_have_one_flag+=(\"--%s"
if flag.Value.Type() != "bool" {
format += "="
}
format += cbn
WriteStringAndCheck(buf, fmt.Sprintf(format, flag.Name))
if len(flag.Shorthand) > 0 {
WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_flag+=(\"-%s"+cbn, flag.Shorthand))
}
}
})
}
func writeRequiredNouns(buf io.StringWriter, cmd *Command) {
WriteStringAndCheck(buf, " must_have_one_noun=()\n")
sort.Strings(cmd.ValidArgs)
for _, value := range cmd.ValidArgs {
// Remove any description that may be included following a tab character.
// Descriptions are not supported by bash completion.
value = strings.SplitN(value, "\t", 2)[0]
WriteStringAndCheck(buf, fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
}
if cmd.ValidArgsFunction != nil {
WriteStringAndCheck(buf, " has_completion_function=1\n")
}
}
func writeCmdAliases(buf io.StringWriter, cmd *Command) {
if len(cmd.Aliases) == 0 {
return
}
sort.Strings(cmd.Aliases)
WriteStringAndCheck(buf, fmt.Sprint(` if [[ -z "${BASH_VERSION:-}" || "${BASH_VERSINFO[0]:-}" -gt 3 ]]; then`, "\n"))
for _, value := range cmd.Aliases {
WriteStringAndCheck(buf, fmt.Sprintf(" command_aliases+=(%q)\n", value))
WriteStringAndCheck(buf, fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
}
WriteStringAndCheck(buf, ` fi`)
WriteStringAndCheck(buf, "\n")
}
func writeArgAliases(buf io.StringWriter, cmd *Command) {
WriteStringAndCheck(buf, " noun_aliases=()\n")
sort.Strings(cmd.ArgAliases)
for _, value := range cmd.ArgAliases {
WriteStringAndCheck(buf, fmt.Sprintf(" noun_aliases+=(%q)\n", value))
}
}
func gen(buf io.StringWriter, cmd *Command) {
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() && c != cmd.helpCommand {
continue
}
gen(buf, c)
}
commandName := cmd.CommandPath()
commandName = strings.ReplaceAll(commandName, " ", "_")
commandName = strings.ReplaceAll(commandName, ":", "__")
if cmd.Root() == cmd {
WriteStringAndCheck(buf, fmt.Sprintf("_%s_root_command()\n{\n", commandName))
} else {
WriteStringAndCheck(buf, fmt.Sprintf("_%s()\n{\n", commandName))
}
WriteStringAndCheck(buf, fmt.Sprintf(" last_command=%q\n", commandName))
WriteStringAndCheck(buf, "\n")
WriteStringAndCheck(buf, " command_aliases=()\n")
WriteStringAndCheck(buf, "\n")
writeCommands(buf, cmd)
writeFlags(buf, cmd)
writeRequiredFlag(buf, cmd)
writeRequiredNouns(buf, cmd)
writeArgAliases(buf, cmd)
WriteStringAndCheck(buf, "}\n\n")
}
// GenBashCompletion generates bash completion file and writes to the passed writer.
func (c *Command) GenBashCompletion(w io.Writer) error {
buf := new(bytes.Buffer)
writePreamble(buf, c.Name())
if len(c.BashCompletionFunction) > 0 {
buf.WriteString(c.BashCompletionFunction + "\n")
}
gen(buf, c)
writePostscript(buf, c.Name())
_, err := buf.WriteTo(w)
return err
}
func nonCompletableFlag(flag *pflag.Flag) bool {
return flag.Hidden || len(flag.Deprecated) > 0
}
// GenBashCompletionFile generates bash completion file.
func (c *Command) GenBashCompletionFile(filename string) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenBashCompletion(outFile)
}
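// The two exported methods above are all a program needs in order to emit
// the V1 script. A minimal usage sketch follows; the root command name
// "mycli" and the output path are hypothetical, not part of this commit.
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "mycli"} // hypothetical root command

	// Stream the generated script to stdout...
	cobra.CheckErr(root.GenBashCompletion(os.Stdout))

	// ...or write it to a file that users can source from ~/.bashrc.
	cobra.CheckErr(root.GenBashCompletionFile("/tmp/mycli-completion.bash"))
}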

396
vendor/github.com/spf13/cobra/bash_completionsV2.go generated vendored Normal file

@ -0,0 +1,396 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"io"
"os"
)
func (c *Command) genBashCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genBashComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
func genBashComp(buf io.StringWriter, name string, includeDesc bool) {
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
WriteStringAndCheck(buf, fmt.Sprintf(`# bash completion V2 for %-36[1]s -*- shell-script -*-
__%[1]s_debug()
{
if [[ -n ${BASH_COMP_DEBUG_FILE-} ]]; then
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
fi
}
# Macs have bash3 for which the bash-completion package doesn't include
# _init_completion. This is a minimal version of that function.
__%[1]s_init_completion()
{
COMPREPLY=()
_get_comp_words_by_ref "$@" cur prev words cword
}
# This function calls the %[1]s program to obtain the completion
# results and the directive. It fills the 'out' and 'directive' vars.
__%[1]s_get_completion_results() {
local requestComp lastParam lastChar args
# Prepare the command to request completions for the program.
# Calling ${words[0]} instead of directly %[1]s allows handling aliases
args=("${words[@]:1}")
requestComp="${words[0]} %[2]s ${args[*]}"
lastParam=${words[$((${#words[@]}-1))]}
lastChar=${lastParam:$((${#lastParam}-1)):1}
__%[1]s_debug "lastParam ${lastParam}, lastChar ${lastChar}"
if [[ -z ${cur} && ${lastChar} != = ]]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "Adding extra empty parameter"
requestComp="${requestComp} ''"
fi
# When completing a flag with an = (e.g., %[1]s -n=<TAB>)
# bash focuses on the part after the =, so we need to remove
# the flag part from $cur
if [[ ${cur} == -*=* ]]; then
cur="${cur#*=}"
fi
__%[1]s_debug "Calling ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval "${requestComp}" 2>/dev/null)
# Extract the directive integer at the very end of the output following a colon (:)
directive=${out##*:}
# Remove the directive
out=${out%%:*}
if [[ ${directive} == "${out}" ]]; then
# There is no directive specified
directive=0
fi
__%[1]s_debug "The completion directive is: ${directive}"
__%[1]s_debug "The completions are: ${out}"
}
__%[1]s_process_completion_results() {
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
local shellCompDirectiveKeepOrder=%[8]d
if (((directive & shellCompDirectiveError) != 0)); then
# Error code. No completion.
__%[1]s_debug "Received error from custom completion go code"
return
else
if (((directive & shellCompDirectiveNoSpace) != 0)); then
if [[ $(type -t compopt) == builtin ]]; then
__%[1]s_debug "Activating no space"
compopt -o nospace
else
__%[1]s_debug "No space directive not supported in this version of bash"
fi
fi
if (((directive & shellCompDirectiveKeepOrder) != 0)); then
if [[ $(type -t compopt) == builtin ]]; then
# nosort isn't supported for bash versions older than 4.4
if [[ ${BASH_VERSINFO[0]} -lt 4 || ( ${BASH_VERSINFO[0]} -eq 4 && ${BASH_VERSINFO[1]} -lt 4 ) ]]; then
__%[1]s_debug "No sort directive not supported in this version of bash"
else
__%[1]s_debug "Activating keep order"
compopt -o nosort
fi
else
__%[1]s_debug "No sort directive not supported in this version of bash"
fi
fi
if (((directive & shellCompDirectiveNoFileComp) != 0)); then
if [[ $(type -t compopt) == builtin ]]; then
__%[1]s_debug "Activating no file completion"
compopt +o default
else
__%[1]s_debug "No file completion directive not supported in this version of bash"
fi
fi
fi
# Separate activeHelp from normal completions
local completions=()
local activeHelp=()
__%[1]s_extract_activeHelp
if (((directive & shellCompDirectiveFilterFileExt) != 0)); then
# File extension filtering
local fullFilter filter filteringCmd
# Do not use quotes around the $completions variable or else newline
# characters will be kept.
for filter in ${completions[*]}; do
fullFilter+="$filter|"
done
filteringCmd="_filedir $fullFilter"
__%[1]s_debug "File filtering command: $filteringCmd"
$filteringCmd
elif (((directive & shellCompDirectiveFilterDirs) != 0)); then
# File completion for directories only
local subdir
subdir=${completions[0]}
if [[ -n $subdir ]]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "$subdir" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 || return
else
__%[1]s_debug "Listing directories in ."
_filedir -d
fi
else
__%[1]s_handle_completion_types
fi
__%[1]s_handle_special_char "$cur" :
__%[1]s_handle_special_char "$cur" =
# Print the activeHelp statements before we finish
if ((${#activeHelp[*]} != 0)); then
printf "\n";
printf "%%s\n" "${activeHelp[@]}"
printf "\n"
# The prompt format is only available from bash 4.4.
# We test if it is available before using it.
if (x=${PS1@P}) 2> /dev/null; then
printf "%%s" "${PS1@P}${COMP_LINE[@]}"
else
# Can't print the prompt. Just print the
# text the user had typed, it is workable enough.
printf "%%s" "${COMP_LINE[@]}"
fi
fi
}
# Separate activeHelp lines from real completions.
# Fills the $activeHelp and $completions arrays.
__%[1]s_extract_activeHelp() {
local activeHelpMarker="%[9]s"
local endIndex=${#activeHelpMarker}
while IFS='' read -r comp; do
if [[ ${comp:0:endIndex} == $activeHelpMarker ]]; then
comp=${comp:endIndex}
__%[1]s_debug "ActiveHelp found: $comp"
if [[ -n $comp ]]; then
activeHelp+=("$comp")
fi
else
# Not an activeHelp line but a normal completion
completions+=("$comp")
fi
done <<<"${out}"
}
__%[1]s_handle_completion_types() {
__%[1]s_debug "__%[1]s_handle_completion_types: COMP_TYPE is $COMP_TYPE"
case $COMP_TYPE in
37|42)
# Type: menu-complete/menu-complete-backward and insert-completions
# If the user requested inserting one completion at a time, or all
# completions at once on the command-line we must remove the descriptions.
# https://github.com/spf13/cobra/issues/1508
local tab=$'\t' comp
while IFS='' read -r comp; do
[[ -z $comp ]] && continue
# Strip any description
comp=${comp%%%%$tab*}
# Only consider the completions that match
if [[ $comp == "$cur"* ]]; then
COMPREPLY+=("$comp")
fi
done < <(printf "%%s\n" "${completions[@]}")
;;
*)
# Type: complete (normal completion)
__%[1]s_handle_standard_completion_case
;;
esac
}
__%[1]s_handle_standard_completion_case() {
local tab=$'\t' comp
# Short circuit to optimize if we don't have descriptions
if [[ "${completions[*]}" != *$tab* ]]; then
IFS=$'\n' read -ra COMPREPLY -d '' < <(compgen -W "${completions[*]}" -- "$cur")
return 0
fi
local longest=0
local compline
# Look for the longest completion so that we can format things nicely
while IFS='' read -r compline; do
[[ -z $compline ]] && continue
# Strip any description before checking the length
comp=${compline%%%%$tab*}
# Only consider the completions that match
[[ $comp == "$cur"* ]] || continue
COMPREPLY+=("$compline")
if ((${#comp}>longest)); then
longest=${#comp}
fi
done < <(printf "%%s\n" "${completions[@]}")
# If there is a single completion left, remove the description text
if ((${#COMPREPLY[*]} == 1)); then
__%[1]s_debug "COMPREPLY[0]: ${COMPREPLY[0]}"
comp="${COMPREPLY[0]%%%%$tab*}"
__%[1]s_debug "Removed description from single completion, which is now: ${comp}"
COMPREPLY[0]=$comp
else # Format the descriptions
__%[1]s_format_comp_descriptions $longest
fi
}
__%[1]s_handle_special_char()
{
local comp="$1"
local char=$2
if [[ "$comp" == *${char}* && "$COMP_WORDBREAKS" == *${char}* ]]; then
local word=${comp%%"${comp##*${char}}"}
local idx=${#COMPREPLY[*]}
while ((--idx >= 0)); do
COMPREPLY[idx]=${COMPREPLY[idx]#"$word"}
done
fi
}
__%[1]s_format_comp_descriptions()
{
local tab=$'\t'
local comp desc maxdesclength
local longest=$1
local i ci
for ci in ${!COMPREPLY[*]}; do
comp=${COMPREPLY[ci]}
# Properly format the description string which follows a tab character if there is one
if [[ "$comp" == *$tab* ]]; then
__%[1]s_debug "Original comp: $comp"
desc=${comp#*$tab}
comp=${comp%%%%$tab*}
# $COLUMNS stores the current shell width.
# Remove an extra 4 because we add 2 spaces and 2 parentheses.
maxdesclength=$(( COLUMNS - longest - 4 ))
# Make sure we can fit a description of at least 8 characters
# if we are to align the descriptions.
if ((maxdesclength > 8)); then
# Add the proper number of spaces to align the descriptions
for ((i = ${#comp} ; i < longest ; i++)); do
comp+=" "
done
else
# Don't pad the descriptions so we can fit more text after the completion
maxdesclength=$(( COLUMNS - ${#comp} - 4 ))
fi
# If there is enough space for any description text,
# truncate the descriptions that are too long for the shell width
if ((maxdesclength > 0)); then
if ((${#desc} > maxdesclength)); then
desc=${desc:0:$(( maxdesclength - 1 ))}
desc+="…"
fi
comp+=" ($desc)"
fi
COMPREPLY[ci]=$comp
__%[1]s_debug "Final comp: $comp"
fi
done
}
__start_%[1]s()
{
local cur prev words cword split
COMPREPLY=()
# Call _init_completion from the bash-completion package
# to prepare the arguments properly
if declare -F _init_completion >/dev/null 2>&1; then
_init_completion -n =: || return
else
__%[1]s_init_completion -n =: || return
fi
__%[1]s_debug
__%[1]s_debug "========= starting completion logic =========="
__%[1]s_debug "cur is ${cur}, words[*] is ${words[*]}, #words[@] is ${#words[@]}, cword is $cword"
# The user could have moved the cursor backwards on the command-line.
# We need to trigger completion from the $cword location, so we need
# to truncate the command-line ($words) up to the $cword location.
words=("${words[@]:0:$cword+1}")
__%[1]s_debug "Truncated words[*]: ${words[*]},"
local out directive
__%[1]s_get_completion_results
__%[1]s_process_completion_results
}
if [[ $(type -t compopt) = "builtin" ]]; then
complete -o default -F __start_%[1]s %[1]s
else
complete -o default -o nospace -F __start_%[1]s %[1]s
fi
# ex: ts=4 sw=4 et filetype=sh
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
activeHelpMarker))
}
// GenBashCompletionFileV2 generates Bash completion version 2.
func (c *Command) GenBashCompletionFileV2(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenBashCompletionV2(outFile, includeDesc)
}
// GenBashCompletionV2 generates Bash completion file version 2
// and writes it to the passed writer.
func (c *Command) GenBashCompletionV2(w io.Writer, includeDesc bool) error {
return c.genBashCompletion(w, includeDesc)
}
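// As with V1, the V2 generator is driven by the root command. A short
// sketch (assumed command name "mycli"); includeDesc controls whether the
// script requests completions via __complete or __completeNoDesc.
package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	root := &cobra.Command{Use: "mycli"} // hypothetical root command
	// true: keep completion descriptions; false: strip them.
	cobra.CheckErr(root.GenBashCompletionV2(os.Stdout, true))
}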

242
vendor/github.com/spf13/cobra/cobra.go generated vendored Normal file

@ -0,0 +1,242 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Commands similar to git, go tools and other modern CLI tools
// inspired by go, go-Commander, gh and subcommand
package cobra
import (
"fmt"
"io"
"os"
"reflect"
"strconv"
"strings"
"text/template"
"time"
"unicode"
)
var templateFuncs = template.FuncMap{
"trim": strings.TrimSpace,
"trimRightSpace": trimRightSpace,
"trimTrailingWhitespaces": trimRightSpace,
"appendIfNotPresent": appendIfNotPresent,
"rpad": rpad,
"gt": Gt,
"eq": Eq,
}
var initializers []func()
var finalizers []func()
const (
defaultPrefixMatching = false
defaultCommandSorting = true
defaultCaseInsensitive = false
defaultTraverseRunHooks = false
)
// EnablePrefixMatching allows setting automatic prefix matching. Automatic prefix matching can be a dangerous thing
// to automatically enable in CLI tools.
// Set this to true to enable it.
var EnablePrefixMatching = defaultPrefixMatching
// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default.
// To disable sorting, set it to false.
var EnableCommandSorting = defaultCommandSorting
// EnableCaseInsensitive allows case-insensitive command names. (case sensitive by default)
var EnableCaseInsensitive = defaultCaseInsensitive
// EnableTraverseRunHooks executes persistent pre-run and post-run hooks from all parents.
// By default this is disabled, which means only the first run hook to be found is executed.
var EnableTraverseRunHooks = defaultTraverseRunHooks
// MousetrapHelpText enables an information splash screen on Windows
// if the CLI is started from explorer.exe.
// To disable the mousetrap, just set this variable to blank string ("").
// Works only on Microsoft Windows.
var MousetrapHelpText = `This is a command line tool.
You need to open cmd.exe and run it from there.
`
// MousetrapDisplayDuration controls how long the MousetrapHelpText message is displayed on Windows
// if the CLI is started from explorer.exe. Set to 0 to wait for the return key to be pressed.
// To disable the mousetrap, just set MousetrapHelpText to blank string ("").
// Works only on Microsoft Windows.
var MousetrapDisplayDuration = 5 * time.Second
// AddTemplateFunc adds a template function that's available to Usage and Help
// template generation.
func AddTemplateFunc(name string, tmplFunc interface{}) {
templateFuncs[name] = tmplFunc
}
// AddTemplateFuncs adds multiple template functions that are available to Usage and
// Help template generation.
func AddTemplateFuncs(tmplFuncs template.FuncMap) {
for k, v := range tmplFuncs {
templateFuncs[k] = v
}
}
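// AddTemplateFunc and AddTemplateFuncs extend the function map used when
// rendering usage and help templates. A hedged sketch; the "upper" name,
// command, and template below are illustrative only.
package main

import (
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// Make strings.ToUpper available to all usage/help templates as "upper".
	cobra.AddTemplateFunc("upper", strings.ToUpper)

	cmd := &cobra.Command{Use: "demo"} // hypothetical command
	cmd.SetUsageTemplate("Usage: {{upper .UseLine}}\n")
	_ = cmd.Usage()
}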
// OnInitialize sets the passed functions to be run when each command's
// Execute method is called.
func OnInitialize(y ...func()) {
initializers = append(initializers, y...)
}
// OnFinalize sets the passed functions to be run when each command's
// Execute method has finished.
func OnFinalize(y ...func()) {
finalizers = append(finalizers, y...)
}
// FIXME Gt is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans,
// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as
// ints and then compared.
func Gt(a interface{}, b interface{}) bool {
var left, right int64
av := reflect.ValueOf(a)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
left = int64(av.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
left = av.Int()
case reflect.String:
left, _ = strconv.ParseInt(av.String(), 10, 64)
}
bv := reflect.ValueOf(b)
switch bv.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
right = int64(bv.Len())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
right = bv.Int()
case reflect.String:
right, _ = strconv.ParseInt(bv.String(), 10, 64)
}
return left > right
}
// FIXME Eq is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic.
func Eq(a interface{}, b interface{}) bool {
av := reflect.ValueOf(a)
bv := reflect.ValueOf(b)
switch av.Kind() {
case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
panic("Eq called on unsupported type")
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return av.Int() == bv.Int()
case reflect.String:
return av.String() == bv.String()
}
return false
}
func trimRightSpace(s string) string {
return strings.TrimRightFunc(s, unicode.IsSpace)
}
// FIXME appendIfNotPresent is unused by cobra and should be removed in a version 2. It exists only for compatibility with users of cobra.
// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s.
func appendIfNotPresent(s, stringToAppend string) string {
if strings.Contains(s, stringToAppend) {
return s
}
return s + " " + stringToAppend
}
// rpad adds padding to the right of a string.
func rpad(s string, padding int) string {
formattedString := fmt.Sprintf("%%-%ds", padding)
return fmt.Sprintf(formattedString, s)
}
// tmpl executes the given template text on data, writing the result to w.
func tmpl(w io.Writer, text string, data interface{}) error {
t := template.New("top")
t.Funcs(templateFuncs)
template.Must(t.Parse(text))
return t.Execute(w, data)
}
// ld compares two strings and returns the levenshtein distance between them.
func ld(s, t string, ignoreCase bool) int {
if ignoreCase {
s = strings.ToLower(s)
t = strings.ToLower(t)
}
d := make([][]int, len(s)+1)
for i := range d {
d[i] = make([]int, len(t)+1)
d[i][0] = i
}
for j := range d[0] {
d[0][j] = j
}
for j := 1; j <= len(t); j++ {
for i := 1; i <= len(s); i++ {
if s[i-1] == t[j-1] {
d[i][j] = d[i-1][j-1]
} else {
min := d[i-1][j]
if d[i][j-1] < min {
min = d[i][j-1]
}
if d[i-1][j-1] < min {
min = d[i-1][j-1]
}
d[i][j] = min + 1
}
}
}
return d[len(s)][len(t)]
}
func stringInSlice(a string, list []string) bool {
for _, b := range list {
if b == a {
return true
}
}
return false
}
// CheckErr prints the msg with the prefix 'Error:' and exits with error code 1. If the msg is nil, it does nothing.
func CheckErr(msg interface{}) {
if msg != nil {
fmt.Fprintln(os.Stderr, "Error:", msg)
os.Exit(1)
}
}
// WriteStringAndCheck writes a string into a buffer, and checks if the error is not nil.
func WriteStringAndCheck(b io.StringWriter, s string) {
_, err := b.WriteString(s)
CheckErr(err)
}
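// WriteStringAndCheck accepts any io.StringWriter, so the completion
// generators above can target an in-memory buffer as easily as a file.
// A small sketch:
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/cobra"
)

func main() {
	// strings.Builder implements io.StringWriter; on a write error
	// CheckErr would print it with an "Error:" prefix and exit(1).
	var b strings.Builder
	cobra.WriteStringAndCheck(&b, "# generated script header\n")
	fmt.Print(b.String())
}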

1896
vendor/github.com/spf13/cobra/command.go generated vendored Normal file

File diff suppressed because it is too large

20
vendor/github.com/spf13/cobra/command_notwin.go generated vendored Normal file

@ -0,0 +1,20 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !windows
// +build !windows
package cobra
var preExecHookFn func(*Command)

41
vendor/github.com/spf13/cobra/command_win.go generated vendored Normal file

@ -0,0 +1,41 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build windows
// +build windows
package cobra
import (
"fmt"
"os"
"time"
"github.com/inconshreveable/mousetrap"
)
var preExecHookFn = preExecHook
func preExecHook(c *Command) {
if MousetrapHelpText != "" && mousetrap.StartedByExplorer() {
c.Print(MousetrapHelpText)
if MousetrapDisplayDuration > 0 {
time.Sleep(MousetrapDisplayDuration)
} else {
c.Println("Press return to continue...")
fmt.Scanln()
}
os.Exit(1)
}
}
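// Per the MousetrapHelpText documentation in cobra.go above, a program can
// opt out of this Windows-only splash screen by blanking the text; a
// one-line sketch:
package main

import "github.com/spf13/cobra"

func init() {
	// An empty help text disables the explorer.exe detection hook entirely.
	cobra.MousetrapHelpText = ""
}

func main() {}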

939
vendor/github.com/spf13/cobra/completions.go generated vendored Normal file

@ -0,0 +1,939 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"fmt"
"os"
"regexp"
"strconv"
"strings"
"sync"
"github.com/spf13/pflag"
)
const (
// ShellCompRequestCmd is the name of the hidden command that is used to request
// completion results from the program. It is used by the shell completion scripts.
ShellCompRequestCmd = "__complete"
// ShellCompNoDescRequestCmd is the name of the hidden command that is used to request
// completion results without their description. It is used by the shell completion scripts.
ShellCompNoDescRequestCmd = "__completeNoDesc"
)
// Global map of flag completion functions. Make sure to use flagCompletionMutex before you read from or write to it.
var flagCompletionFunctions = map[*pflag.Flag]func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective){}
// lock for reading and writing from flagCompletionFunctions
var flagCompletionMutex = &sync.RWMutex{}
// ShellCompDirective is a bit map representing the different behaviors the shell
// can be instructed to have once completions have been provided.
type ShellCompDirective int
type flagCompError struct {
subCommand string
flagName string
}
func (e *flagCompError) Error() string {
return "Subcommand '" + e.subCommand + "' does not support flag '" + e.flagName + "'"
}
const (
// ShellCompDirectiveError indicates an error occurred and completions should be ignored.
ShellCompDirectiveError ShellCompDirective = 1 << iota
// ShellCompDirectiveNoSpace indicates that the shell should not add a space
// after the completion even if there is a single completion provided.
ShellCompDirectiveNoSpace
// ShellCompDirectiveNoFileComp indicates that the shell should not provide
// file completion even when no completion is provided.
ShellCompDirectiveNoFileComp
// ShellCompDirectiveFilterFileExt indicates that the provided completions
// should be used as file extension filters.
// For flags, using Command.MarkFlagFilename() and Command.MarkPersistentFlagFilename()
// is a shortcut to using this directive explicitly. The BashCompFilenameExt
// annotation can also be used to obtain the same behavior for flags.
ShellCompDirectiveFilterFileExt
// ShellCompDirectiveFilterDirs indicates that only directory names should
// be provided in file completion. To request directory names within another
// directory, the returned completions should specify the directory within
// which to search. The BashCompSubdirsInDir annotation can be used to
// obtain the same behavior but only for flags.
ShellCompDirectiveFilterDirs
// ShellCompDirectiveKeepOrder indicates that the shell should preserve the order
// in which the completions are provided
ShellCompDirectiveKeepOrder
// ===========================================================================
// All directives using iota should be above this one.
// For internal use.
shellCompDirectiveMaxValue
// ShellCompDirectiveDefault indicates to let the shell perform its default
// behavior after completions have been provided.
// This one must be last to avoid messing up the iota count.
ShellCompDirectiveDefault ShellCompDirective = 0
)
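// Because ShellCompDirective is a bit map, a completion function can OR
// several directives together in one return value. A sketch with a
// hypothetical ValidArgsFunction (all names below are illustrative):
package main

import "github.com/spf13/cobra"

func colorArgs(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	// Offer fixed choices, suppress file completion, and preserve order.
	return []string{"red", "green", "blue"},
		cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveKeepOrder
}

func main() {
	cmd := &cobra.Command{
		Use:               "paint",
		ValidArgsFunction: colorArgs,
		Run:               func(*cobra.Command, []string) {},
	}
	cobra.CheckErr(cmd.Execute())
}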
const (
// Constants for the completion command
compCmdName = "completion"
compCmdNoDescFlagName = "no-descriptions"
compCmdNoDescFlagDesc = "disable completion descriptions"
compCmdNoDescFlagDefault = false
)
// CompletionOptions are the options to control shell completion
type CompletionOptions struct {
// DisableDefaultCmd prevents Cobra from creating a default 'completion' command
DisableDefaultCmd bool
// DisableNoDescFlag prevents Cobra from creating the '--no-descriptions' flag
// for shells that support completion descriptions
DisableNoDescFlag bool
// DisableDescriptions turns off all completion descriptions for shells
// that support them
DisableDescriptions bool
// HiddenDefaultCmd makes the default 'completion' command hidden
HiddenDefaultCmd bool
}
// NoFileCompletions can be used to disable file completion for commands that should
// not trigger file completions.
func NoFileCompletions(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
return nil, ShellCompDirectiveNoFileComp
}
// FixedCompletions can be used to create a completion function which always
// returns the same results.
func FixedCompletions(choices []string, directive ShellCompDirective) func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
return func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
return choices, directive
}
}
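// NoFileCompletions and FixedCompletions are ready-made ValidArgsFunction
// values; a sketch with a hypothetical command and choices:
package main

import "github.com/spf13/cobra"

func main() {
	deploy := &cobra.Command{
		Use: "deploy", // hypothetical
		// Always offer the same two nouns and never fall back to file names.
		ValidArgsFunction: cobra.FixedCompletions(
			[]string{"staging", "production"},
			cobra.ShellCompDirectiveNoFileComp,
		),
		Run: func(*cobra.Command, []string) {},
	}
	cobra.CheckErr(deploy.Execute())
}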
// RegisterFlagCompletionFunc should be called to register a function to provide completion for a flag.
func (c *Command) RegisterFlagCompletionFunc(flagName string, f func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)) error {
flag := c.Flag(flagName)
if flag == nil {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' does not exist", flagName)
}
flagCompletionMutex.Lock()
defer flagCompletionMutex.Unlock()
if _, exists := flagCompletionFunctions[flag]; exists {
return fmt.Errorf("RegisterFlagCompletionFunc: flag '%s' already registered", flagName)
}
flagCompletionFunctions[flag] = f
return nil
}
// GetFlagCompletionFunc returns the completion function for the given flag of the command, if available.
func (c *Command) GetFlagCompletionFunc(flagName string) (func(*Command, []string, string) ([]string, ShellCompDirective), bool) {
flag := c.Flag(flagName)
if flag == nil {
return nil, false
}
flagCompletionMutex.RLock()
defer flagCompletionMutex.RUnlock()
completionFunc, exists := flagCompletionFunctions[flag]
return completionFunc, exists
}
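// Registration and lookup pair up as follows; a hedged sketch around a
// hypothetical --format flag on a hypothetical "export" command:
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "export", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("format", "json", "output format")

	// Returns an error if the flag does not exist or is already registered.
	cobra.CheckErr(cmd.RegisterFlagCompletionFunc("format",
		func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
			return []string{"json", "yaml", "table"}, cobra.ShellCompDirectiveNoFileComp
		}))

	if fn, ok := cmd.GetFlagCompletionFunc("format"); ok {
		_ = fn // the same function, retrievable later
	}
}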
// Returns a string listing the different directives enabled in d
func (d ShellCompDirective) string() string {
var directives []string
if d&ShellCompDirectiveError != 0 {
directives = append(directives, "ShellCompDirectiveError")
}
if d&ShellCompDirectiveNoSpace != 0 {
directives = append(directives, "ShellCompDirectiveNoSpace")
}
if d&ShellCompDirectiveNoFileComp != 0 {
directives = append(directives, "ShellCompDirectiveNoFileComp")
}
if d&ShellCompDirectiveFilterFileExt != 0 {
directives = append(directives, "ShellCompDirectiveFilterFileExt")
}
if d&ShellCompDirectiveFilterDirs != 0 {
directives = append(directives, "ShellCompDirectiveFilterDirs")
}
if d&ShellCompDirectiveKeepOrder != 0 {
directives = append(directives, "ShellCompDirectiveKeepOrder")
}
if len(directives) == 0 {
directives = append(directives, "ShellCompDirectiveDefault")
}
if d >= shellCompDirectiveMaxValue {
return fmt.Sprintf("ERROR: unexpected ShellCompDirective value: %d", d)
}
return strings.Join(directives, ", ")
}
// initCompleteCmd adds a special hidden command that can be used to request custom completions.
func (c *Command) initCompleteCmd(args []string) {
completeCmd := &Command{
Use: fmt.Sprintf("%s [command-line]", ShellCompRequestCmd),
Aliases: []string{ShellCompNoDescRequestCmd},
DisableFlagsInUseLine: true,
Hidden: true,
DisableFlagParsing: true,
Args: MinimumNArgs(1),
Short: "Request shell completion choices for the specified command-line",
Long: fmt.Sprintf("%[2]s is a special command that is used by the shell completion logic\n%[1]s",
"to request completion choices for the specified command-line.", ShellCompRequestCmd),
Run: func(cmd *Command, args []string) {
finalCmd, completions, directive, err := cmd.getCompletions(args)
if err != nil {
CompErrorln(err.Error())
// Keep going for multiple reasons:
// 1- There could be some valid completions even though there was an error
// 2- Even without completions, we need to print the directive
}
noDescriptions := cmd.CalledAs() == ShellCompNoDescRequestCmd
if !noDescriptions {
if doDescriptions, err := strconv.ParseBool(getEnvConfig(cmd, configEnvVarSuffixDescriptions)); err == nil {
noDescriptions = !doDescriptions
}
}
noActiveHelp := GetActiveHelpConfig(finalCmd) == activeHelpGlobalDisable
out := finalCmd.OutOrStdout()
for _, comp := range completions {
if noActiveHelp && strings.HasPrefix(comp, activeHelpMarker) {
// Remove all activeHelp entries if it's disabled.
continue
}
if noDescriptions {
// Remove any description that may be included following a tab character.
comp = strings.SplitN(comp, "\t", 2)[0]
}
// Make sure we only write the first line to the output.
// This is needed if a description contains a linebreak.
// Otherwise the shell scripts will interpret the other lines as new flags
// and could therefore provide a wrong completion.
comp = strings.SplitN(comp, "\n", 2)[0]
// Finally trim the completion. This is especially important to get rid
// of a trailing tab when there is no description following it.
// For example, a sub-command without a description should not be completed
// with a tab at the end (or else zsh will show a -- following it
// although there is no description).
comp = strings.TrimSpace(comp)
// Print each possible completion to the output for the completion script to consume.
fmt.Fprintln(out, comp)
}
// As the last printout, print the completion directive for the completion script to parse.
// The directive integer must be that last character following a single colon (:).
// The completion script expects :<directive>
fmt.Fprintf(out, ":%d\n", directive)
// Print some helpful info to stderr for the user to understand.
// Output from stderr must be ignored by the completion script.
fmt.Fprintf(finalCmd.ErrOrStderr(), "Completion ended with directive: %s\n", directive.string())
},
}
c.AddCommand(completeCmd)
subCmd, _, err := c.Find(args)
if err != nil || subCmd.Name() != ShellCompRequestCmd {
// Only create this special command if it is actually being called.
// This reduces possible side-effects of creating such a command;
// for example, having this command would cause problems to a
// cobra program that only consists of the root command, since this
// command would cause the root command to suddenly have a subcommand.
c.RemoveCommand(completeCmd)
}
}
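// The Run function above defines the wire format the shell scripts parse:
// completion lines, then ":<directive>" as the last line. A standalone
// sketch of that parsing, mirroring what the bash script does with
// ${out##*:}; the sample output is hypothetical.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Hypothetical output of `<program> __complete deploy ''`.
	out := "staging\nproduction\n:4" // 4 == ShellCompDirectiveNoFileComp

	lines := strings.Split(strings.TrimSpace(out), "\n")
	directive := 0
	if last := lines[len(lines)-1]; strings.HasPrefix(last, ":") {
		directive, _ = strconv.Atoi(last[1:])
		lines = lines[:len(lines)-1]
	}
	fmt.Println("completions:", lines, "directive:", directive)
}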
func (c *Command) getCompletions(args []string) (*Command, []string, ShellCompDirective, error) {
// The last argument, which is not completely typed by the user,
// should not be part of the list of arguments
toComplete := args[len(args)-1]
trimmedArgs := args[:len(args)-1]
var finalCmd *Command
var finalArgs []string
var err error
// Find the real command for which completion must be performed
// check if we need to traverse here to parse local flags on parent commands
if c.Root().TraverseChildren {
finalCmd, finalArgs, err = c.Root().Traverse(trimmedArgs)
} else {
// For Root commands that don't specify any value for their Args fields, when we call
// Find(), if those Root commands don't have any sub-commands, they will accept arguments.
// However, because we have added the __complete sub-command in the current code path, the
// call to Find() -> legacyArgs() will return an error if there are any arguments.
// To avoid this, we first remove the __complete command to get back to having no sub-commands.
rootCmd := c.Root()
if len(rootCmd.Commands()) == 1 {
rootCmd.RemoveCommand(c)
}
finalCmd, finalArgs, err = rootCmd.Find(trimmedArgs)
}
if err != nil {
// Unable to find the real command. E.g., <program> someInvalidCmd <TAB>
return c, []string{}, ShellCompDirectiveDefault, fmt.Errorf("unable to find a command for arguments: %v", trimmedArgs)
}
finalCmd.ctx = c.ctx
// These flags are normally added when `execute()` is called on `finalCmd`,
// however, when doing completion, we don't call `finalCmd.execute()`.
// Let's add the --help and --version flag ourselves but only if the finalCmd
// has not disabled flag parsing; if flag parsing is disabled, it is up to the
// finalCmd itself to handle the completion of *all* flags.
if !finalCmd.DisableFlagParsing {
finalCmd.InitDefaultHelpFlag()
finalCmd.InitDefaultVersionFlag()
}
// Check if we are doing flag value completion before parsing the flags.
// This is important because if we are completing a flag value, we need to also
// remove the flag name argument from the list of finalArgs or else the parsing
// could fail due to an invalid value (incomplete) for the flag.
flag, finalArgs, toComplete, flagErr := checkIfFlagCompletion(finalCmd, finalArgs, toComplete)
// Check if interspersed is false or -- was set on a previous arg.
// This works by counting the arguments. Normally -- is not counted as arg but
// if -- was already set or interspersed is false and there is already one arg then
// the extra added -- is counted as arg.
flagCompletion := true
_ = finalCmd.ParseFlags(append(finalArgs, "--"))
newArgCount := finalCmd.Flags().NArg()
// Parse the flags early so we can check if required flags are set
if err = finalCmd.ParseFlags(finalArgs); err != nil {
return finalCmd, []string{}, ShellCompDirectiveDefault, fmt.Errorf("Error while parsing flags from args %v: %s", finalArgs, err.Error())
}
realArgCount := finalCmd.Flags().NArg()
if newArgCount > realArgCount {
// don't do flag completion (see above)
flagCompletion = false
}
// Error while attempting to parse flags
if flagErr != nil {
// If error type is flagCompError and we don't want flagCompletion we should ignore the error
if _, ok := flagErr.(*flagCompError); !(ok && !flagCompletion) {
return finalCmd, []string{}, ShellCompDirectiveDefault, flagErr
}
}
// Look for the --help or --version flags. If they are present,
// there should be no further completions.
if helpOrVersionFlagPresent(finalCmd) {
return finalCmd, []string{}, ShellCompDirectiveNoFileComp, nil
}
// We only remove the flags from the arguments if DisableFlagParsing is not set.
// This is important for commands which have requested to do their own flag completion.
if !finalCmd.DisableFlagParsing {
finalArgs = finalCmd.Flags().Args()
}
if flag != nil && flagCompletion {
// Check if we are completing a flag value subject to annotations
if validExts, present := flag.Annotations[BashCompFilenameExt]; present {
if len(validExts) != 0 {
// File completion filtered by extensions
return finalCmd, validExts, ShellCompDirectiveFilterFileExt, nil
}
// The annotation requests simple file completion. There is no reason to do
// that since it is the default behavior anyway. Let's ignore this annotation
// in case the program also registered a completion function for this flag.
// Even though it is a mistake on the program's side, let's be nice when we can.
}
if subDir, present := flag.Annotations[BashCompSubdirsInDir]; present {
if len(subDir) == 1 {
// Directory completion from within a directory
return finalCmd, subDir, ShellCompDirectiveFilterDirs, nil
}
// Directory completion
return finalCmd, []string{}, ShellCompDirectiveFilterDirs, nil
}
}
var completions []string
var directive ShellCompDirective
// Enforce flag groups before doing flag completions
finalCmd.enforceFlagGroupsForCompletion()
// Note that we want to perform flagname completion even if finalCmd.DisableFlagParsing==true;
// doing this allows for completion of persistent flag names even for commands that disable flag parsing.
//
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as it requires
// the flag name to be complete
if flag == nil && len(toComplete) > 0 && toComplete[0] == '-' && !strings.Contains(toComplete, "=") && flagCompletion {
// First check for required flags
completions = completeRequireFlags(finalCmd, toComplete)
// If we have not found any required flags, only then can we show regular flags
if len(completions) == 0 {
doCompleteFlags := func(flag *pflag.Flag) {
if !flag.Changed ||
strings.Contains(flag.Value.Type(), "Slice") ||
strings.Contains(flag.Value.Type(), "Array") {
// If the flag is not already present, or if it can be specified multiple times (Array or Slice)
// we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
// We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
// Try to complete non-inherited flags even if DisableFlagParsing==true.
// This allows programs to tell Cobra about flags for completion even
// if the actual parsing of flags is not done by Cobra.
// For instance, Helm uses this to provide flag name completion for
// some of its plugins.
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteFlags(flag)
})
}
directive = ShellCompDirectiveNoFileComp
if len(completions) == 1 && strings.HasSuffix(completions[0], "=") {
// If there is a single completion, the shell usually adds a space
// after the completion. We don't want that if the flag ends with an =
directive = ShellCompDirectiveNoSpace
}
if !finalCmd.DisableFlagParsing {
// If DisableFlagParsing==false, we have completed the flags as known by Cobra;
// we can return what we found.
// If DisableFlagParsing==true, Cobra may not be aware of all flags, so we
// let the logic continue to see if ValidArgsFunction needs to be called.
return finalCmd, completions, directive, nil
}
} else {
directive = ShellCompDirectiveDefault
if flag == nil {
foundLocalNonPersistentFlag := false
// If TraverseChildren is true on the root command we don't check for
// local flags because we can use a local flag on a parent command
if !finalCmd.Root().TraverseChildren {
// Check if there are any local, non-persistent flags on the command-line
localNonPersistentFlags := finalCmd.LocalNonPersistentFlags()
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
if localNonPersistentFlags.Lookup(flag.Name) != nil && flag.Changed {
foundLocalNonPersistentFlag = true
}
})
}
// Complete subcommand names, including the help command
if len(finalArgs) == 0 && !foundLocalNonPersistentFlag {
// We only complete sub-commands if:
// - there are no arguments on the command-line and
// - there are no local, non-persistent flags on the command-line or TraverseChildren is true
for _, subCmd := range finalCmd.Commands() {
if subCmd.IsAvailableCommand() || subCmd == finalCmd.helpCommand {
if strings.HasPrefix(subCmd.Name(), toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", subCmd.Name(), subCmd.Short))
}
directive = ShellCompDirectiveNoFileComp
}
}
}
// Complete required flags even without the '-' prefix
completions = append(completions, completeRequireFlags(finalCmd, toComplete)...)
// Always complete ValidArgs, even if we are completing a subcommand name.
// This is for commands that have both subcommands and ValidArgs.
if len(finalCmd.ValidArgs) > 0 {
if len(finalArgs) == 0 {
// ValidArgs are only for the first argument
for _, validArg := range finalCmd.ValidArgs {
if strings.HasPrefix(validArg, toComplete) {
completions = append(completions, validArg)
}
}
directive = ShellCompDirectiveNoFileComp
// If no completions were found within commands or ValidArgs,
// see if there are any ArgAliases that should be completed.
if len(completions) == 0 {
for _, argAlias := range finalCmd.ArgAliases {
if strings.HasPrefix(argAlias, toComplete) {
completions = append(completions, argAlias)
}
}
}
}
// If there are ValidArgs specified (even if they don't match), we stop completion.
// Only one of ValidArgs or ValidArgsFunction can be used for a single command.
return finalCmd, completions, directive, nil
}
// Let the logic continue so as to add any ValidArgsFunction completions,
// even if we already found sub-commands.
// This is for commands that have subcommands but also specify a ValidArgsFunction.
}
}
// Find the completion function for the flag or command
var completionFn func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective)
if flag != nil && flagCompletion {
flagCompletionMutex.RLock()
completionFn = flagCompletionFunctions[flag]
flagCompletionMutex.RUnlock()
} else {
completionFn = finalCmd.ValidArgsFunction
}
if completionFn != nil {
// Go custom completion defined for this flag or command.
// Call the registered completion function to get the completions.
var comps []string
comps, directive = completionFn(finalCmd, finalArgs, toComplete)
completions = append(completions, comps...)
}
return finalCmd, completions, directive, nil
}
func helpOrVersionFlagPresent(cmd *Command) bool {
if versionFlag := cmd.Flags().Lookup("version"); versionFlag != nil &&
len(versionFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && versionFlag.Changed {
return true
}
if helpFlag := cmd.Flags().Lookup("help"); helpFlag != nil &&
len(helpFlag.Annotations[FlagSetByCobraAnnotation]) > 0 && helpFlag.Changed {
return true
}
return false
}
func getFlagNameCompletions(flag *pflag.Flag, toComplete string) []string {
if nonCompletableFlag(flag) {
return []string{}
}
var completions []string
flagName := "--" + flag.Name
if strings.HasPrefix(flagName, toComplete) {
// Flag without the =
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// Why suggest both long forms: --flag and --flag= ?
// This forces the user to *always* have to type either an = or a space after the flag name.
// Let's be nice and avoid making users have to do that.
// Since boolean flags and shortname flags don't show the = form, let's go that route and never show it.
// The = form will still work, we just won't suggest it.
// This also makes the list of suggested flags shorter as we avoid all the = forms.
//
// if len(flag.NoOptDefVal) == 0 {
// // Flag requires a value, so it can be suffixed with =
// flagName += "="
// completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
// }
}
flagName = "-" + flag.Shorthand
if len(flag.Shorthand) > 0 && strings.HasPrefix(flagName, toComplete) {
completions = append(completions, fmt.Sprintf("%s\t%s", flagName, flag.Usage))
}
return completions
}
func completeRequireFlags(finalCmd *Command, toComplete string) []string {
var completions []string
doCompleteRequiredFlags := func(flag *pflag.Flag) {
if _, present := flag.Annotations[BashCompOneRequiredFlag]; present {
if !flag.Changed {
// If the flag is not already present, we suggest it as a completion
completions = append(completions, getFlagNameCompletions(flag, toComplete)...)
}
}
}
// We cannot use finalCmd.Flags() because we may not have called ParsedFlags() for commands
// that have set DisableFlagParsing; it is ParseFlags() that merges the inherited and
// non-inherited flags.
finalCmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
finalCmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
doCompleteRequiredFlags(flag)
})
return completions
}
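// Illustrative sketch (assumed flag name): marking a flag required is what sets
// the BashCompOneRequiredFlag annotation that completeRequireFlags checks, so the
// flag keeps being suggested until the user has provided it.
func exampleRequiredFlagCompletion() {
	cmd := &Command{Use: "apply", Run: func(*Command, []string) {}}
	cmd.Flags().StringP("filename", "f", "", "manifest to apply")
	_ = cmd.MarkFlagRequired("filename")
	// completeRequireFlags(cmd, "") now yields both "--filename\t..." and "-f\t...".
}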
func checkIfFlagCompletion(finalCmd *Command, args []string, lastArg string) (*pflag.Flag, []string, string, error) {
if finalCmd.DisableFlagParsing {
// We only do flag completion if we are allowed to parse flags
// This is important for commands which have requested to do their own flag completion.
return nil, args, lastArg, nil
}
var flagName string
trimmedArgs := args
flagWithEqual := false
orgLastArg := lastArg
// When doing completion of a flag name, as soon as an argument starts with
// a '-' we know it is a flag. We cannot use isFlagArg() here as that function
// requires the flag name to be complete
if len(lastArg) > 0 && lastArg[0] == '-' {
if index := strings.Index(lastArg, "="); index >= 0 {
// Flag with an =
if strings.HasPrefix(lastArg[:index], "--") {
// Flag has full name
flagName = lastArg[2:index]
} else {
// Flag is shorthand
// We have to get the last shorthand flag name
// e.g. `-asd` => d to provide the correct completion
// https://github.com/spf13/cobra/issues/1257
flagName = lastArg[index-1 : index]
}
lastArg = lastArg[index+1:]
flagWithEqual = true
} else {
// Normal flag completion
return nil, args, lastArg, nil
}
}
if len(flagName) == 0 {
if len(args) > 0 {
prevArg := args[len(args)-1]
if isFlagArg(prevArg) {
// Only consider the case where the flag does not contain an =.
// If the flag contains an = it means it has already been fully processed,
// so we don't need to deal with it here.
if index := strings.Index(prevArg, "="); index < 0 {
if strings.HasPrefix(prevArg, "--") {
// Flag has full name
flagName = prevArg[2:]
} else {
// Flag is shorthand
// We have to get the last shorthand flag name
// e.g. `-asd` => d to provide the correct completion
// https://github.com/spf13/cobra/issues/1257
flagName = prevArg[len(prevArg)-1:]
}
// Remove the uncompleted flag or else there could be an error created
// for an invalid value for that flag
trimmedArgs = args[:len(args)-1]
}
}
}
}
if len(flagName) == 0 {
// Not doing flag completion
return nil, trimmedArgs, lastArg, nil
}
flag := findFlag(finalCmd, flagName)
if flag == nil {
// Flag not supported by this command; the interspersed option might be set, so return the original args
return nil, args, orgLastArg, &flagCompError{subCommand: finalCmd.Name(), flagName: flagName}
}
if !flagWithEqual {
if len(flag.NoOptDefVal) != 0 {
// We had assumed we were dealing with a two-word flag, but the flag is a boolean flag.
// In that case, there is no value following it, so we are not really doing flag completion.
// Reset everything to do noun completion.
trimmedArgs = args
flag = nil
}
}
return flag, trimmedArgs, lastArg, nil
}
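// Illustrative sketch (assumed names): how the parser above classifies a shorthand
// flag written with '=', e.g. completing `tool -n=de<TAB>`.
func exampleCheckIfFlagCompletion() {
	cmd := &Command{Use: "tool", Run: func(*Command, []string) {}}
	cmd.Flags().StringP("namespace", "n", "", "target namespace")
	flag, trimmedArgs, toComplete, _ := checkIfFlagCompletion(cmd, []string{}, "-n=de")
	// flag.Name == "namespace" and toComplete == "de": the flag's value, not its
	// name, is now the completion target.
	_, _, _ = flag, trimmedArgs, toComplete
}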
// InitDefaultCompletionCmd adds a default 'completion' command to c.
// This function will do nothing if any of the following is true:
// 1- the feature has been explicitly disabled by the program,
// 2- c has no subcommands (to avoid creating one),
// 3- c already has a 'completion' command provided by the program.
func (c *Command) InitDefaultCompletionCmd() {
if c.CompletionOptions.DisableDefaultCmd || !c.HasSubCommands() {
return
}
for _, cmd := range c.commands {
if cmd.Name() == compCmdName || cmd.HasAlias(compCmdName) {
// A completion command is already available
return
}
}
haveNoDescFlag := !c.CompletionOptions.DisableNoDescFlag && !c.CompletionOptions.DisableDescriptions
completionCmd := &Command{
Use: compCmdName,
Short: "Generate the autocompletion script for the specified shell",
Long: fmt.Sprintf(`Generate the autocompletion script for %[1]s for the specified shell.
See each sub-command's help for details on how to use the generated script.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
Hidden: c.CompletionOptions.HiddenDefaultCmd,
GroupID: c.completionCommandGroupID,
}
c.AddCommand(completionCmd)
out := c.OutOrStdout()
noDesc := c.CompletionOptions.DisableDescriptions
shortDesc := "Generate the autocompletion script for %s"
bash := &Command{
Use: "bash",
Short: fmt.Sprintf(shortDesc, "bash"),
Long: fmt.Sprintf(`Generate the autocompletion script for the bash shell.
This script depends on the 'bash-completion' package.
If it is not installed already, you can install it via your OS's package manager.
To load completions in your current shell session:
source <(%[1]s completion bash)
To load completions for every new session, execute once:
#### Linux:
%[1]s completion bash > /etc/bash_completion.d/%[1]s
#### macOS:
%[1]s completion bash > $(brew --prefix)/etc/bash_completion.d/%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
DisableFlagsInUseLine: true,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
return cmd.Root().GenBashCompletionV2(out, !noDesc)
},
}
if haveNoDescFlag {
bash.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
zsh := &Command{
Use: "zsh",
Short: fmt.Sprintf(shortDesc, "zsh"),
Long: fmt.Sprintf(`Generate the autocompletion script for the zsh shell.
If shell completion is not already enabled in your environment you will need
to enable it. You can execute the following once:
echo "autoload -U compinit; compinit" >> ~/.zshrc
To load completions in your current shell session:
source <(%[1]s completion zsh)
To load completions for every new session, execute once:
#### Linux:
%[1]s completion zsh > "${fpath[1]}/_%[1]s"
#### macOS:
%[1]s completion zsh > $(brew --prefix)/share/zsh/site-functions/_%[1]s
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
if noDesc {
return cmd.Root().GenZshCompletionNoDesc(out)
}
return cmd.Root().GenZshCompletion(out)
},
}
if haveNoDescFlag {
zsh.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
fish := &Command{
Use: "fish",
Short: fmt.Sprintf(shortDesc, "fish"),
Long: fmt.Sprintf(`Generate the autocompletion script for the fish shell.
To load completions in your current shell session:
%[1]s completion fish | source
To load completions for every new session, execute once:
%[1]s completion fish > ~/.config/fish/completions/%[1]s.fish
You will need to start a new shell for this setup to take effect.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
return cmd.Root().GenFishCompletion(out, !noDesc)
},
}
if haveNoDescFlag {
fish.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
powershell := &Command{
Use: "powershell",
Short: fmt.Sprintf(shortDesc, "powershell"),
Long: fmt.Sprintf(`Generate the autocompletion script for powershell.
To load completions in your current shell session:
%[1]s completion powershell | Out-String | Invoke-Expression
To load completions for every new session, add the output of the above command
to your powershell profile.
`, c.Root().Name()),
Args: NoArgs,
ValidArgsFunction: NoFileCompletions,
RunE: func(cmd *Command, args []string) error {
if noDesc {
return cmd.Root().GenPowerShellCompletion(out)
}
return cmd.Root().GenPowerShellCompletionWithDesc(out)
},
}
if haveNoDescFlag {
powershell.Flags().BoolVar(&noDesc, compCmdNoDescFlagName, compCmdNoDescFlagDefault, compCmdNoDescFlagDesc)
}
completionCmd.AddCommand(bash, zsh, fish, powershell)
}
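// Illustrative sketch: tuning the default 'completion' command via CompletionOptions
// before calling InitDefaultCompletionCmd (Execute normally does this for you).
func exampleCompletionOptions() {
	root := &Command{Use: "app", Run: func(*Command, []string) {}}
	root.AddCommand(&Command{Use: "child", Run: func(*Command, []string) {}})
	root.CompletionOptions.HiddenDefaultCmd = true    // keep 'completion' out of help
	root.CompletionOptions.DisableDescriptions = true // generate scripts without descriptions
	root.InitDefaultCompletionCmd()
}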
func findFlag(cmd *Command, name string) *pflag.Flag {
flagSet := cmd.Flags()
if len(name) == 1 {
// First convert the short flag into a long flag
// as the cmd.Flag() search only accepts long flags
if short := flagSet.ShorthandLookup(name); short != nil {
name = short.Name
} else {
set := cmd.InheritedFlags()
if short = set.ShorthandLookup(name); short != nil {
name = short.Name
} else {
return nil
}
}
}
return cmd.Flag(name)
}
// CompDebug prints the specified string to the same file as where the
// completion script prints its logs.
// Note that completion printouts should never be on stdout as they would
// be wrongly interpreted as actual completion choices by the completion script.
func CompDebug(msg string, printToStdErr bool) {
msg = fmt.Sprintf("[Debug] %s", msg)
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
if path := os.Getenv("BASH_COMP_DEBUG_FILE"); path != "" {
f, err := os.OpenFile(path,
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err == nil {
defer f.Close()
WriteStringAndCheck(f, msg)
}
}
if printToStdErr {
// Must print to stderr for this not to be read by the completion script.
fmt.Fprint(os.Stderr, msg)
}
}
// CompDebugln prints the specified string with a newline at the end
// to the same file as where the completion script prints its logs.
// Such logs are only printed when the user has set the environment
// variable BASH_COMP_DEBUG_FILE to the path of some file to be used.
func CompDebugln(msg string, printToStdErr bool) {
CompDebug(fmt.Sprintf("%s\n", msg), printToStdErr)
}
// CompError prints the specified completion message to stderr.
func CompError(msg string) {
msg = fmt.Sprintf("[Error] %s", msg)
CompDebug(msg, true)
}
// CompErrorln prints the specified completion message to stderr with a newline at the end.
func CompErrorln(msg string) {
CompError(fmt.Sprintf("%s\n", msg))
}
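// Illustrative sketch: tracing a completion function through the debug helpers.
// Output lands in the file named by BASH_COMP_DEBUG_FILE, never on stdout, so it
// cannot be mistaken for completion choices.
func exampleCompDebug(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
	CompDebugln(fmt.Sprintf("completing %q for %s", toComplete, cmd.Name()), false)
	return []string{"alpha", "beta"}, ShellCompDirectiveNoFileComp
}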
// These values should not be changed: users will be using them explicitly.
const (
configEnvVarGlobalPrefix = "COBRA"
configEnvVarSuffixDescriptions = "COMPLETION_DESCRIPTIONS"
)
var configEnvVarPrefixSubstRegexp = regexp.MustCompile(`[^A-Z0-9_]`)
// configEnvVar returns the name of the program-specific configuration environment
// variable. It has the format <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the
// root command in upper case, with all non-ASCII-alphanumeric characters replaced by `_`.
func configEnvVar(name, suffix string) string {
// This format should not be changed: users will be using it explicitly.
v := strings.ToUpper(fmt.Sprintf("%s_%s", name, suffix))
v = configEnvVarPrefixSubstRegexp.ReplaceAllString(v, "_")
return v
}
// getEnvConfig returns the value of the configuration environment variable
// <PROGRAM>_<SUFFIX> where <PROGRAM> is the name of the root command in upper
// case, with all non-ASCII-alphanumeric characters replaced by `_`.
// If the value is empty or not set, the value of the environment variable
// COBRA_<SUFFIX> is returned instead.
func getEnvConfig(cmd *Command, suffix string) string {
v := os.Getenv(configEnvVar(cmd.Root().Name(), suffix))
if v == "" {
v = os.Getenv(configEnvVar(configEnvVarGlobalPrefix, suffix))
}
return v
}
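// Illustrative sketch (assumed program name): the variable names produced above.
func exampleConfigEnvVar() {
	v := configEnvVar("my-app", configEnvVarSuffixDescriptions)
	// v == "MY_APP_COMPLETION_DESCRIPTIONS"; the global fallback is
	// configEnvVar("COBRA", ...) == "COBRA_COMPLETION_DESCRIPTIONS".
	_ = v
}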

vendor/github.com/spf13/cobra/fish_completions.go generated vendored Normal file

@ -0,0 +1,292 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
func genFishComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
WriteStringAndCheck(buf, fmt.Sprintf("# fish completion for %-36s -*- shell-script -*-\n", name))
WriteStringAndCheck(buf, fmt.Sprintf(`
function __%[1]s_debug
set -l file "$BASH_COMP_DEBUG_FILE"
if test -n "$file"
echo "$argv" >> $file
end
end
function __%[1]s_perform_completion
__%[1]s_debug "Starting __%[1]s_perform_completion"
# Extract all args except the last one
set -l args (commandline -opc)
# Extract the last arg and escape it in case it is a space
set -l lastArg (string escape -- (commandline -ct))
__%[1]s_debug "args: $args"
__%[1]s_debug "last arg: $lastArg"
# Disable ActiveHelp which is not supported for fish shell
set -l requestComp "%[10]s=0 $args[1] %[3]s $args[2..-1] $lastArg"
__%[1]s_debug "Calling $requestComp"
set -l results (eval $requestComp 2> /dev/null)
# Some programs may output extra empty lines after the directive.
# Let's ignore them or else it will break completion.
# Ref: https://github.com/spf13/cobra/issues/1279
for line in $results[-1..1]
if test (string trim -- $line) = ""
# Found an empty line, remove it
set results $results[1..-2]
else
# Found non-empty line, we have our proper output
break
end
end
set -l comps $results[1..-2]
set -l directiveLine $results[-1]
# For Fish, when completing a flag with an = (e.g., <program> -n=<TAB>)
# completions must be prefixed with the flag
set -l flagPrefix (string match -r -- '-.*=' "$lastArg")
__%[1]s_debug "Comps: $comps"
__%[1]s_debug "DirectiveLine: $directiveLine"
__%[1]s_debug "flagPrefix: $flagPrefix"
for comp in $comps
printf "%%s%%s\n" "$flagPrefix" "$comp"
end
printf "%%s\n" "$directiveLine"
end
# this function limits calls to __%[1]s_perform_completion, by caching the result behind $__%[1]s_perform_completion_once_result
function __%[1]s_perform_completion_once
__%[1]s_debug "Starting __%[1]s_perform_completion_once"
if test -n "$__%[1]s_perform_completion_once_result"
__%[1]s_debug "Seems like a valid result already exists, skipping __%[1]s_perform_completion"
return 0
end
set --global __%[1]s_perform_completion_once_result (__%[1]s_perform_completion)
if test -z "$__%[1]s_perform_completion_once_result"
__%[1]s_debug "No completions, probably due to a failure"
return 1
end
__%[1]s_debug "Performed completions and set __%[1]s_perform_completion_once_result"
return 0
end
# this function is used to clear the $__%[1]s_perform_completion_once_result variable after completions are run
function __%[1]s_clear_perform_completion_once_result
__%[1]s_debug ""
__%[1]s_debug "========= clearing previously set __%[1]s_perform_completion_once_result variable =========="
set --erase __%[1]s_perform_completion_once_result
__%[1]s_debug "Successfully erased the variable __%[1]s_perform_completion_once_result"
end
function __%[1]s_requires_order_preservation
__%[1]s_debug ""
__%[1]s_debug "========= checking if order preservation is required =========="
__%[1]s_perform_completion_once
if test -z "$__%[1]s_perform_completion_once_result"
__%[1]s_debug "Error determining if order preservation is required"
return 1
end
set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
__%[1]s_debug "Directive is: $directive"
set -l shellCompDirectiveKeepOrder %[9]d
set -l keeporder (math (math --scale 0 $directive / $shellCompDirectiveKeepOrder) %% 2)
__%[1]s_debug "Keeporder is: $keeporder"
if test $keeporder -ne 0
__%[1]s_debug "This does require order preservation"
return 0
end
__%[1]s_debug "This doesn't require order preservation"
return 1
end
# This function does two things:
# - Obtain the completions and store them in the global __%[1]s_comp_results
# - Return false if file completion should be performed
function __%[1]s_prepare_completions
__%[1]s_debug ""
__%[1]s_debug "========= starting completion logic =========="
# Start fresh
set --erase __%[1]s_comp_results
__%[1]s_perform_completion_once
__%[1]s_debug "Completion results: $__%[1]s_perform_completion_once_result"
if test -z "$__%[1]s_perform_completion_once_result"
__%[1]s_debug "No completion, probably due to a failure"
# Might as well do file completion, in case it helps
return 1
end
set -l directive (string sub --start 2 $__%[1]s_perform_completion_once_result[-1])
set --global __%[1]s_comp_results $__%[1]s_perform_completion_once_result[1..-2]
__%[1]s_debug "Completions are: $__%[1]s_comp_results"
__%[1]s_debug "Directive is: $directive"
set -l shellCompDirectiveError %[4]d
set -l shellCompDirectiveNoSpace %[5]d
set -l shellCompDirectiveNoFileComp %[6]d
set -l shellCompDirectiveFilterFileExt %[7]d
set -l shellCompDirectiveFilterDirs %[8]d
if test -z "$directive"
set directive 0
end
set -l compErr (math (math --scale 0 $directive / $shellCompDirectiveError) %% 2)
if test $compErr -eq 1
__%[1]s_debug "Received error directive: aborting."
# Might as well do file completion, in case it helps
return 1
end
set -l filefilter (math (math --scale 0 $directive / $shellCompDirectiveFilterFileExt) %% 2)
set -l dirfilter (math (math --scale 0 $directive / $shellCompDirectiveFilterDirs) %% 2)
if test $filefilter -eq 1; or test $dirfilter -eq 1
__%[1]s_debug "File extension filtering or directory filtering not supported"
# Do full file completion instead
return 1
end
set -l nospace (math (math --scale 0 $directive / $shellCompDirectiveNoSpace) %% 2)
set -l nofiles (math (math --scale 0 $directive / $shellCompDirectiveNoFileComp) %% 2)
__%[1]s_debug "nospace: $nospace, nofiles: $nofiles"
# If we want to prevent a space, or if file completion is NOT disabled,
# we need to count the number of valid completions.
# To do so, we filter on the prefix ourselves, since the completions we
# received may be unfiltered to let fish match on criteria other than
# the prefix.
if test $nospace -ne 0; or test $nofiles -eq 0
set -l prefix (commandline -t | string escape --style=regex)
__%[1]s_debug "prefix: $prefix"
set -l completions (string match -r -- "^$prefix.*" $__%[1]s_comp_results)
set --global __%[1]s_comp_results $completions
__%[1]s_debug "Filtered completions are: $__%[1]s_comp_results"
# Important not to quote the variable for count to work
set -l numComps (count $__%[1]s_comp_results)
__%[1]s_debug "numComps: $numComps"
if test $numComps -eq 1; and test $nospace -ne 0
# We must first split on \t to get rid of the descriptions to be
# able to check what the actual completion will be.
# We don't need descriptions anyway since there is only a single
# real completion which the shell will expand immediately.
set -l split (string split --max 1 \t $__%[1]s_comp_results[1])
# Fish won't add a space if the completion ends with any
# of the following characters: @=/:.,
set -l lastChar (string sub -s -1 -- $split)
if not string match -r -q "[@=/:.,]" -- "$lastChar"
# In other cases, to support the "nospace" directive we trick the shell
# by outputting an extra, longer completion.
__%[1]s_debug "Adding second completion to perform nospace directive"
set --global __%[1]s_comp_results $split[1] $split[1].
__%[1]s_debug "Completions are now: $__%[1]s_comp_results"
end
end
if test $numComps -eq 0; and test $nofiles -eq 0
# To be consistent with bash and zsh, we only trigger file
# completion when there are no other completions
__%[1]s_debug "Requesting file completion"
return 1
end
end
return 0
end
# Since Fish completions are only loaded once the user triggers them, we trigger them ourselves
# so we can properly delete any completions provided by another script.
# Only do this if the program can be found, or else fish may print some errors; besides,
# the existing completions will only be loaded if the program can be found.
if type -q "%[2]s"
# The space after the program name is essential to trigger completion for the program
# and not completion of the program name itself.
# Also, we use '> /dev/null 2>&1' since '&>' is not supported in older versions of fish.
complete --do-complete "%[2]s " > /dev/null 2>&1
end
# Remove any pre-existing completions for the program since we will be handling all of them.
complete -c %[2]s -e
# this will get called after the two calls below and clear the $__%[1]s_perform_completion_once_result global
complete -c %[2]s -n '__%[1]s_clear_perform_completion_once_result'
# The call to __%[1]s_prepare_completions will setup __%[1]s_comp_results
# which provides the program's completion choices.
# If this doesn't require order preservation, we don't use the -k flag
complete -c %[2]s -n 'not __%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
# otherwise we use the -k flag
complete -k -c %[2]s -n '__%[1]s_requires_order_preservation && __%[1]s_prepare_completions' -f -a '$__%[1]s_comp_results'
`, nameForVar, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
}
// GenFishCompletion generates fish completion file and writes to the passed writer.
func (c *Command) GenFishCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genFishComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
// GenFishCompletionFile generates fish completion file.
func (c *Command) GenFishCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.GenFishCompletion(outFile, includeDesc)
}
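// Illustrative sketch: emitting the fish script for a root command. The same
// pattern applies to the file variant above.
func exampleGenFishCompletion(root *Command) error {
	// Include descriptions; pass false to generate the no-description variant.
	return root.GenFishCompletion(os.Stdout, true)
}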

vendor/github.com/spf13/cobra/flag_groups.go generated vendored Normal file

@ -0,0 +1,290 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"fmt"
"sort"
"strings"
flag "github.com/spf13/pflag"
)
const (
requiredAsGroupAnnotation = "cobra_annotation_required_if_others_set"
oneRequiredAnnotation = "cobra_annotation_one_required"
mutuallyExclusiveAnnotation = "cobra_annotation_mutually_exclusive"
)
// MarkFlagsRequiredTogether marks the given flags with annotations so that Cobra errors
// if the command is invoked with a subset (but not all) of the given flags.
func (c *Command) MarkFlagsRequiredTogether(flagNames ...string) {
c.mergePersistentFlags()
for _, v := range flagNames {
f := c.Flags().Lookup(v)
if f == nil {
panic(fmt.Sprintf("Failed to find flag %q and mark it as being required in a flag group", v))
}
if err := c.Flags().SetAnnotation(v, requiredAsGroupAnnotation, append(f.Annotations[requiredAsGroupAnnotation], strings.Join(flagNames, " "))); err != nil {
// Only errs if the flag isn't found.
panic(err)
}
}
}
// MarkFlagsOneRequired marks the given flags with annotations so that Cobra errors
// if the command is invoked without at least one flag from the given set of flags.
func (c *Command) MarkFlagsOneRequired(flagNames ...string) {
c.mergePersistentFlags()
for _, v := range flagNames {
f := c.Flags().Lookup(v)
if f == nil {
panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a one-required flag group", v))
}
if err := c.Flags().SetAnnotation(v, oneRequiredAnnotation, append(f.Annotations[oneRequiredAnnotation], strings.Join(flagNames, " "))); err != nil {
// Only errs if the flag isn't found.
panic(err)
}
}
}
// MarkFlagsMutuallyExclusive marks the given flags with annotations so that Cobra errors
// if the command is invoked with more than one flag from the given set of flags.
func (c *Command) MarkFlagsMutuallyExclusive(flagNames ...string) {
c.mergePersistentFlags()
for _, v := range flagNames {
f := c.Flags().Lookup(v)
if f == nil {
panic(fmt.Sprintf("Failed to find flag %q and mark it as being in a mutually exclusive flag group", v))
}
// Each time this is called is a single new entry; this allows it to be a member of multiple groups if needed.
if err := c.Flags().SetAnnotation(v, mutuallyExclusiveAnnotation, append(f.Annotations[mutuallyExclusiveAnnotation], strings.Join(flagNames, " "))); err != nil {
panic(err)
}
}
}
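// Illustrative sketch (assumed flag names): declaring flag groups. The
// annotations set above are what ValidateFlagGroups checks when the command runs.
func exampleFlagGroups() {
	cmd := &Command{Use: "deploy", Run: func(*Command, []string) {}}
	cmd.Flags().String("username", "", "user name")
	cmd.Flags().String("password", "", "password")
	cmd.Flags().Bool("json", false, "JSON output")
	cmd.Flags().Bool("yaml", false, "YAML output")
	cmd.MarkFlagsRequiredTogether("username", "password")
	cmd.MarkFlagsMutuallyExclusive("json", "yaml")
}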
// ValidateFlagGroups validates the mutuallyExclusive/oneRequired/requiredAsGroup logic and returns the
// first error encountered.
func (c *Command) ValidateFlagGroups() error {
if c.DisableFlagParsing {
return nil
}
flags := c.Flags()
// groupStatus format is the list of flags as a unique ID,
// then a map of each flag name and whether it is set or not.
groupStatus := map[string]map[string]bool{}
oneRequiredGroupStatus := map[string]map[string]bool{}
mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
flags.VisitAll(func(pflag *flag.Flag) {
processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
})
if err := validateRequiredFlagGroups(groupStatus); err != nil {
return err
}
if err := validateOneRequiredFlagGroups(oneRequiredGroupStatus); err != nil {
return err
}
if err := validateExclusiveFlagGroups(mutuallyExclusiveGroupStatus); err != nil {
return err
}
return nil
}
func hasAllFlags(fs *flag.FlagSet, flagnames ...string) bool {
for _, fname := range flagnames {
f := fs.Lookup(fname)
if f == nil {
return false
}
}
return true
}
func processFlagForGroupAnnotation(flags *flag.FlagSet, pflag *flag.Flag, annotation string, groupStatus map[string]map[string]bool) {
groupInfo, found := pflag.Annotations[annotation]
if found {
for _, group := range groupInfo {
if groupStatus[group] == nil {
flagnames := strings.Split(group, " ")
// Only consider this flag group at all if all the flags are defined.
if !hasAllFlags(flags, flagnames...) {
continue
}
groupStatus[group] = make(map[string]bool, len(flagnames))
for _, name := range flagnames {
groupStatus[group][name] = false
}
}
groupStatus[group][pflag.Name] = pflag.Changed
}
}
}
func validateRequiredFlagGroups(data map[string]map[string]bool) error {
keys := sortedKeys(data)
for _, flagList := range keys {
flagnameAndStatus := data[flagList]
unset := []string{}
for flagname, isSet := range flagnameAndStatus {
if !isSet {
unset = append(unset, flagname)
}
}
if len(unset) == len(flagnameAndStatus) || len(unset) == 0 {
continue
}
// Sort values, so they can be tested/scripted against consistently.
sort.Strings(unset)
return fmt.Errorf("if any flags in the group [%v] are set they must all be set; missing %v", flagList, unset)
}
return nil
}
func validateOneRequiredFlagGroups(data map[string]map[string]bool) error {
keys := sortedKeys(data)
for _, flagList := range keys {
flagnameAndStatus := data[flagList]
var set []string
for flagname, isSet := range flagnameAndStatus {
if isSet {
set = append(set, flagname)
}
}
if len(set) >= 1 {
continue
}
// Sort values, so they can be tested/scripted against consistently.
sort.Strings(set)
return fmt.Errorf("at least one of the flags in the group [%v] is required", flagList)
}
return nil
}
func validateExclusiveFlagGroups(data map[string]map[string]bool) error {
keys := sortedKeys(data)
for _, flagList := range keys {
flagnameAndStatus := data[flagList]
var set []string
for flagname, isSet := range flagnameAndStatus {
if isSet {
set = append(set, flagname)
}
}
if len(set) == 0 || len(set) == 1 {
continue
}
// Sort values, so they can be tested/scripted against consistently.
sort.Strings(set)
return fmt.Errorf("if any flags in the group [%v] are set none of the others can be; %v were all set", flagList, set)
}
return nil
}
func sortedKeys(m map[string]map[string]bool) []string {
keys := make([]string, len(m))
i := 0
for k := range m {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// enforceFlagGroupsForCompletion will do the following:
// - when a flag in a group is present, other flags in the group will be marked required
// - when none of the flags in a one-required group are present, all flags in the group will be marked required
// - when a flag in a mutually exclusive group is present, other flags in the group will be marked as hidden
// This allows the standard completion logic to behave appropriately for flag groups
func (c *Command) enforceFlagGroupsForCompletion() {
if c.DisableFlagParsing {
return
}
flags := c.Flags()
groupStatus := map[string]map[string]bool{}
oneRequiredGroupStatus := map[string]map[string]bool{}
mutuallyExclusiveGroupStatus := map[string]map[string]bool{}
c.Flags().VisitAll(func(pflag *flag.Flag) {
processFlagForGroupAnnotation(flags, pflag, requiredAsGroupAnnotation, groupStatus)
processFlagForGroupAnnotation(flags, pflag, oneRequiredAnnotation, oneRequiredGroupStatus)
processFlagForGroupAnnotation(flags, pflag, mutuallyExclusiveAnnotation, mutuallyExclusiveGroupStatus)
})
// If a flag that is part of a group is present, we make all the other flags
// of that group required so that the shell completion suggests them automatically
for flagList, flagnameAndStatus := range groupStatus {
for _, isSet := range flagnameAndStatus {
if isSet {
// One of the flags of the group is set, mark the other ones as required
for _, fName := range strings.Split(flagList, " ") {
_ = c.MarkFlagRequired(fName)
}
}
}
}
// If none of the flags of a one-required group are present, we make all the flags
// of that group required so that the shell completion suggests them automatically
for flagList, flagnameAndStatus := range oneRequiredGroupStatus {
isSet := false
for _, isSet = range flagnameAndStatus {
if isSet {
break
}
}
// None of the flags of the group are set, mark all flags in the group
// as required
if !isSet {
for _, fName := range strings.Split(flagList, " ") {
_ = c.MarkFlagRequired(fName)
}
}
}
// If a flag that is mutually exclusive to others is present, we hide the other
// flags of that group so the shell completion does not suggest them
for flagList, flagnameAndStatus := range mutuallyExclusiveGroupStatus {
for flagName, isSet := range flagnameAndStatus {
if isSet {
// One of the flags of the mutually exclusive group is set, mark the other ones as hidden
// Don't mark the flag that is already set as hidden because it may be an
// array or slice flag and therefore must continue being suggested
for _, fName := range strings.Split(flagList, " ") {
if fName != flagName {
flag := c.Flags().Lookup(fName)
flag.Hidden = true
}
}
}
}
}
}

vendor/github.com/spf13/cobra/powershell_completions.go generated vendored Normal file

@ -0,0 +1,325 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The generated scripts require PowerShell v5.0+ (which comes with Windows 10, but
// can be downloaded separately for Windows 7 or 8.1).
package cobra
import (
"bytes"
"fmt"
"io"
"os"
"strings"
)
func genPowerShellComp(buf io.StringWriter, name string, includeDesc bool) {
// Variables should not contain a '-' or ':' character
nameForVar := name
nameForVar = strings.ReplaceAll(nameForVar, "-", "_")
nameForVar = strings.ReplaceAll(nameForVar, ":", "_")
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
WriteStringAndCheck(buf, fmt.Sprintf(`# powershell completion for %-36[1]s -*- shell-script -*-
function __%[1]s_debug {
if ($env:BASH_COMP_DEBUG_FILE) {
"$args" | Out-File -Append -FilePath "$env:BASH_COMP_DEBUG_FILE"
}
}
filter __%[1]s_escapeStringWithSpecialChars {
`+" $_ -replace '\\s|#|@|\\$|;|,|''|\\{|\\}|\\(|\\)|\"|`|\\||<|>|&','`$&'"+`
}
[scriptblock]${__%[2]sCompleterBlock} = {
param(
$WordToComplete,
$CommandAst,
$CursorPosition
)
# Get the current command line and convert into a string
$Command = $CommandAst.CommandElements
$Command = "$Command"
__%[1]s_debug ""
__%[1]s_debug "========= starting completion logic =========="
__%[1]s_debug "WordToComplete: $WordToComplete Command: $Command CursorPosition: $CursorPosition"
# The user could have moved the cursor backwards on the command-line.
# We need to trigger completion from the $CursorPosition location, so we need
# to truncate the command-line ($Command) up to the $CursorPosition location.
# Make sure the $Command is longer than the $CursorPosition before we truncate.
# This happens because the $Command does not include the last space.
if ($Command.Length -gt $CursorPosition) {
$Command=$Command.Substring(0,$CursorPosition)
}
__%[1]s_debug "Truncated command: $Command"
$ShellCompDirectiveError=%[4]d
$ShellCompDirectiveNoSpace=%[5]d
$ShellCompDirectiveNoFileComp=%[6]d
$ShellCompDirectiveFilterFileExt=%[7]d
$ShellCompDirectiveFilterDirs=%[8]d
$ShellCompDirectiveKeepOrder=%[9]d
# Prepare the command to request completions for the program.
# Split the command at the first space to separate the program and arguments.
$Program,$Arguments = $Command.Split(" ",2)
$RequestComp="$Program %[3]s $Arguments"
__%[1]s_debug "RequestComp: $RequestComp"
# We cannot use $WordToComplete because it has the wrong value
# if the cursor was moved, so use the last argument instead.
if ($WordToComplete -ne "" ) {
$WordToComplete = $Arguments.Split(" ")[-1]
}
__%[1]s_debug "New WordToComplete: $WordToComplete"
# Check for flag with equal sign
$IsEqualFlag = ($WordToComplete -Like "--*=*" )
if ( $IsEqualFlag ) {
__%[1]s_debug "Completing equal sign flag"
# Remove the flag part
$Flag,$WordToComplete = $WordToComplete.Split("=",2)
}
if ( $WordToComplete -eq "" -And ( -Not $IsEqualFlag )) {
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go method.
__%[1]s_debug "Adding extra empty parameter"
# PowerShell 7.2+ changed the way how the arguments are passed to executables,
# so for pre-7.2 or when Legacy argument passing is enabled we need to use
`+" # `\"`\" to pass an empty argument, a \"\" or '' does not work!!!"+`
if ($PSVersionTable.PsVersion -lt [version]'7.2.0' -or
($PSVersionTable.PsVersion -lt [version]'7.3.0' -and -not [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -or
(($PSVersionTable.PsVersion -ge [version]'7.3.0' -or [ExperimentalFeature]::IsEnabled("PSNativeCommandArgumentPassing")) -and
$PSNativeCommandArgumentPassing -eq 'Legacy')) {
`+" $RequestComp=\"$RequestComp\" + ' `\"`\"'"+`
} else {
$RequestComp="$RequestComp" + ' ""'
}
}
__%[1]s_debug "Calling $RequestComp"
# First disable ActiveHelp which is not supported for Powershell
${env:%[10]s}=0
# Call the command, store the output in $Out, and redirect stderr and stdout to null.
# $Out is an array containing one line per element.
Invoke-Expression -OutVariable out "$RequestComp" 2>&1 | Out-Null
# get directive from last line
[int]$Directive = $Out[-1].TrimStart(':')
if ($Directive -eq "") {
# There is no directive specified
$Directive = 0
}
__%[1]s_debug "The completion directive is: $Directive"
# remove directive (last element) from out
$Out = $Out | Where-Object { $_ -ne $Out[-1] }
__%[1]s_debug "The completions are: $Out"
if (($Directive -band $ShellCompDirectiveError) -ne 0 ) {
# Error code. No completion.
__%[1]s_debug "Received error from custom completion go code"
return
}
$Longest = 0
[Array]$Values = $Out | ForEach-Object {
#Split the output in name and description
`+" $Name, $Description = $_.Split(\"`t\",2)"+`
__%[1]s_debug "Name: $Name Description: $Description"
# Look for the longest completion so that we can format things nicely
if ($Longest -lt $Name.Length) {
$Longest = $Name.Length
}
# Set the description to a one space string if there is none set.
# This is needed because the CompletionResult does not accept an empty string as argument
if (-Not $Description) {
$Description = " "
}
@{Name="$Name";Description="$Description"}
}
$Space = " "
if (($Directive -band $ShellCompDirectiveNoSpace) -ne 0 ) {
# remove the space here
__%[1]s_debug "ShellCompDirectiveNoSpace is called"
$Space = ""
}
if ((($Directive -band $ShellCompDirectiveFilterFileExt) -ne 0 ) -or
(($Directive -band $ShellCompDirectiveFilterDirs) -ne 0 )) {
__%[1]s_debug "ShellCompDirectiveFilterFileExt ShellCompDirectiveFilterDirs are not supported"
# return here to prevent the completion of the extensions
return
}
$Values = $Values | Where-Object {
# filter the result
$_.Name -like "$WordToComplete*"
# Join the flag back if we have an equal sign flag
if ( $IsEqualFlag ) {
__%[1]s_debug "Join the equal sign flag back to the completion value"
$_.Name = $Flag + "=" + $_.Name
}
}
# we sort the values in ascending order by name if keep order isn't passed
if (($Directive -band $ShellCompDirectiveKeepOrder) -eq 0 ) {
$Values = $Values | Sort-Object -Property Name
}
if (($Directive -band $ShellCompDirectiveNoFileComp) -ne 0 ) {
__%[1]s_debug "ShellCompDirectiveNoFileComp is called"
if ($Values.Length -eq 0) {
# Just print an empty string here so the
# shell does not start to complete paths.
# We cannot use CompletionResult here because
# it does not accept an empty string as argument.
""
return
}
}
# Get the current mode
$Mode = (Get-PSReadLineKeyHandler | Where-Object {$_.Key -eq "Tab" }).Function
__%[1]s_debug "Mode: $Mode"
$Values | ForEach-Object {
# store temporary because switch will overwrite $_
$comp = $_
# PowerShell supports three different completion modes
# - TabCompleteNext (default Windows style - on each key press the next option is displayed)
# - Complete (works like bash)
# - MenuComplete (works like zsh)
# You set the mode with Set-PSReadLineKeyHandler -Key Tab -Function <mode>
# CompletionResult Arguments:
# 1) CompletionText text to be used as the auto completion result
# 2) ListItemText text to be displayed in the suggestion list
# 3) ResultType type of completion result
# 4) ToolTip text for the tooltip with details about the object
switch ($Mode) {
# bash like
"Complete" {
if ($Values.Length -eq 1) {
__%[1]s_debug "Only one completion left"
# insert space after value
[System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
} else {
# Add the proper number of spaces to align the descriptions
while($comp.Name.Length -lt $Longest) {
$comp.Name = $comp.Name + " "
}
# Check for empty description and only add parentheses if needed
if ($($comp.Description) -eq " " ) {
$Description = ""
} else {
$Description = " ($($comp.Description))"
}
[System.Management.Automation.CompletionResult]::new("$($comp.Name)$Description", "$($comp.Name)$Description", 'ParameterValue', "$($comp.Description)")
}
}
# zsh like
"MenuComplete" {
# insert space after value
# MenuComplete will automatically show the ToolTip of
# the highlighted value at the bottom of the suggestions.
[System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars) + $Space, "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
}
# TabCompleteNext and in case we get something unknown
Default {
# Like MenuComplete but we don't want to add a space here because
# the user needs to press space anyway to get the completion.
# Description will not be shown because that's not possible with TabCompleteNext
[System.Management.Automation.CompletionResult]::new($($comp.Name | __%[1]s_escapeStringWithSpecialChars), "$($comp.Name)", 'ParameterValue', "$($comp.Description)")
}
}
}
}
Register-ArgumentCompleter -CommandName '%[1]s' -ScriptBlock ${__%[2]sCompleterBlock}
`, name, nameForVar, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder, activeHelpEnvVar(name)))
}
func (c *Command) genPowerShellCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genPowerShellComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
func (c *Command) genPowerShellCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.genPowerShellCompletion(outFile, includeDesc)
}
// GenPowerShellCompletionFile generates powershell completion file without descriptions.
func (c *Command) GenPowerShellCompletionFile(filename string) error {
return c.genPowerShellCompletionFile(filename, false)
}
// GenPowerShellCompletion generates powershell completion file without descriptions
// and writes it to the passed writer.
func (c *Command) GenPowerShellCompletion(w io.Writer) error {
return c.genPowerShellCompletion(w, false)
}
// GenPowerShellCompletionFileWithDesc generates powershell completion file with descriptions.
func (c *Command) GenPowerShellCompletionFileWithDesc(filename string) error {
return c.genPowerShellCompletionFile(filename, true)
}
// GenPowerShellCompletionWithDesc generates powershell completion file with descriptions
// and writes it to the passed writer.
func (c *Command) GenPowerShellCompletionWithDesc(w io.Writer) error {
return c.genPowerShellCompletion(w, true)
}
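// Illustrative sketch: emitting the PowerShell script for a root command.
func exampleGenPowerShellCompletion(root *Command) error {
	// With descriptions; use GenPowerShellCompletion for the plain variant.
	return root.GenPowerShellCompletionWithDesc(os.Stdout)
}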

vendor/github.com/spf13/cobra/shell_completions.go generated vendored Normal file

@ -0,0 +1,98 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"github.com/spf13/pflag"
)
// MarkFlagRequired instructs the various shell completion implementations to
// prioritize the named flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkFlagRequired(name string) error {
return MarkFlagRequired(c.Flags(), name)
}
// MarkPersistentFlagRequired instructs the various shell completion implementations to
// prioritize the named persistent flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func (c *Command) MarkPersistentFlagRequired(name string) error {
return MarkFlagRequired(c.PersistentFlags(), name)
}
// MarkFlagRequired instructs the various shell completion implementations to
// prioritize the named flag when performing completion,
// and causes your command to report an error if invoked without the flag.
func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
}
// MarkFlagFilename instructs the various shell completion implementations to
// limit completions for the named flag to the specified file extensions.
func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.Flags(), name, extensions...)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// The bash completion script will call the bash function f for the flag.
//
// This will only work for bash completion.
// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
// to register a Go function which will work across all shells.
func (c *Command) MarkFlagCustom(name string, f string) error {
return MarkFlagCustom(c.Flags(), name, f)
}
// MarkPersistentFlagFilename instructs the various shell completion
// implementations to limit completions for the named persistent flag to the
// specified file extensions.
func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
}
// MarkFlagFilename instructs the various shell completion implementations to
// limit completions for the named flag to the specified file extensions.
func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
}
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
// The bash completion script will call the bash function f for the flag.
//
// This will only work for bash completion.
// It is recommended to instead use c.RegisterFlagCompletionFunc(...) which allows
// to register a Go function which will work across all shells.
func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
return flags.SetAnnotation(name, BashCompCustom, []string{f})
}
// MarkFlagDirname instructs the various shell completion implementations to
// limit completions for the named flag to directory names.
func (c *Command) MarkFlagDirname(name string) error {
return MarkFlagDirname(c.Flags(), name)
}
// MarkPersistentFlagDirname instructs the various shell completion
// implementations to limit completions for the named persistent flag to
// directory names.
func (c *Command) MarkPersistentFlagDirname(name string) error {
return MarkFlagDirname(c.PersistentFlags(), name)
}
// MarkFlagDirname instructs the various shell completion implementations to
// limit completions for the named flag to directory names.
func MarkFlagDirname(flags *pflag.FlagSet, name string) error {
return flags.SetAnnotation(name, BashCompSubdirsInDir, []string{})
}
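// Illustrative sketch (assumed flag names): steering file completion for flags.
func exampleMarkFlagCompletion() {
	cmd := &Command{Use: "convert", Run: func(*Command, []string) {}}
	cmd.Flags().StringP("input", "i", "", "input document")
	cmd.Flags().StringP("workdir", "w", "", "working directory")
	_ = cmd.MarkFlagFilename("input", "yaml", "yml", "json") // complete only these extensions
	_ = cmd.MarkFlagDirname("workdir")                       // complete directory names only
}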

vendor/github.com/spf13/cobra/zsh_completions.go generated vendored Normal file

@ -0,0 +1,308 @@
// Copyright 2013-2023 The Cobra Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cobra
import (
"bytes"
"fmt"
"io"
"os"
)
// GenZshCompletionFile generates zsh completion file including descriptions.
func (c *Command) GenZshCompletionFile(filename string) error {
return c.genZshCompletionFile(filename, true)
}
// GenZshCompletion generates zsh completion file including descriptions
// and writes it to the passed writer.
func (c *Command) GenZshCompletion(w io.Writer) error {
return c.genZshCompletion(w, true)
}
// GenZshCompletionFileNoDesc generates zsh completion file without descriptions.
func (c *Command) GenZshCompletionFileNoDesc(filename string) error {
return c.genZshCompletionFile(filename, false)
}
// GenZshCompletionNoDesc generates zsh completion file without descriptions
// and writes it to the passed writer.
func (c *Command) GenZshCompletionNoDesc(w io.Writer) error {
return c.genZshCompletion(w, false)
}
// MarkZshCompPositionalArgumentFile only worked for zsh and its behavior was
// not consistent with Bash completion. It has therefore been disabled.
// Instead, when no other completion is specified, file completion is done by
// default for every argument. One can disable file completion on a per-argument
// basis by using ValidArgsFunction and ShellCompDirectiveNoFileComp.
// To achieve file extension filtering, one can use ValidArgsFunction and
// ShellCompDirectiveFilterFileExt.
//
// Deprecated
func (c *Command) MarkZshCompPositionalArgumentFile(argPosition int, patterns ...string) error {
return nil
}
// MarkZshCompPositionalArgumentWords only worked for zsh. It has therefore
// been disabled.
// To achieve the same behavior across all shells, one can use
// ValidArgs (for the first argument only) or ValidArgsFunction for
// any argument (can include the first one also).
//
// Deprecated
func (c *Command) MarkZshCompPositionalArgumentWords(argPosition int, words ...string) error {
return nil
}
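// Illustrative sketch: the portable replacement the deprecation notes above point
// to, using ValidArgsFunction instead of the zsh-only markers.
func exampleZshReplacement() *Command {
	return &Command{
		Use: "status",
		ValidArgsFunction: func(cmd *Command, args []string, toComplete string) ([]string, ShellCompDirective) {
			return []string{"frontend", "backend"}, ShellCompDirectiveDefault
		},
		Run: func(*Command, []string) {},
	}
}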
func (c *Command) genZshCompletionFile(filename string, includeDesc bool) error {
outFile, err := os.Create(filename)
if err != nil {
return err
}
defer outFile.Close()
return c.genZshCompletion(outFile, includeDesc)
}
func (c *Command) genZshCompletion(w io.Writer, includeDesc bool) error {
buf := new(bytes.Buffer)
genZshComp(buf, c.Name(), includeDesc)
_, err := buf.WriteTo(w)
return err
}
func genZshComp(buf io.StringWriter, name string, includeDesc bool) {
compCmd := ShellCompRequestCmd
if !includeDesc {
compCmd = ShellCompNoDescRequestCmd
}
WriteStringAndCheck(buf, fmt.Sprintf(`#compdef %[1]s
compdef _%[1]s %[1]s
# zsh completion for %-36[1]s -*- shell-script -*-
__%[1]s_debug()
{
local file="$BASH_COMP_DEBUG_FILE"
if [[ -n ${file} ]]; then
echo "$*" >> "${file}"
fi
}
_%[1]s()
{
local shellCompDirectiveError=%[3]d
local shellCompDirectiveNoSpace=%[4]d
local shellCompDirectiveNoFileComp=%[5]d
local shellCompDirectiveFilterFileExt=%[6]d
local shellCompDirectiveFilterDirs=%[7]d
local shellCompDirectiveKeepOrder=%[8]d
local lastParam lastChar flagPrefix requestComp out directive comp lastComp noSpace keepOrder
local -a completions
__%[1]s_debug "\n========= starting completion logic =========="
__%[1]s_debug "CURRENT: ${CURRENT}, words[*]: ${words[*]}"
# The user could have moved the cursor backwards on the command-line.
# We need to trigger completion from the $CURRENT location, so we need
# to truncate the command-line ($words) up to the $CURRENT location.
# (We cannot use $CURSOR as its value does not work when a command is an alias.)
words=("${=words[1,CURRENT]}")
__%[1]s_debug "Truncated words[*]: ${words[*]},"
lastParam=${words[-1]}
lastChar=${lastParam[-1]}
__%[1]s_debug "lastParam: ${lastParam}, lastChar: ${lastChar}"
# For zsh, when completing a flag with an = (e.g., %[1]s -n=<TAB>)
# completions must be prefixed with the flag
setopt local_options BASH_REMATCH
if [[ "${lastParam}" =~ '-.*=' ]]; then
# We are dealing with a flag with an =
flagPrefix="-P ${BASH_REMATCH}"
fi
# Prepare the command to obtain completions
requestComp="${words[1]} %[2]s ${words[2,-1]}"
if [ "${lastChar}" = "" ]; then
# If the last parameter is complete (there is a space following it)
# We add an extra empty parameter so we can indicate this to the go completion code.
__%[1]s_debug "Adding extra empty parameter"
requestComp="${requestComp} \"\""
fi
__%[1]s_debug "About to call: eval ${requestComp}"
# Use eval to handle any environment variables and such
out=$(eval ${requestComp} 2>/dev/null)
__%[1]s_debug "completion output: ${out}"
# Extract the directive integer following a : from the last line
local lastLine
while IFS='\n' read -r line; do
lastLine=${line}
done < <(printf "%%s\n" "${out[@]}")
__%[1]s_debug "last line: ${lastLine}"
if [ "${lastLine[1]}" = : ]; then
directive=${lastLine[2,-1]}
# Remove the directive including the : and the newline
local suffix
(( suffix=${#lastLine}+2))
out=${out[1,-$suffix]}
else
# There is no directive specified. Leave $out as is.
__%[1]s_debug "No directive found. Setting do default"
directive=0
fi
__%[1]s_debug "directive: ${directive}"
__%[1]s_debug "completions: ${out}"
__%[1]s_debug "flagPrefix: ${flagPrefix}"
if [ $((directive & shellCompDirectiveError)) -ne 0 ]; then
__%[1]s_debug "Completion received error. Ignoring completions."
return
fi
local activeHelpMarker="%[9]s"
local endIndex=${#activeHelpMarker}
local startIndex=$((${#activeHelpMarker}+1))
local hasActiveHelp=0
while IFS='\n' read -r comp; do
# Check if this is an activeHelp statement (i.e., prefixed with $activeHelpMarker)
if [ "${comp[1,$endIndex]}" = "$activeHelpMarker" ];then
__%[1]s_debug "ActiveHelp found: $comp"
comp="${comp[$startIndex,-1]}"
if [ -n "$comp" ]; then
compadd -x "${comp}"
__%[1]s_debug "ActiveHelp will need delimiter"
hasActiveHelp=1
fi
continue
fi
if [ -n "$comp" ]; then
# If requested, completions are returned with a description.
# The description is preceded by a TAB character.
# For zsh's _describe, we need to use a : instead of a TAB.
# We first need to escape any : as part of the completion itself.
comp=${comp//:/\\:}
local tab="$(printf '\t')"
comp=${comp//$tab/:}
__%[1]s_debug "Adding completion: ${comp}"
completions+=${comp}
lastComp=$comp
fi
done < <(printf "%%s\n" "${out[@]}")
# Add a delimiter after the activeHelp statements, but only if:
# - there are completions following the activeHelp statements, or
# - file completion will be performed (so there will be choices after the activeHelp)
if [ $hasActiveHelp -eq 1 ]; then
if [ ${#completions} -ne 0 ] || [ $((directive & shellCompDirectiveNoFileComp)) -eq 0 ]; then
__%[1]s_debug "Adding activeHelp delimiter"
compadd -x "--"
hasActiveHelp=0
fi
fi
if [ $((directive & shellCompDirectiveNoSpace)) -ne 0 ]; then
__%[1]s_debug "Activating nospace."
noSpace="-S ''"
fi
if [ $((directive & shellCompDirectiveKeepOrder)) -ne 0 ]; then
__%[1]s_debug "Activating keep order."
keepOrder="-V"
fi
if [ $((directive & shellCompDirectiveFilterFileExt)) -ne 0 ]; then
# File extension filtering
local filteringCmd
filteringCmd='_files'
for filter in ${completions[@]}; do
if [ ${filter[1]} != '*' ]; then
# zsh requires a glob pattern to do file filtering
filter="\*.$filter"
fi
filteringCmd+=" -g $filter"
done
filteringCmd+=" ${flagPrefix}"
__%[1]s_debug "File filtering command: $filteringCmd"
_arguments '*:filename:'"$filteringCmd"
elif [ $((directive & shellCompDirectiveFilterDirs)) -ne 0 ]; then
# File completion for directories only
local subdir
subdir="${completions[1]}"
if [ -n "$subdir" ]; then
__%[1]s_debug "Listing directories in $subdir"
pushd "${subdir}" >/dev/null 2>&1
else
__%[1]s_debug "Listing directories in ."
fi
local result
_arguments '*:dirname:_files -/'" ${flagPrefix}"
result=$?
if [ -n "$subdir" ]; then
popd >/dev/null 2>&1
fi
return $result
else
__%[1]s_debug "Calling _describe"
if eval _describe $keepOrder "completions" completions $flagPrefix $noSpace; then
__%[1]s_debug "_describe found some completions"
# Return the success of having called _describe
return 0
else
__%[1]s_debug "_describe did not find completions."
__%[1]s_debug "Checking if we should do file completion."
if [ $((directive & shellCompDirectiveNoFileComp)) -ne 0 ]; then
__%[1]s_debug "deactivating file completion"
# We must return an error code here to let zsh know that there were no
# completions found by _describe; this is what will trigger other
# matching algorithms to attempt to find completions.
# For example zsh can match letters in the middle of words.
return 1
else
# Perform file completion
__%[1]s_debug "Activating file completion"
# We must return the result of this command, so it must be the
# last command, or else we must store its result to return it.
_arguments '*:filename:_files'" ${flagPrefix}"
fi
fi
fi
}
# don't run the completion function when being source-ed or eval-ed
if [ "$funcstack[1]" = "_%[1]s" ]; then
_%[1]s
fi
`, name, compCmd,
ShellCompDirectiveError, ShellCompDirectiveNoSpace, ShellCompDirectiveNoFileComp,
ShellCompDirectiveFilterFileExt, ShellCompDirectiveFilterDirs, ShellCompDirectiveKeepOrder,
activeHelpMarker))
}

vendor/go.yaml.in/yaml/v2/.travis.yml generated vendored Normal file

@ -0,0 +1,17 @@
language: go
go:
- "1.4.x"
- "1.5.x"
- "1.6.x"
- "1.7.x"
- "1.8.x"
- "1.9.x"
- "1.10.x"
- "1.11.x"
- "1.12.x"
- "1.13.x"
- "1.14.x"
- "tip"
go_import_path: gopkg.in/yaml.v2

vendor/go.yaml.in/yaml/v2/LICENSE generated vendored Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

31
vendor/go.yaml.in/yaml/v2/LICENSE.libyaml generated vendored Normal file

@ -0,0 +1,31 @@
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original copyright and license:
apic.go
emitterc.go
parserc.go
readerc.go
scannerc.go
writerc.go
yamlh.go
yamlprivateh.go
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

13
vendor/go.yaml.in/yaml/v2/NOTICE generated vendored Normal file

@ -0,0 +1,13 @@
Copyright 2011-2016 Canonical Ltd.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

131
vendor/go.yaml.in/yaml/v2/README.md generated vendored Normal file

@ -0,0 +1,131 @@
# YAML support for the Go language
Introduction
------------
The yaml package enables Go programs to comfortably encode and decode YAML
values. It was developed within [Canonical](https://www.canonical.com) as
part of the [juju](https://juju.ubuntu.com) project, and is based on a
pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
C library to parse and generate YAML data quickly and reliably.
Compatibility
-------------
The yaml package supports most of YAML 1.1 and 1.2, including support for
anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
implemented, and base-60 floats from YAML 1.1 are purposefully not
supported since they're a poor design and are gone in YAML 1.2.
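For instance, an anchor referenced through a merge key decodes as one would expect; a minimal sketch (the document and key names are illustrative):

```Go
package main

import (
	"fmt"
	"log"

	"go.yaml.in/yaml/v2"
)

// One anchor (&defaults) referenced through a merge key (<<).
var doc = `
defaults: &defaults
  adapter: postgres
  host: localhost
dev:
  <<: *defaults
  database: dev_db
`

func main() {
	m := make(map[string]map[string]string)
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println(m["dev"]) // map[adapter:postgres database:dev_db host:localhost]
}
```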
Installation and usage
----------------------
The import path for the package is *go.yaml.in/yaml/v2*.
To install it, run:
go get go.yaml.in/yaml/v2
API documentation
-----------------
See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
API stability
-------------
The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
License
-------
The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
Example
-------
```Go
package main
import (
"fmt"
"log"
"go.yaml.in/yaml/v2"
)
var data = `
a: Easy!
b:
c: 2
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
RenamedC int `yaml:"c"`
D []int `yaml:",flow"`
}
}
func main() {
t := T{}
err := yaml.Unmarshal([]byte(data), &t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t:\n%v\n\n", t)
d, err := yaml.Marshal(&t)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- t dump:\n%s\n\n", string(d))
m := make(map[interface{}]interface{})
err = yaml.Unmarshal([]byte(data), &m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m:\n%v\n\n", m)
d, err = yaml.Marshal(&m)
if err != nil {
log.Fatalf("error: %v", err)
}
fmt.Printf("--- m dump:\n%s\n\n", string(d))
}
```
This example will generate the following output:
```
--- t:
{Easy! {2 [3 4]}}
--- t dump:
a: Easy!
b:
c: 2
d: [3, 4]
--- m:
map[a:Easy! b:map[c:2 d:[3 4]]]
--- m dump:
a: Easy!
b:
c: 2
d:
- 3
- 4
```

744
vendor/go.yaml.in/yaml/v2/apic.go generated vendored Normal file

@ -0,0 +1,744 @@
package yaml
import (
"io"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
// Check if we can move the queue at the beginning of the buffer.
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
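// Otherwise shift the tokens from head+pos one slot to the right and
// place the new token in the gap.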
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
// Create a new parser object.
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, input_raw_buffer_size),
buffer: make([]byte, 0, input_buffer_size),
}
return true
}
// Destroy a parser object.
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
// String read handler.
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n = copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("must set the encoding only once")
}
parser.encoding = encoding
}
var disableLineWrapping = false
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
if disableLineWrapping {
emitter.best_width = -1
}
}
// Destroy an emitter object.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
// String write handler.
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
// Set a string output.
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = output_buffer
}
// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("must set the output encoding only once")
}
emitter.encoding = encoding
}
// Set the canonical output style.
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
// Set the indentation increment.
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
// Set the preferred line width.
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
// Set if unescaped non-ASCII characters are allowed.
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
// Set the preferred line break character.
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
///*
// * Destroy a token object.
// */
//
//YAML_DECLARE(void)
//yaml_token_delete(yaml_token_t *token)
//{
// assert(token); // Non-NULL token object expected.
//
// switch (token.type)
// {
// case YAML_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case YAML_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case YAML_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case YAML_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case YAML_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
//}
//
///*
// * Check if a string is a valid UTF-8 sequence.
// *
// * Check 'reader.c' for more details on UTF-8 encoding.
// */
//
//static int
//yaml_check_utf8(yaml_char_t *start, size_t length)
//{
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
//}
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
///*
// * Create ALIAS.
// */
//
//YAML_DECLARE(int)
//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
//{
// mark yaml_mark_t = { 0, 0, 0 }
// anchor_copy *yaml_char_t = NULL
//
// assert(event) // Non-NULL event object is expected.
// assert(anchor) // Non-NULL anchor is expected.
//
// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
//
// anchor_copy = yaml_strdup(anchor)
// if (!anchor_copy)
// return 0
//
// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
//
// return 1
//}
// Create SCALAR.
func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
*event = yaml_event_t{
typ: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-START.
func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create SEQUENCE-END.
func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_SEQUENCE_END_EVENT,
}
return true
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
}
// Destroy an event object.
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
///*
// * Create a document object.
// */
//
//YAML_DECLARE(int)
//yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives_start *yaml_tag_directive_t,
// tag_directives_end *yaml_tag_directive_t,
// start_implicit int, end_implicit int)
//{
// struct {
// error yaml_error_type_t
// } context
// struct {
// start *yaml_node_t
// end *yaml_node_t
// top *yaml_node_t
// } nodes = { NULL, NULL, NULL }
// version_directive_copy *yaml_version_directive_t = NULL
// struct {
// start *yaml_tag_directive_t
// end *yaml_tag_directive_t
// top *yaml_tag_directive_t
// } tag_directives_copy = { NULL, NULL, NULL }
// value yaml_tag_directive_t = { NULL, NULL }
// mark yaml_mark_t = { 0, 0, 0 }
//
// assert(document) // Non-NULL document object is expected.
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end))
// // Valid tag directives are expected.
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
// if (!version_directive_copy) goto error
// version_directive_copy.major = version_directive.major
// version_directive_copy.minor = version_directive.minor
// }
//
// if (tag_directives_start != tag_directives_end) {
// tag_directive *yaml_tag_directive_t
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error
// for (tag_directive = tag_directives_start
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle)
// assert(tag_directive.prefix)
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error
// value.handle = yaml_strdup(tag_directive.handle)
// value.prefix = yaml_strdup(tag_directive.prefix)
// if (!value.handle || !value.prefix) goto error
// if (!PUSH(&context, tag_directives_copy, value))
// goto error
// value.handle = NULL
// value.prefix = NULL
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark)
//
// return 1
//
//error:
// STACK_DEL(&context, nodes)
// yaml_free(version_directive_copy)
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
// }
// STACK_DEL(&context, tag_directives_copy)
// yaml_free(value.handle)
// yaml_free(value.prefix)
//
// return 0
//}
//
///*
// * Destroy a document object.
// */
//
//YAML_DECLARE(void)
//yaml_document_delete(document *yaml_document_t)
//{
// struct {
// error yaml_error_type_t
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// node yaml_node_t = POP(&context, document.nodes)
// yaml_free(node.tag)
// switch (node.type) {
// case YAML_SCALAR_NODE:
// yaml_free(node.data.scalar.value)
// break
// case YAML_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items)
// break
// case YAML_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs)
// break
// default:
// assert(0) // Should not happen.
// }
// }
// STACK_DEL(&context, document.nodes)
//
// yaml_free(document.version_directive)
// for (tag_directive = document.tag_directives.start
// tag_directive != document.tag_directives.end
// tag_directive++) {
// yaml_free(tag_directive.handle)
// yaml_free(tag_directive.prefix)
// }
// yaml_free(document.tag_directives.start)
//
// memset(document, 0, sizeof(yaml_document_t))
//}
//
///**
// * Get a document node.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_node(document *yaml_document_t, index int)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1
// }
// return NULL
//}
//
///**
// * Get the root object.
// */
//
//YAML_DECLARE(yaml_node_t *)
//yaml_document_get_root_node(document *yaml_document_t)
//{
// assert(document) // Non-NULL document object is expected.
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start
// }
// return NULL
//}
//
///*
// * Add a scalar node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_scalar(document *yaml_document_t,
// tag *yaml_char_t, value *yaml_char_t, length int,
// style yaml_scalar_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// value_copy *yaml_char_t = NULL
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
// assert(value) // Non-NULL value is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (length < 0) {
// length = strlen((char *)value)
// }
//
// if (!yaml_check_utf8(value, length)) goto error
// value_copy = yaml_malloc(length+1)
// if (!value_copy) goto error
// memcpy(value_copy, value, length)
// value_copy[length] = '\0'
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// yaml_free(tag_copy)
// yaml_free(value_copy)
//
// return 0
//}
//
///*
// * Add a sequence node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_sequence(document *yaml_document_t,
// tag *yaml_char_t, style yaml_sequence_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_item_t
// end *yaml_node_item_t
// top *yaml_node_item_t
// } items = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, items)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Add a mapping node to a document.
// */
//
//YAML_DECLARE(int)
//yaml_document_add_mapping(document *yaml_document_t,
// tag *yaml_char_t, style yaml_mapping_style_t)
//{
// struct {
// error yaml_error_type_t
// } context
// mark yaml_mark_t = { 0, 0, 0 }
// tag_copy *yaml_char_t = NULL
// struct {
// start *yaml_node_pair_t
// end *yaml_node_pair_t
// top *yaml_node_pair_t
// } pairs = { NULL, NULL, NULL }
// node yaml_node_t
//
// assert(document) // Non-NULL document object is expected.
//
// if (!tag) {
// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
// tag_copy = yaml_strdup(tag)
// if (!tag_copy) goto error
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark)
// if (!PUSH(&context, document.nodes, node)) goto error
//
// return document.nodes.top - document.nodes.start
//
//error:
// STACK_DEL(&context, pairs)
// yaml_free(tag_copy)
//
// return 0
//}
//
///*
// * Append an item to a sequence node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_sequence_item(document *yaml_document_t,
// sequence int, item int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// assert(document) // Non-NULL document is required.
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top)
// // Valid sequence id is required.
// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
// // A sequence node is required.
// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
// // Valid item id is required.
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0
//
// return 1
//}
//
///*
// * Append a pair of a key and a value to a mapping node.
// */
//
//YAML_DECLARE(int)
//yaml_document_append_mapping_pair(document *yaml_document_t,
// mapping int, key int, value int)
//{
// struct {
// error yaml_error_type_t
// } context
//
// pair yaml_node_pair_t
//
// assert(document) // Non-NULL document is required.
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top)
// // Valid mapping id is required.
// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
// // A mapping node is required.
// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
// // Valid key id is required.
// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
// // Valid value id is required.
//
// pair.key = key
// pair.value = value
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0
//
// return 1
//}
//
//

815
vendor/go.yaml.in/yaml/v2/decode.go generated vendored Normal file

@ -0,0 +1,815 @@
package yaml
import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
"time"
)
const (
documentNode = 1 << iota
mappingNode
sequenceNode
scalarNode
aliasNode
)
type node struct {
kind int
line, column int
tag string
// For an alias node, alias holds the resolved alias.
alias *node
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
doneInit bool
}
func newParser(b []byte) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
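// An empty document would produce no parser events, so feed a single newline instead.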
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
return &p
}
func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
yaml_parser_set_input_reader(&p.parser, r)
return &p
}
func (p *parser) init() {
if p.doneInit {
return
}
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
}
yaml_parser_delete(&p.parser)
}
// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}
// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}
func (p *parser) fail() {
var where string
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't advance the line counter before returning, so compensate
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
if line != 0 {
where = "line " + strconv.Itoa(line) + ": "
}
var msg string
if len(p.parser.problem) > 0 {
msg = p.parser.problem
} else {
msg = "unknown problem parsing YAML content"
}
failf("%s%s", where, msg)
}
func (p *parser) anchor(n *node, anchor []byte) {
if anchor != nil {
p.doc.anchors[string(anchor)] = n
}
}
func (p *parser) parse() *node {
p.init()
switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
return p.alias()
case yaml_MAPPING_START_EVENT:
return p.mapping()
case yaml_SEQUENCE_START_EVENT:
return p.sequence()
case yaml_DOCUMENT_START_EVENT:
return p.document()
case yaml_STREAM_END_EVENT:
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + p.event.typ.String())
}
}
func (p *parser) node(kind int) *node {
return &node{
kind: kind,
line: p.event.start_mark.line,
column: p.event.start_mark.column,
}
}
func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.expect(yaml_DOCUMENT_START_EVENT)
n.children = append(n.children, p.parse())
p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
n.alias = p.doc.anchors[n.value]
if n.alias == nil {
failf("unknown anchor '%s' referenced", n.value)
}
p.expect(yaml_ALIAS_EVENT)
return n
}
func (p *parser) scalar() *node {
n := p.node(scalarNode)
n.value = string(p.event.value)
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.expect(yaml_MAPPING_END_EVENT)
return n
}
// ----------------------------------------------------------------------------
// Decoder, unmarshals a node into a provided value.
type decoder struct {
doc *node
aliases map[*node]bool
mapType reflect.Type
terrors []string
strict bool
decodeCount int
aliasCount int
aliasDepth int
}
var (
mapItemType = reflect.TypeOf(MapItem{})
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[*node]bool)
return d
}
func (d *decoder) terror(n *node, tag string, out reflect.Value) {
if n.tag != "" {
tag = n.tag
}
value := n.value
if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
if len(value) > 10 {
value = " `" + value[:7] + "...`"
} else {
value = " `" + value + "`"
}
}
d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
}
func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
terrlen := len(d.terrors)
err := u.UnmarshalYAML(func(v interface{}) (err error) {
defer handleErr(&err)
d.unmarshal(n, reflect.ValueOf(v))
if len(d.terrors) > terrlen {
issues := d.terrors[terrlen:]
d.terrors = d.terrors[:terrlen]
return &TypeError{issues}
}
return nil
})
if e, ok := err.(*TypeError); ok {
d.terrors = append(d.terrors, e.Errors...)
return false
}
if err != nil {
fail(err)
}
return true
}
// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// unmarshalling was already done by UnmarshalYAML, and if so whether
// its types unmarshalled appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
for again {
again = false
if out.Kind() == reflect.Ptr {
if out.IsNil() {
out.Set(reflect.New(out.Type().Elem()))
}
out = out.Elem()
again = true
}
if out.CanAddr() {
if u, ok := out.Addr().Interface().(Unmarshaler); ok {
good = d.callUnmarshaler(n, u)
return out, true, good
}
}
}
return out, false, false
}
const (
// 400,000 decode operations is ~500kb of dense object declarations, or
// ~5kb of dense object declarations with 10000% alias expansion
alias_ratio_range_low = 400000
// 4,000,000 decode operations is ~5MB of dense object declarations, or
// ~4.5MB of dense object declarations with 10% alias expansion
alias_ratio_range_high = 4000000
// alias_ratio_range is the range over which we scale allowed alias ratios
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)
func allowedAliasRatio(decodeCount int) float64 {
switch {
case decodeCount <= alias_ratio_range_low:
// allow 99% to come from alias expansion for small-to-medium documents
return 0.99
case decodeCount >= alias_ratio_range_high:
// allow 10% to come from alias expansion for very large documents
return 0.10
default:
// scale smoothly from 99% down to 10% over the range.
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
}
}
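// Worked example: decodeCount = 2,200,000 lies halfway through the range, so
// the allowed ratio is 0.99 - 0.89*0.5 = 0.545: at most ~54.5% of decode
// operations may come from alias expansion before decoding fails as excessive.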
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
d.decodeCount++
if d.aliasDepth > 0 {
d.aliasCount++
}
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
failf("document contains excessive aliasing")
}
switch n.kind {
case documentNode:
return d.document(n, out)
case aliasNode:
return d.alias(n, out)
}
out, unmarshaled, good := d.prepare(n, out)
if unmarshaled {
return good
}
switch n.kind {
case scalarNode:
good = d.scalar(n, out)
case mappingNode:
good = d.mapping(n, out)
case sequenceNode:
good = d.sequence(n, out)
default:
panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
}
return good
}
func (d *decoder) document(n *node, out reflect.Value) (good bool) {
if len(n.children) == 1 {
d.doc = n
d.unmarshal(n.children[0], out)
return true
}
return false
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n] = true
d.aliasDepth++
good = d.unmarshal(n.alias, out)
d.aliasDepth--
delete(d.aliases, n)
return good
}
var zeroValue reflect.Value
func resetMap(out reflect.Value) {
for _, k := range out.MapKeys() {
out.SetMapIndex(k, zeroValue)
}
}
func (d *decoder) scalar(n *node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
tag = yaml_STR_TAG
resolved = n.value
} else {
tag, resolved = resolve(n.tag, n.value)
if tag == yaml_BINARY_TAG {
data, err := base64.StdEncoding.DecodeString(resolved.(string))
if err != nil {
failf("!!binary value contains invalid base64 data")
}
resolved = string(data)
}
}
if resolved == nil {
if out.Kind() == reflect.Map && !out.CanAddr() {
resetMap(out)
} else {
out.Set(reflect.Zero(out.Type()))
}
return true
}
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == yaml_BINARY_TAG {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should weed out any dubious values.
text = []byte(n.value)
}
err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
return true
}
}
switch out.Kind() {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
return true
}
if resolved != nil {
out.SetString(n.value)
return true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else if tag == yaml_TIMESTAMP_TAG {
// It looks like a timestamp but for backward compatibility
// reasons we set it as a string, so that code that unmarshals
// timestamp-like values into interface{} will continue to
// see a string and not a time.Time.
// TODO(v3) Drop this.
out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
return true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
}
case string:
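// Permit duration strings such as "1h30m" to decode into time.Duration fields.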
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
return true
}
}
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
switch resolved := resolved.(type) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
return true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
return true
case int64:
out.SetFloat(float64(resolved))
return true
case uint64:
out.SetFloat(float64(resolved))
return true
case float64:
out.SetFloat(resolved)
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
return true
}
}
d.terror(n, tag, out)
return false
}
func settableValueOf(i interface{}) reflect.Value {
v := reflect.ValueOf(i)
sv := reflect.New(v.Type()).Elem()
sv.Set(v)
return sv
}
func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
l := len(n.children)
var iface reflect.Value
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
out = settableValueOf(make([]interface{}, l))
default:
d.terror(n, yaml_SEQ_TAG, out)
return false
}
et := out.Type().Elem()
j := 0
for i := 0; i < l; i++ {
e := reflect.New(et).Elem()
if ok := d.unmarshal(n.children[i], e); ok {
out.Index(j).Set(e)
j++
}
}
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
if iface.IsValid() {
iface.Set(out)
}
return true
}
func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Struct:
return d.mappingStruct(n, out)
case reflect.Slice:
return d.mappingSlice(n, out)
case reflect.Map:
// okay
case reflect.Interface:
if d.mapType.Kind() == reflect.Map {
iface := out
out = reflect.MakeMap(d.mapType)
iface.Set(out)
} else {
slicev := reflect.New(d.mapType).Elem()
if !d.mappingSlice(n, slicev) {
return false
}
out.Set(slicev)
return true
}
default:
d.terror(n, yaml_MAP_TAG, out)
return false
}
outt := out.Type()
kt := outt.Key()
et := outt.Elem()
mapType := d.mapType
if outt.Key() == ifaceType && outt.Elem() == ifaceType {
d.mapType = outt
}
if out.IsNil() {
out.Set(reflect.MakeMap(outt))
}
l := len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
k := reflect.New(kt).Elem()
if d.unmarshal(n.children[i], k) {
kkind := k.Kind()
if kkind == reflect.Interface {
kkind = k.Elem().Kind()
}
if kkind == reflect.Map || kkind == reflect.Slice {
failf("invalid map key: %#v", k.Interface())
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
d.setMapIndex(n.children[i+1], out, k, e)
}
}
}
d.mapType = mapType
return true
}
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
if d.strict && out.MapIndex(k) != zeroValue {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
return
}
out.SetMapIndex(k, v)
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
d.terror(n, yaml_MAP_TAG, out)
return false
}
mapType := d.mapType
d.mapType = outt
var slice []MapItem
var l = len(n.children)
for i := 0; i < l; i += 2 {
if isMerge(n.children[i]) {
d.merge(n.children[i+1], out)
continue
}
item := MapItem{}
k := reflect.ValueOf(&item.Key).Elem()
if d.unmarshal(n.children[i], k) {
v := reflect.ValueOf(&item.Value).Elem()
if d.unmarshal(n.children[i+1], v) {
slice = append(slice, item)
}
}
}
out.Set(reflect.ValueOf(slice))
d.mapType = mapType
return true
}
func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
sinfo, err := getStructInfo(out.Type())
if err != nil {
panic(err)
}
name := settableValueOf("")
l := len(n.children)
var inlineMap reflect.Value
var elemType reflect.Type
if sinfo.InlineMap != -1 {
inlineMap = out.Field(sinfo.InlineMap)
inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
elemType = inlineMap.Type().Elem()
}
var doneFields []bool
if d.strict {
doneFields = make([]bool, len(sinfo.FieldsList))
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
d.merge(n.children[i+1], out)
continue
}
if !d.unmarshal(ni, name) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.strict {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
} else {
field = out.FieldByIndex(info.Inline)
}
d.unmarshal(n.children[i+1], field)
} else if sinfo.InlineMap != -1 {
if inlineMap.IsNil() {
inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
d.setMapIndex(n.children[i+1], inlineMap, name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
}
}
return true
}
func failWantMap() {
failf("map merge requires map or sequence of maps as the value")
}
func (d *decoder) merge(n *node, out reflect.Value) {
switch n.kind {
case mappingNode:
d.unmarshal(n, out)
case aliasNode:
if n.alias != nil && n.alias.kind != mappingNode {
failWantMap()
}
d.unmarshal(n, out)
case sequenceNode:
// Step backwards as earlier nodes take precedence.
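// e.g. with "<<: [*a, *b]" the keys of *a override those of *b.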
for i := len(n.children) - 1; i >= 0; i-- {
ni := n.children[i]
if ni.kind == aliasNode {
if ni.alias != nil && ni.alias.kind != mappingNode {
failWantMap()
}
} else if ni.kind != mappingNode {
failWantMap()
}
d.unmarshal(ni, out)
}
default:
failWantMap()
}
}
func isMerge(n *node) bool {
return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
}

1685
vendor/go.yaml.in/yaml/v2/emitterc.go generated vendored Normal file

File diff suppressed because it is too large

390
vendor/go.yaml.in/yaml/v2/encode.go generated vendored Normal file

@ -0,0 +1,390 @@
package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
// jsonNumber is the interface of the encoding/json.Number datatype.
// Repeating the interface here avoids a dependency on encoding/json, and also
// supports other libraries like jsoniter, which use a similar datatype with
// the same interface. Detecting this interface is useful when dealing with
// structures containing json.Number, which is a string under the hood. The
// encoder should prefer the use of Int64(), Float64() and string(), in that
// order, when encoding this type.
type jsonNumber interface {
Float64() (float64, error)
Int64() (int64, error)
String() string
}
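// For example, json.Number("3") is emitted as the YAML integer 3,
// json.Number("3.5") as the float 3.5, and json.Number("1e999999"), which
// overflows both Int64 and Float64, falls back to its string form.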
type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
// doneInit holds whether the initial stream_start_event has been
// emitted.
doneInit bool
}
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) destroy() {
yaml_emitter_delete(&e.emitter)
}
func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
if !ok {
msg := e.emitter.problem
if msg == "" {
msg = "unknown problem generating YAML content"
}
failf("%s", msg)
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
switch m := iface.(type) {
case jsonNumber:
integer, err := m.Int64()
if err == nil {
// In this case the json.Number is a valid int64
in = reflect.ValueOf(integer)
break
}
float, err := m.Float64()
if err == nil {
// In this case the json.Number is a valid float64
in = reflect.ValueOf(float)
break
}
// fallback case - no number could be obtained
in = reflect.ValueOf(m.String())
case time.Time, *time.Time:
// Although time.Time implements TextMarshaler,
// we don't want to treat it as a string for YAML
// purposes because YAML has special support for
// timestamps.
case Marshaler:
v, err := m.MarshalYAML()
if err != nil {
fail(err)
}
if v == nil {
e.nilv()
return
}
in = reflect.ValueOf(v)
case encoding.TextMarshaler:
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.Type() == ptrTimeType {
e.timev(tag, in.Elem())
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
if in.Type() == timeType {
e.timev(tag, in)
} else {
e.structv(tag, in)
}
case reflect.Slice, reflect.Array:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
e.slicev(tag, in)
}
case reflect.String:
e.stringv(tag, in)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if in.Type() == durationType {
e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
} else {
e.intv(tag, in)
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.uintv(tag, in)
case reflect.Float32, reflect.Float64:
e.floatv(tag, in)
case reflect.Bool:
e.boolv(tag, in)
default:
panic("cannot marshal type: " + in.Type().String())
}
}
func (e *encoder) mapv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
keys := keyList(in.MapKeys())
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k)
e.marshal("", in.MapIndex(k))
}
})
}
func (e *encoder) itemsv(tag string, in reflect.Value) {
e.mappingv(tag, func() {
slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
for _, item := range slice {
e.marshal("", reflect.ValueOf(item.Key))
e.marshal("", reflect.ValueOf(item.Value))
}
})
}
func (e *encoder) structv(tag string, in reflect.Value) {
sinfo, err := getStructInfo(in.Type())
if err != nil {
panic(err)
}
e.mappingv(tag, func() {
for _, info := range sinfo.FieldsList {
var value reflect.Value
if info.Inline == nil {
value = in.Field(info.Num)
} else {
value = in.FieldByIndex(info.Inline)
}
if info.OmitEmpty && isZero(value) {
continue
}
e.marshal("", reflect.ValueOf(info.Key))
e.flow = info.Flow
e.marshal("", value)
}
if sinfo.InlineMap >= 0 {
m := in.Field(sinfo.InlineMap)
if m.Len() > 0 {
e.flow = false
keys := keyList(m.MapKeys())
sort.Sort(keys)
for _, k := range keys {
if _, found := sinfo.FieldsMap[k.String()]; found {
panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
}
e.marshal("", k)
e.flow = false
e.marshal("", m.MapIndex(k))
}
}
}
})
}
func (e *encoder) mappingv(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *encoder) slicev(tag string, in reflect.Value) {
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
n := in.Len()
for i := 0; i < n; i++ {
e.marshal("", in.Index(i))
}
e.must(yaml_sequence_end_event_initialize(&e.event))
e.emit()
}
// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
//
// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
// in YAML 1.2 and by this package, but such values should still be marshalled
// quoted for the time being for compatibility with other parsers.
func isBase60Float(s string) (result bool) {
// Fast path.
if s == "" {
return false
}
c := s[0]
if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
return false
}
// Do the full match.
return base60float.MatchString(s)
}
// From http://yaml.org/type/float.html, except the regular expression there
// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
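// A minimal sketch of why the quoting matters: to a YAML 1.1 parser "1:20"
// is the base 60 integer 80, so the encoder refuses to emit it plain
// (assuming the go.yaml.in/yaml/v2 import path):
//
//	out, _ := yaml.Marshal("1:20")
//	// out == "\"1:20\"\n" -- double-quoted, so other parsers read back
//	// the string "1:20" rather than the number 80.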
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = yaml_BINARY_TAG
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
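// A minimal sketch of the three style outcomes chosen above, assuming the
// go.yaml.in/yaml/v2 import path:
//
//	a, _ := yaml.Marshal("plain text") // "plain text\n" -- resolves to !!str
//	b, _ := yaml.Marshal("true")       // "\"true\"\n" -- would resolve to !!bool
//	c, _ := yaml.Marshal("two\nlines") // "|-\n  two\n  lines\n" -- literal style
//	_, _, _ = a, b, c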
func (e *encoder) boolv(tag string, in reflect.Value) {
var s string
if in.Bool() {
s = "true"
} else {
s = "false"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) intv(tag string, in reflect.Value) {
s := strconv.FormatInt(in.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) uintv(tag string, in reflect.Value) {
s := strconv.FormatUint(in.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
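// A minimal sketch of the special cases above and of the issue #352
// precision fix, assuming the go.yaml.in/yaml/v2 and math imports:
//
//	a, _ := yaml.Marshal(math.Inf(-1)) // "-.inf\n"
//	b, _ := yaml.Marshal(math.NaN())   // ".nan\n"
//	c, _ := yaml.Marshal(float32(0.1)) // "0.1\n" -- formatted at 32-bit
//	                                   // precision, not the float64 rendering
//	_, _, _ = a, b, c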
func (e *encoder) nilv() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
e.emit()
}

1095
vendor/go.yaml.in/yaml/v2/parserc.go generated vendored Normal file

File diff suppressed because it is too large

412
vendor/go.yaml.in/yaml/v2/readerc.go generated vendored Normal file
View File

@ -0,0 +1,412 @@
package yaml
import (
"io"
)
// Set the reader error and return 0.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
// Byte order marks.
const (
bom_UTF8 = "\xef\xbb\xbf"
bom_UTF16LE = "\xff\xfe"
bom_UTF16BE = "\xfe\xff"
)
// Determine the input stream encoding by checking the BOM symbol. If no BOM is
// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
// Ensure that we had enough bytes in the raw buffer.
for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
// Determine the encoding.
buf := parser.raw_buffer
pos := parser.raw_buffer_pos
avail := len(buf) - pos
if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
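// A minimal sketch of the BOM sniffing above: the public API accepts
// UTF-16 input transparently (assuming the go.yaml.in/yaml/v2 import path):
//
//	// "a: 1" encoded as UTF-16LE, preceded by the 0xFF 0xFE BOM.
//	in := []byte{0xff, 0xfe, 'a', 0, ':', 0, ' ', 0, '1', 0}
//	var m map[string]int
//	_ = yaml.Unmarshal(in, &m) // m == map[string]int{"a": 1}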
// Update the raw buffer.
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
// Return if the raw buffer is full.
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
// Return on EOF.
if parser.eof {
return true
}
// Move the remaining bytes in the raw buffer to the beginning.
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
// Call the read handler to fill the buffer.
size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
}
return true
}
// Ensure that the buffer contains at least `length` characters.
// Return true on success, false on failure.
//
// The length is supposed to be significantly less than the buffer size.
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
if parser.read_handler == nil {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length at EOF.
// The fact we need to do this is pretty awful, but the description above
// implies that this must be the case, and there are tests that rely on it.
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is (in Go) panicking, or (in C) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
if parser.unread >= length {
return true
}
// Determine the input encoding if it is not known yet.
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
// Move the unread characters to the beginning of the buffer.
buffer_len := len(parser.buffer)
if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_len -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_len {
buffer_len = 0
parser.buffer_pos = 0
}
// Open the whole buffer for writing, and cut it before returning.
parser.buffer = parser.buffer[:cap(parser.buffer)]
// Fill the buffer until it has enough characters.
first := true
for parser.unread < length {
// Fill the raw buffer if necessary.
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_len]
return false
}
}
first = false
// Decode the raw buffer.
inner:
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var width int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
// Decode the next character.
switch parser.encoding {
case yaml_UTF8_ENCODING:
// Decode a UTF-8 character. Check RFC 3629
// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
//
// The following table (taken from the RFC) is used for
// decoding.
//
// Char. number range | UTF-8 octet sequence
// (hexadecimal) | (binary)
// --------------------+------------------------------------
// 0000 0000-0000 007F | 0xxxxxxx
// 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
// 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
// 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
//
// Additionally, the characters in the range 0xD800-0xDFFF
// are prohibited as they are reserved for use with UTF-16
// surrogate pairs.
// Determine the length of the UTF-8 sequence.
octet := parser.raw_buffer[parser.raw_buffer_pos]
switch {
case octet&0x80 == 0x00:
width = 1
case octet&0xE0 == 0xC0:
width = 2
case octet&0xF0 == 0xE0:
width = 3
case octet&0xF8 == 0xF0:
width = 4
default:
// The leading octet is invalid.
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
// Check if the raw buffer contains an incomplete character.
if width > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
break inner
}
// Decode the leading octet.
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
// Check and decode the trailing octets.
for k := 1; k < width; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
// Check if the octet is valid.
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
// Decode the octet.
value = (value << 6) + rune(octet&0x3F)
}
// Check the length of the sequence against the value.
switch {
case width == 1:
case width == 2 && value >= 0x80:
case width == 3 && value >= 0x800:
case width == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
// Check the range of the value.
if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
low, high = 1, 0
}
// The UTF-16 encoding is not as simple as one might
// naively think. Check RFC 2781
// (http://www.ietf.org/rfc/rfc2781.txt).
//
// Normally, two subsequent bytes describe a Unicode
// character. However a special technique (called a
// surrogate pair) is used for specifying character
// values larger than 0xFFFF.
//
// A surrogate pair consists of two pseudo-characters:
// high surrogate area (0xD800-0xDBFF)
// low surrogate area (0xDC00-0xDFFF)
//
// The following formulas are used for decoding
// and encoding characters using surrogate pairs:
//
// U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
// U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
// W1 = 110110yyyyyyyyyy
// W2 = 110111xxxxxxxxxx
//
// where U is the character value, W1 is the high surrogate
// area, W2 is the low surrogate area.
// Check for incomplete UTF-16 character.
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
break inner
}
// Get the character.
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
// Check for unexpected low surrogate area.
if value&0xFC00 == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
// Check for a high surrogate area.
if value&0xFC00 == 0xD800 {
width = 4
// Check for incomplete surrogate pair.
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
break inner
}
// Get the next character.
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
// Check for a low surrogate area.
if value2&0xFC00 != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
// Generate the value of the surrogate pair.
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
width = 2
}
default:
panic("impossible")
}
// Check if the character is in the allowed range:
// #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
// | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
// | [#x10000-#x10FFFF] (32 bit)
switch {
case value == 0x09:
case value == 0x0A:
case value == 0x0D:
case value >= 0x20 && value <= 0x7E:
case value == 0x85:
case value >= 0xA0 && value <= 0xD7FF:
case value >= 0xE000 && value <= 0xFFFD:
case value >= 0x10000 && value <= 0x10FFFF:
default:
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
// Move the raw pointers.
parser.raw_buffer_pos += width
parser.offset += width
// Finally put the character into the buffer.
if value <= 0x7F {
// 0000 0000-0000 007F . 0xxxxxxx
parser.buffer[buffer_len+0] = byte(value)
buffer_len += 1
} else if value <= 0x7FF {
// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
buffer_len += 2
} else if value <= 0xFFFF {
// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
buffer_len += 3
} else {
// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
buffer_len += 4
}
parser.unread++
}
// On EOF, put NUL into the buffer and return.
if parser.eof {
parser.buffer[buffer_len] = 0
buffer_len++
parser.unread++
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}
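// A worked instance of the bit arithmetic above (hypothetical helper name):
//
//	func decodeTwoByteUTF8(b0, b1 byte) rune {
//		// 110xxxxx keeps 5 bits, 10xxxxxx keeps 6 bits.
//		return rune(b0&0x1F)<<6 | rune(b1&0x3F)
//	}
//	// decodeTwoByteUTF8(0xC3, 0xA9) == 0xE9 ('é')
//
//	// And the UTF-16 surrogate formula above, applied to U+1F600:
//	// 0x10000 + ((0xD83D&0x3FF)<<10) + (0xDE00&0x3FF) == 0x1F600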

258
vendor/go.yaml.in/yaml/v2/resolve.go generated vendored Normal file
View File

@ -0,0 +1,258 @@
package yaml
import (
"encoding/base64"
"math"
"regexp"
"strconv"
"strings"
"time"
)
type resolveMapItem struct {
value interface{}
tag string
}
var resolveTable = make([]byte, 256)
var resolveMap = make(map[string]resolveMapItem)
func init() {
t := resolveTable
t[int('+')] = 'S' // Sign
t[int('-')] = 'S'
for _, c := range "0123456789" {
t[int(c)] = 'D' // Digit
}
for _, c := range "yYnNtTfFoO~" {
t[int(c)] = 'M' // In map
}
t[int('.')] = '.' // Float (potentially in map)
var resolveMapList = []struct {
v interface{}
tag string
l []string
}{
{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
{"<<", yaml_MERGE_TAG, []string{"<<"}},
}
m := resolveMap
for _, item := range resolveMapList {
for _, s := range item.l {
m[s] = resolveMapItem{item.v, item.tag}
}
}
}
const longTagPrefix = "tag:yaml.org,2002:"
func shortTag(tag string) string {
// TODO This can easily be made faster and produce less garbage.
if strings.HasPrefix(tag, longTagPrefix) {
return "!!" + tag[len(longTagPrefix):]
}
return tag
}
func longTag(tag string) string {
if strings.HasPrefix(tag, "!!") {
return longTagPrefix + tag[2:]
}
return tag
}
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
return true
}
return false
}
var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
return tag, in
}
defer func() {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
case yaml_FLOAT_TAG:
if rtag == yaml_INT_TAG {
switch v := out.(type) {
case int64:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
case int:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
// Any data is accepted as a !!str or !!binary.
// Otherwise, the prefix is enough of a hint about what it might be.
hint := byte('N')
if in != "" {
hint = resolveTable[in[0]]
}
if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
// Handle things we can lookup in a map.
if item, ok := resolveMap[in]; ok {
return item.tag, item.value
}
// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
// are purposefully unsupported here. They're still quoted on
// the way out for compatibility with other parsers, though.
switch hint {
case 'M':
// We've already checked the map above.
case '.':
// Not in the map, so maybe a normal float.
floatv, err := strconv.ParseFloat(in, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == yaml_TIMESTAMP_TAG {
t, ok := parseTimestamp(in)
if ok {
return yaml_TIMESTAMP_TAG, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain, 0, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
if yamlStyleFloat.MatchString(plain) {
floatv, err := strconv.ParseFloat(plain, 64)
if err == nil {
return yaml_FLOAT_TAG, floatv
}
}
if strings.HasPrefix(plain, "0b") {
intv, err := strconv.ParseInt(plain[2:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
uintv, err := strconv.ParseUint(plain[2:], 2, 64)
if err == nil {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, intv
}
}
}
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
return yaml_STR_TAG, in
}
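// A minimal sketch of what the table-driven resolution above yields for
// untagged scalars, assuming the go.yaml.in/yaml/v2 import path:
//
//	var v map[string]interface{}
//	_ = yaml.Unmarshal([]byte("a: yes\nb: 0x10\nc: 1.5\nd: 2001-12-14"), &v)
//	// v["a"] == true      -- !!bool: "yes" is in resolveMap
//	// v["b"] == 16        -- !!int: ParseInt with base 0 accepts the 0x prefix
//	// v["c"] == 1.5       -- !!float: matches yamlStyleFloat
//	// v["d"].(time.Time)  -- !!timestamp: parseTimestamp succeeds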
// encodeBase64 encodes s as base64 that is broken up into multiple lines
// as appropriate for the resulting length.
func encodeBase64(s string) string {
const lineLen = 70
encLen := base64.StdEncoding.EncodedLen(len(s))
lines := encLen/lineLen + 1
buf := make([]byte, encLen*2+lines)
in := buf[0:encLen]
out := buf[encLen:]
base64.StdEncoding.Encode(in, []byte(s))
k := 0
for i := 0; i < len(in); i += lineLen {
j := i + lineLen
if j > len(in) {
j = len(in)
}
k += copy(out[k:], in[i:j])
if lines > 1 {
out[k] = '\n'
k++
}
}
return string(out[:k])
}
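// A minimal sketch of how the encoder reaches this function: invalid UTF-8
// is tagged !!binary and base64-encoded, wrapped once it exceeds 70 columns
// (assuming the go.yaml.in/yaml/v2 import path):
//
//	out, _ := yaml.Marshal("\xff\xfe") // not valid UTF-8
//	// out == "!!binary //4=\n"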
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
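// A minimal sketch of which shapes decode into a time.Time field, assuming
// the go.yaml.in/yaml/v2 and time imports:
//
//	var v struct{ T time.Time } // default key: "t"
//	_ = yaml.Unmarshal([]byte("t: 2001-12-14T21:59:43.1Z"), &v) // RFC3339-style
//	_ = yaml.Unmarshal([]byte("t: 2001-12-14"), &v)             // date only
//	// "t: 2001-12-14 21:59:43.10 -5" is rejected, per the exception noted
//	// in allowedTimestampFormats.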

2711
vendor/go.yaml.in/yaml/v2/scannerc.go generated vendored Normal file

File diff suppressed because it is too large

113
vendor/go.yaml.in/yaml/v2/sorter.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
package yaml
import (
"reflect"
"unicode"
)
type keyList []reflect.Value
func (l keyList) Len() int { return len(l) }
func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l keyList) Less(i, j int) bool {
a := l[i]
b := l[j]
ak := a.Kind()
bk := b.Kind()
for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
a = a.Elem()
ak = a.Kind()
}
for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
b = b.Elem()
bk = b.Kind()
}
af, aok := keyFloat(a)
bf, bok := keyFloat(b)
if aok && bok {
if af != bf {
return af < bf
}
if ak != bk {
return ak < bk
}
return numLess(a, b)
}
if ak != reflect.String || bk != reflect.String {
return ak < bk
}
ar, br := []rune(a.String()), []rune(b.String())
for i := 0; i < len(ar) && i < len(br); i++ {
if ar[i] == br[i] {
continue
}
al := unicode.IsLetter(ar[i])
bl := unicode.IsLetter(br[i])
if al && bl {
return ar[i] < br[i]
}
if al || bl {
return bl
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}
for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
bn = bn*10 + int64(br[bi]-'0')
}
if an != bn {
return an < bn
}
if ai != bi {
return ai < bi
}
return ar[i] < br[i]
}
return len(ar) < len(br)
}
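// A minimal sketch of the digit-aware ordering above as it shows up in
// encoded map key order, assuming the go.yaml.in/yaml/v2 import path:
//
//	out, _ := yaml.Marshal(map[string]int{"a10": 1, "a2": 2, "b1": 3})
//	// out == "a2: 2\na10: 1\nb1: 3\n" -- embedded numbers compare
//	// numerically, so "a2" sorts before "a10".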
// keyFloat returns a float value for v if it is a number/bool
// and whether it is a number/bool or not.
func keyFloat(v reflect.Value) (f float64, ok bool) {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return float64(v.Int()), true
case reflect.Float32, reflect.Float64:
return v.Float(), true
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return float64(v.Uint()), true
case reflect.Bool:
if v.Bool() {
return 1, true
}
return 0, true
}
return 0, false
}
// numLess returns whether a < b.
// a and b must necessarily have the same kind.
func numLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return a.Int() < b.Int()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Bool:
return !a.Bool() && b.Bool()
}
panic("not a number")
}

26
vendor/go.yaml.in/yaml/v2/writerc.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
package yaml
// Set the writer error and return false.
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
emitter.error = yaml_WRITER_ERROR
emitter.problem = problem
return false
}
// Flush the output buffer.
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
if emitter.write_handler == nil {
panic("write handler not set")
}
// Check if the buffer is empty.
if emitter.buffer_pos == 0 {
return true
}
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}

478
vendor/go.yaml.in/yaml/v2/yaml.go generated vendored Normal file
View File

@ -0,0 +1,478 @@
// Package yaml implements YAML support for the Go language.
//
// Source code and other details for the project are available at GitHub:
//
// https://github.com/yaml/go-yaml
//
package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
)
// MapSlice encodes and decodes as a YAML map.
// The order of keys is preserved when encoding and decoding.
type MapSlice []MapItem
// MapItem is an item in a MapSlice.
type MapItem struct {
Key, Value interface{}
}
// The Unmarshaler interface may be implemented by types to customize their
// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
// method receives a function that may be called to unmarshal the original
// YAML value into a field or variable. It is safe to call the unmarshal
// function parameter more than once if necessary.
type Unmarshaler interface {
UnmarshalYAML(unmarshal func(interface{}) error) error
}
// The Marshaler interface may be implemented by types to customize their
// behavior when being marshaled into a YAML document. The returned value
// is marshaled in place of the original value implementing Marshaler.
//
// If an error is returned by MarshalYAML, the marshaling procedure stops
// and returns with the provided error.
type Marshaler interface {
MarshalYAML() (interface{}, error)
}
// Unmarshal decodes the first document found within the in byte slice
// and assigns decoded values into the out value.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary for unmarshalling
// the provided data. The out parameter must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to a type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.TypeError is returned with details for all
// missed values.
//
// Struct fields are only unmarshalled if they are exported (have an
// upper case first letter), and are unmarshalled using the field name
// lowercased as the default key. Custom keys may be defined via the
// "yaml" name in the field tag: the content preceding the first comma
// is used as the key, and the following comma-separated options are
// used to tweak the marshalling process (see Marshal).
// Conflicting names result in a runtime error.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// var t T
// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Marshal for the format of tags and a list of
// supported tag options.
//
func Unmarshal(in []byte, out interface{}) (err error) {
return unmarshal(in, out, false)
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
strict bool
parser *parser
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
dec.strict = strict
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder(dec.strict)
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
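// A minimal sketch of draining a multi-document stream with Decoder,
// stopping on the io.EOF returned above (hypothetical helper name):
//
//	func decodeAll(r io.Reader) ([]map[string]interface{}, error) {
//		dec := yaml.NewDecoder(r)
//		var docs []map[string]interface{}
//		for {
//			var doc map[string]interface{}
//			if err := dec.Decode(&doc); err != nil {
//				if err == io.EOF {
//					return docs, nil // end of stream
//				}
//				return nil, err
//			}
//			docs = append(docs, doc)
//		}
//	}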
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder(strict)
p := newParser(in)
defer p.destroy()
node := p.parse()
if node != nil {
v := reflect.ValueOf(out)
if v.Kind() == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
d.unmarshal(node, v)
}
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
// Marshal serializes the value provided into a YAML document. The structure
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
// Conflicting names result in a runtime error.
//
// The field tag format accepted is:
//
// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
//
// The following flags are currently supported:
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be excluded if IsZero returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
//
// inline Inline the field, which must be a struct or a map,
// causing all of its fields or keys to be processed as if
// they were part of the outer struct. For maps, keys must
// not conflict with the yaml keys of other struct fields.
//
// In addition, if the key is "-", the field is ignored.
//
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
//
func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
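// A minimal sketch of the "---" separation described above (hypothetical
// helper name; assuming the go.yaml.in/yaml/v2 and io imports):
//
//	func writeTwoDocs(w io.Writer) error {
//		enc := yaml.NewEncoder(w)
//		if err := enc.Encode(map[string]int{"a": 1}); err != nil {
//			return err
//		}
//		if err := enc.Encode(map[string]int{"b": 2}); err != nil {
//			return err
//		}
//		return enc.Close() // w now holds "a: 1\n---\nb: 2\n"
//	}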
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
*err = e.err
} else {
panic(v)
}
}
}
type yamlError struct {
err error
}
func fail(err error) {
panic(yamlError{err})
}
func failf(format string, args ...interface{}) {
panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
}
// A TypeError is returned by Unmarshal when one or more fields in
// the YAML document cannot be properly decoded into the requested
// types. When this error is returned, the value is still
// unmarshaled partially.
type TypeError struct {
Errors []string
}
func (e *TypeError) Error() string {
return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
}
// --------------------------------------------------------------------------
// Maintain a mapping of keys to structure field indexes
// The code in this section was copied from mgo/bson.
// structInfo holds details for the serialization of fields of
// a given struct.
type structInfo struct {
FieldsMap map[string]fieldInfo
FieldsList []fieldInfo
// InlineMap is the number of the field in the struct that
// contains an ,inline map, or -1 if there's none.
InlineMap int
}
type fieldInfo struct {
Key string
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
}
var structMap = make(map[reflect.Type]*structInfo)
var fieldMapMutex sync.RWMutex
func getStructInfo(st reflect.Type) (*structInfo, error) {
fieldMapMutex.RLock()
sinfo, found := structMap[st]
fieldMapMutex.RUnlock()
if found {
return sinfo, nil
}
n := st.NumField()
fieldsMap := make(map[string]fieldInfo)
fieldsList := make([]fieldInfo, 0, n)
inlineMap := -1
for i := 0; i != n; i++ {
field := st.Field(i)
if field.PkgPath != "" && !field.Anonymous {
continue // Private field
}
info := fieldInfo{Num: i}
tag := field.Tag.Get("yaml")
if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
tag = string(field.Tag)
}
if tag == "-" {
continue
}
inline := false
fields := strings.Split(tag, ",")
if len(fields) > 1 {
for _, flag := range fields[1:] {
switch flag {
case "omitempty":
info.OmitEmpty = true
case "flow":
info.Flow = true
case "inline":
inline = true
default:
return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
}
}
tag = fields[0]
}
if inline {
switch field.Type.Kind() {
case reflect.Map:
if inlineMap >= 0 {
return nil, errors.New("Multiple ,inline maps in struct " + st.String())
}
if field.Type.Key() != reflect.TypeOf("") {
return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
}
inlineMap = info.Num
case reflect.Struct:
sinfo, err := getStructInfo(field.Type)
if err != nil {
return nil, err
}
for _, finfo := range sinfo.FieldsList {
if _, found := fieldsMap[finfo.Key]; found {
msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
if finfo.Inline == nil {
finfo.Inline = []int{i, finfo.Num}
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
default:
//return nil, errors.New("Option ,inline needs a struct value or map field")
return nil, errors.New("Option ,inline needs a struct value field")
}
continue
}
if tag != "" {
info.Key = tag
} else {
info.Key = strings.ToLower(field.Name)
}
if _, found = fieldsMap[info.Key]; found {
msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
fieldMapMutex.Unlock()
return sinfo, nil
}
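// A minimal sketch of the two ,inline forms accepted above (hypothetical
// type names):
//
//	type Base struct{ ID int }
//	type Item struct {
//		Base  `yaml:",inline"`                   // inlined struct
//		Extra map[string]string `yaml:",inline"` // at most one inlined map
//		Name  string
//	}
//	// Marshalling Item{Base{1}, map[string]string{"x": "y"}, "n"} yields
//	// "id: 1\nname: n\nx: y\n": listed fields first, then the inline map;
//	// a map key that collides with a struct field panics in structv.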
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
func isZero(v reflect.Value) bool {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Slice:
return v.Len() == 0
case reflect.Map:
return v.Len() == 0
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Struct:
vt := v.Type()
for i := v.NumField() - 1; i >= 0; i-- {
if vt.Field(i).PkgPath != "" {
continue // Private field
}
if !isZero(v.Field(i)) {
return false
}
}
return true
}
return false
}
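// A minimal sketch of IsZeroer interacting with omitempty: time.Time
// implements IsZero, so a zero timestamp is omitted (hypothetical type
// name; assuming the go.yaml.in/yaml/v2 and time imports):
//
//	type Event struct {
//		Name string    `yaml:"name,omitempty"`
//		At   time.Time `yaml:"at,omitempty"`
//	}
//	out, _ := yaml.Marshal(Event{})
//	// out == "{}\n" -- every field is zero, leaving an empty mapping.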
// FutureLineWrap globally disables line wrapping when encoding long strings.
// This is a temporary and thus deprecated method introduced to facilitate
// migration towards v3, which offers more control of line lengths on
// individual encodings, and has a default matching the behavior introduced
// by this function.
//
// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
// in v2.4.0, at which point this function was introduced to help migration.
func FutureLineWrap() {
disableLineWrapping = true
}

739
vendor/go.yaml.in/yaml/v2/yamlh.go generated vendored Normal file
View File

@ -0,0 +1,739 @@
package yaml
import (
"fmt"
"io"
)
// The version directive data.
type yaml_version_directive_t struct {
major int8 // The major version number.
minor int8 // The minor version number.
}
// The tag directive data.
type yaml_tag_directive_t struct {
handle []byte // The tag handle.
prefix []byte // The tag prefix.
}
type yaml_encoding_t int
// The stream encoding.
const (
// Let the parser choose the encoding.
yaml_ANY_ENCODING yaml_encoding_t = iota
yaml_UTF8_ENCODING // The default UTF-8 encoding.
yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
)
type yaml_break_t int
// Line break types.
const (
// Let the parser choose the break type.
yaml_ANY_BREAK yaml_break_t = iota
yaml_CR_BREAK // Use CR for line breaks (Mac style).
yaml_LN_BREAK // Use LN for line breaks (Unix style).
yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
)
type yaml_error_type_t int
// Many bad things could happen with the parser and emitter.
const (
// No error is produced.
yaml_NO_ERROR yaml_error_type_t = iota
yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
yaml_READER_ERROR // Cannot read or decode the input stream.
yaml_SCANNER_ERROR // Cannot scan the input stream.
yaml_PARSER_ERROR // Cannot parse the input stream.
yaml_COMPOSER_ERROR // Cannot compose a YAML document.
yaml_WRITER_ERROR // Cannot write to the output stream.
yaml_EMITTER_ERROR // Cannot emit a YAML stream.
)
// The pointer position.
type yaml_mark_t struct {
index int // The position index.
line int // The position line.
column int // The position column.
}
// Node Styles
type yaml_style_t int8
type yaml_scalar_style_t yaml_style_t
// Scalar styles.
const (
// Let the emitter choose the style.
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
)
type yaml_sequence_style_t yaml_style_t
// Sequence styles.
const (
// Let the emitter choose the style.
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
)
type yaml_mapping_style_t yaml_style_t
// Mapping styles.
const (
// Let the emitter choose the style.
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
yaml_BLOCK_MAPPING_STYLE // The block mapping style.
yaml_FLOW_MAPPING_STYLE // The flow mapping style.
)
// Tokens
type yaml_token_type_t int
// Token types.
const (
// An empty token.
yaml_NO_TOKEN yaml_token_type_t = iota
yaml_STREAM_START_TOKEN // A STREAM-START token.
yaml_STREAM_END_TOKEN // A STREAM-END token.
yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
yaml_BLOCK_END_TOKEN // A BLOCK-END token.
yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
yaml_KEY_TOKEN // A KEY token.
yaml_VALUE_TOKEN // A VALUE token.
yaml_ALIAS_TOKEN // An ALIAS token.
yaml_ANCHOR_TOKEN // An ANCHOR token.
yaml_TAG_TOKEN // A TAG token.
yaml_SCALAR_TOKEN // A SCALAR token.
)
func (tt yaml_token_type_t) String() string {
switch tt {
case yaml_NO_TOKEN:
return "yaml_NO_TOKEN"
case yaml_STREAM_START_TOKEN:
return "yaml_STREAM_START_TOKEN"
case yaml_STREAM_END_TOKEN:
return "yaml_STREAM_END_TOKEN"
case yaml_VERSION_DIRECTIVE_TOKEN:
return "yaml_VERSION_DIRECTIVE_TOKEN"
case yaml_TAG_DIRECTIVE_TOKEN:
return "yaml_TAG_DIRECTIVE_TOKEN"
case yaml_DOCUMENT_START_TOKEN:
return "yaml_DOCUMENT_START_TOKEN"
case yaml_DOCUMENT_END_TOKEN:
return "yaml_DOCUMENT_END_TOKEN"
case yaml_BLOCK_SEQUENCE_START_TOKEN:
return "yaml_BLOCK_SEQUENCE_START_TOKEN"
case yaml_BLOCK_MAPPING_START_TOKEN:
return "yaml_BLOCK_MAPPING_START_TOKEN"
case yaml_BLOCK_END_TOKEN:
return "yaml_BLOCK_END_TOKEN"
case yaml_FLOW_SEQUENCE_START_TOKEN:
return "yaml_FLOW_SEQUENCE_START_TOKEN"
case yaml_FLOW_SEQUENCE_END_TOKEN:
return "yaml_FLOW_SEQUENCE_END_TOKEN"
case yaml_FLOW_MAPPING_START_TOKEN:
return "yaml_FLOW_MAPPING_START_TOKEN"
case yaml_FLOW_MAPPING_END_TOKEN:
return "yaml_FLOW_MAPPING_END_TOKEN"
case yaml_BLOCK_ENTRY_TOKEN:
return "yaml_BLOCK_ENTRY_TOKEN"
case yaml_FLOW_ENTRY_TOKEN:
return "yaml_FLOW_ENTRY_TOKEN"
case yaml_KEY_TOKEN:
return "yaml_KEY_TOKEN"
case yaml_VALUE_TOKEN:
return "yaml_VALUE_TOKEN"
case yaml_ALIAS_TOKEN:
return "yaml_ALIAS_TOKEN"
case yaml_ANCHOR_TOKEN:
return "yaml_ANCHOR_TOKEN"
case yaml_TAG_TOKEN:
return "yaml_TAG_TOKEN"
case yaml_SCALAR_TOKEN:
return "yaml_SCALAR_TOKEN"
}
return "<unknown token>"
}
// The token structure.
type yaml_token_t struct {
// The token type.
typ yaml_token_type_t
// The start/end of the token.
start_mark, end_mark yaml_mark_t
// The stream encoding (for yaml_STREAM_START_TOKEN).
encoding yaml_encoding_t
// The alias/anchor/scalar value or tag/tag directive handle
// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
value []byte
// The tag suffix (for yaml_TAG_TOKEN).
suffix []byte
// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
prefix []byte
// The scalar style (for yaml_SCALAR_TOKEN).
style yaml_scalar_style_t
// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
major, minor int8
}
// Events
type yaml_event_type_t int8
// Event types.
const (
// An empty event.
yaml_NO_EVENT yaml_event_type_t = iota
yaml_STREAM_START_EVENT // A STREAM-START event.
yaml_STREAM_END_EVENT // A STREAM-END event.
yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
yaml_ALIAS_EVENT // An ALIAS event.
yaml_SCALAR_EVENT // A SCALAR event.
yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
yaml_MAPPING_START_EVENT // A MAPPING-START event.
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
// The event type.
typ yaml_event_type_t
// The start and end of the event.
start_mark, end_mark yaml_mark_t
// The document encoding (for yaml_STREAM_START_EVENT).
encoding yaml_encoding_t
// The version directive (for yaml_DOCUMENT_START_EVENT).
version_directive *yaml_version_directive_t
// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
tag_directives []yaml_tag_directive_t
// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
anchor []byte
// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
tag []byte
// The scalar value (for yaml_SCALAR_EVENT).
value []byte
// Is the document start/end indicator implicit, or the tag optional?
// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
implicit bool
// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
quoted_implicit bool
// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
style yaml_style_t
}
func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
// Nodes
const (
yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
// Not in original libyaml.
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
)
type yaml_node_type_t int
// Node types.
const (
// An empty node.
yaml_NO_NODE yaml_node_type_t = iota
yaml_SCALAR_NODE // A scalar node.
yaml_SEQUENCE_NODE // A sequence node.
yaml_MAPPING_NODE // A mapping node.
)
// An element of a sequence node.
type yaml_node_item_t int
// An element of a mapping node.
type yaml_node_pair_t struct {
key int // The key of the element.
value int // The value of the element.
}
// The node structure.
type yaml_node_t struct {
typ yaml_node_type_t // The node type.
tag []byte // The node tag.
// The node data.
// The scalar parameters (for yaml_SCALAR_NODE).
scalar struct {
value []byte // The scalar value.
length int // The length of the scalar value.
style yaml_scalar_style_t // The scalar style.
}
// The sequence parameters (for yaml_SEQUENCE_NODE).
sequence struct {
items_data []yaml_node_item_t // The stack of sequence items.
style yaml_sequence_style_t // The sequence style.
}
// The mapping parameters (for yaml_MAPPING_NODE).
mapping struct {
pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
pairs_start *yaml_node_pair_t // The beginning of the stack.
pairs_end *yaml_node_pair_t // The end of the stack.
pairs_top *yaml_node_pair_t // The top of the stack.
style yaml_mapping_style_t // The mapping style.
}
start_mark yaml_mark_t // The beginning of the node.
end_mark yaml_mark_t // The end of the node.
}
// The document structure.
type yaml_document_t struct {
// The document nodes.
nodes []yaml_node_t
// The version directive.
version_directive *yaml_version_directive_t
// The list of tag directives.
tag_directives_data []yaml_tag_directive_t
tag_directives_start int // The beginning of the tag directives list.
tag_directives_end int // The end of the tag directives list.
start_implicit int // Is the document start indicator implicit?
end_implicit int // Is the document end indicator implicit?
// The start/end of the document.
start_mark, end_mark yaml_mark_t
}
// The prototype of a read handler.
//
// The read handler is called when the parser needs to read more bytes from the
// source. The handler should write no more than len(buffer) bytes to the
// buffer and return the number of bytes written.
//
// [in,out] parser The parser with the input source specified by
// yaml_parser_set_input().
// [out] buffer The buffer to write the data from the source into.
//
// On success, the handler returns the number of bytes read and a nil error.
// At the end of the input it returns io.EOF (typically with a zero count),
// which makes yaml_parser_update_raw_buffer set the parser's eof flag.
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
// This structure holds information about a potential simple key.
type yaml_simple_key_t struct {
possible bool // Is a simple key possible?
required bool // Is a simple key required?
token_number int // The number of the token.
mark yaml_mark_t // The position mark.
}
// The states of the parser.
type yaml_parser_state_t int
const (
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the end of an ordered mapping entry.
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
yaml_PARSE_END_STATE // Expect nothing.
)
func (ps yaml_parser_state_t) String() string {
switch ps {
case yaml_PARSE_STREAM_START_STATE:
return "yaml_PARSE_STREAM_START_STATE"
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_START_STATE:
return "yaml_PARSE_DOCUMENT_START_STATE"
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
case yaml_PARSE_DOCUMENT_END_STATE:
return "yaml_PARSE_DOCUMENT_END_STATE"
case yaml_PARSE_BLOCK_NODE_STATE:
return "yaml_PARSE_BLOCK_NODE_STATE"
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
case yaml_PARSE_FLOW_NODE_STATE:
return "yaml_PARSE_FLOW_NODE_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
case yaml_PARSE_END_STATE:
return "yaml_PARSE_END_STATE"
}
return "<unknown parser state>"
}
// This structure holds aliases data.
type yaml_alias_data_t struct {
anchor []byte // The anchor.
index int // The node id.
mark yaml_mark_t // The anchor mark.
}
// The parser structure.
//
// All members are internal. Manage the structure using the
// yaml_parser_ family of functions.
type yaml_parser_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// The byte about which the problem occurred.
problem_offset int
problem_value int
problem_mark yaml_mark_t
// The error context.
context string
context_mark yaml_mark_t
// Reader stuff
read_handler yaml_read_handler_t // Read handler.
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
unread int // The number of unread characters in the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The input encoding.
offset int // The offset of the current position (in bytes).
mark yaml_mark_t // The mark of the current position.
// Scanner stuff
stream_start_produced bool // Have we started to scan the input stream?
stream_end_produced bool // Have we reached the end of the input stream?
flow_level int // The number of unclosed '[' and '{' indicators.
tokens []yaml_token_t // The tokens queue.
tokens_head int // The head of the tokens queue.
tokens_parsed int // The number of tokens fetched from the queue.
token_available bool // Does the tokens queue contain a token ready for dequeueing.
indent int // The current indentation level.
indents []int // The indentation levels stack.
simple_key_allowed bool // May a simple key occur at the current position?
simple_keys []yaml_simple_key_t // The stack of simple keys.
simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number
// Parser stuff
state yaml_parser_state_t // The current parser state.
states []yaml_parser_state_t // The parser states stack.
marks []yaml_mark_t // The stack of marks.
tag_directives []yaml_tag_directive_t // The list of TAG directives.
// Dumper stuff
aliases []yaml_alias_data_t // The alias data.
document *yaml_document_t // The currently parsed document.
}
// Emitter Definitions
// The prototype of a write handler.
//
// The write handler is called when the emitter needs to flush the accumulated
// characters to the output. The handler should write the bytes of
// @a buffer to the output.
//
// @param[in,out] emitter The emitter whose output is being flushed.
// @param[in] buffer The buffer with bytes to be written.
//
// @returns On success, the handler should return nil. If the handler
// failed, it should return an error.
//
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
type yaml_emitter_state_t int
// The emitter states.
const (
// Expect STREAM-START.
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
yaml_EMIT_END_STATE // Expect nothing.
)
// The emitter structure.
//
// All members are internal. Manage the structure using the @c yaml_emitter_
// family of functions.
type yaml_emitter_t struct {
// Error handling
error yaml_error_type_t // Error type.
problem string // Error description.
// Writer stuff
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.
raw_buffer []byte // The raw buffer.
raw_buffer_pos int // The current position of the buffer.
encoding yaml_encoding_t // The stream encoding.
// Emitter stuff
canonical bool // If the output is in the canonical style?
best_indent int // The number of indentation spaces.
best_width int // The preferred width of the output lines.
unicode bool // Allow unescaped non-ASCII characters?
line_break yaml_break_t // The preferred line break.
state yaml_emitter_state_t // The current emitter state.
states []yaml_emitter_state_t // The stack of states.
events []yaml_event_t // The event queue.
events_head int // The head of the event queue.
indents []int // The stack of indentation levels.
tag_directives []yaml_tag_directive_t // The list of tag directives.
indent int // The current indentation level.
flow_level int // The current flow level.
root_context bool // Is it the document root context?
sequence_context bool // Is it a sequence context?
mapping_context bool // Is it a mapping context?
simple_key_context bool // Is it a simple mapping key context?
line int // The current line.
column int // The current column.
whitespace bool // If the last character was a whitespace?
indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
open_ended bool // If an explicit document end is required?
// Anchor analysis.
anchor_data struct {
anchor []byte // The anchor value.
alias bool // Is it an alias?
}
// Tag analysis.
tag_data struct {
handle []byte // The tag handle.
suffix []byte // The tag suffix.
}
// Scalar analysis.
scalar_data struct {
value []byte // The scalar value.
multiline bool // Does the scalar contain line breaks?
flow_plain_allowed bool // Can the scalar be expressed in the flow plain style?
block_plain_allowed bool // Can the scalar be expressed in the block plain style?
single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
block_allowed bool // Can the scalar be expressed in the literal or folded styles?
style yaml_scalar_style_t // The output style.
}
// Dumper stuff
opened bool // If the stream was already opened?
closed bool // If the stream was already closed?
// The information associated with the document nodes.
anchors *struct {
references int // The number of references.
anchor int // The anchor id.
serialized bool // If the node has been emitted?
}
last_anchor_id int // The last assigned anchor id.
document *yaml_document_t // The currently emitted document.
}

173
vendor/go.yaml.in/yaml/v2/yamlprivateh.go generated vendored Normal file
View File

@ -0,0 +1,173 @@
package yaml
const (
// The size of the input raw buffer.
input_raw_buffer_size = 512
// The size of the input buffer.
// It should be possible to decode the whole raw buffer.
input_buffer_size = input_raw_buffer_size * 3
// The size of the output buffer.
output_buffer_size = 128
// The size of the output raw buffer.
// It should be possible to encode the whole output buffer.
output_raw_buffer_size = (output_buffer_size*2 + 2)
// The size of other stacks and queues.
initial_stack_size = 16
initial_queue_size = 16
initial_string_size = 16
)
// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func is_alpha(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}
// Check if the character at the specified position is a digit.
func is_digit(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9'
}
// Get the value of a digit.
func as_digit(b []byte, i int) int {
return int(b[i]) - '0'
}
// Check if the character at the specified position is a hex-digit.
func is_hex(b []byte, i int) bool {
return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
}
// Get the value of a hex-digit.
func as_hex(b []byte, i int) int {
bi := b[i]
if bi >= 'A' && bi <= 'F' {
return int(bi) - 'A' + 10
}
if bi >= 'a' && bi <= 'f' {
return int(bi) - 'a' + 10
}
return int(bi) - '0'
}
// Check if the character is ASCII.
func is_ascii(b []byte, i int) bool {
return b[i] <= 0x7F
}
// Check if the character at the start of the buffer can be printed unescaped.
func is_printable(b []byte, i int) bool {
return ((b[i] == 0x0A) || // . == #x0A
(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
// Check if the character at the specified position is NUL.
func is_z(b []byte, i int) bool {
return b[i] == 0x00
}
// Check if the beginning of the buffer is a BOM.
func is_bom(b []byte, i int) bool {
return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}
// Check if the character at the specified position is space.
func is_space(b []byte, i int) bool {
return b[i] == ' '
}
// Check if the character at the specified position is tab.
func is_tab(b []byte, i int) bool {
return b[i] == '\t'
}
// Check if the character at the specified position is blank (space or tab).
func is_blank(b []byte, i int) bool {
//return is_space(b, i) || is_tab(b, i)
return b[i] == ' ' || b[i] == '\t'
}
// Check if the character at the specified position is a line break.
func is_break(b []byte, i int) bool {
return (b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}
func is_crlf(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// Check if the character is a line break or NUL.
func is_breakz(b []byte, i int) bool {
//return is_break(b, i) || is_z(b, i)
return ( // is_break:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
// is_z:
b[i] == 0)
}
// Check if the character is a line break, space, or NUL.
func is_spacez(b []byte, i int) bool {
//return is_space(b, i) || is_breakz(b, i)
return ( // is_space:
b[i] == ' ' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Check if the character is a line break, space, tab, or NUL.
func is_blankz(b []byte, i int) bool {
//return is_blank(b, i) || is_breakz(b, i)
return ( // is_blank:
b[i] == ' ' || b[i] == '\t' ||
// is_breakz:
b[i] == '\r' || // CR (#xD)
b[i] == '\n' || // LF (#xA)
b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
b[i] == 0)
}
// Determine the width of the character.
func width(b byte) int {
// Don't replace these by a switch without first
// confirming that it is being inlined.
if b&0x80 == 0x00 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
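
Illustrative aside (not part of the vendored file): the masks above follow UTF-8's leading-byte encoding (0xxxxxxx, 110xxxxx, 1110xxxx, 11110xxx). A minimal standalone sketch of the same classification; utf8Width and the string literal are hypothetical names for illustration only.

package main

import "fmt"

// utf8Width mirrors the unexported width helper above: it reports the byte
// length of the UTF-8 sequence starting with b, or 0 for a continuation or
// invalid leading byte.
func utf8Width(b byte) int {
	if b&0x80 == 0x00 {
		return 1 // 0xxxxxxx: ASCII
	}
	if b&0xE0 == 0xC0 {
		return 2 // 110xxxxx: 2-byte sequence
	}
	if b&0xF0 == 0xE0 {
		return 3 // 1110xxxx: 3-byte sequence
	}
	if b&0xF8 == 0xF0 {
		return 4 // 11110xxx: 4-byte sequence
	}
	return 0
}

func main() {
	s := "aé中" // 1-, 2-, and 3-byte runes; every byte here is a valid leading byte
	for i := 0; i < len(s); i += utf8Width(s[i]) {
		fmt.Printf("byte 0x%02X starts a %d-byte rune\n", s[i], utf8Width(s[i]))
	}
}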

View File

@ -0,0 +1,147 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"crypto/tls"
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
)
var (
// ciphers maps strings into tls package cipher constants in
// https://golang.org/pkg/crypto/tls/#pkg-constants
ciphers = map[string]uint16{}
insecureCiphers = map[string]uint16{}
)
func init() {
for _, suite := range tls.CipherSuites() {
ciphers[suite.Name] = suite.ID
}
// keep legacy names for backward compatibility
ciphers["TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
ciphers["TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305"] = tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
for _, suite := range tls.InsecureCipherSuites() {
insecureCiphers[suite.Name] = suite.ID
}
}
// InsecureTLSCiphers returns the cipher suites implemented by crypto/tls which have
// security issues.
func InsecureTLSCiphers() map[string]uint16 {
cipherKeys := make(map[string]uint16, len(insecureCiphers))
for k, v := range insecureCiphers {
cipherKeys[k] = v
}
return cipherKeys
}
// InsecureTLSCipherNames returns a list of cipher suite names implemented by crypto/tls
// which have security issues.
func InsecureTLSCipherNames() []string {
cipherKeys := sets.NewString()
for key := range insecureCiphers {
cipherKeys.Insert(key)
}
return cipherKeys.List()
}
// PreferredTLSCipherNames returns a list of cipher suite names implemented by crypto/tls.
func PreferredTLSCipherNames() []string {
cipherKeys := sets.NewString()
for key := range ciphers {
cipherKeys.Insert(key)
}
return cipherKeys.List()
}
func allCiphers() map[string]uint16 {
acceptedCiphers := make(map[string]uint16, len(ciphers)+len(insecureCiphers))
for k, v := range ciphers {
acceptedCiphers[k] = v
}
for k, v := range insecureCiphers {
acceptedCiphers[k] = v
}
return acceptedCiphers
}
// TLSCipherPossibleValues returns all acceptable cipher suite names.
// This is a combination of both InsecureTLSCipherNames() and PreferredTLSCipherNames().
func TLSCipherPossibleValues() []string {
cipherKeys := sets.NewString()
acceptedCiphers := allCiphers()
for key := range acceptedCiphers {
cipherKeys.Insert(key)
}
return cipherKeys.List()
}
// TLSCipherSuites returns a list of cipher suite IDs from the cipher suite names passed.
func TLSCipherSuites(cipherNames []string) ([]uint16, error) {
if len(cipherNames) == 0 {
return nil, nil
}
ciphersIntSlice := make([]uint16, 0)
possibleCiphers := allCiphers()
for _, cipher := range cipherNames {
intValue, ok := possibleCiphers[cipher]
if !ok {
return nil, fmt.Errorf("Cipher suite %s not supported or doesn't exist", cipher)
}
ciphersIntSlice = append(ciphersIntSlice, intValue)
}
return ciphersIntSlice, nil
}
var versions = map[string]uint16{
"VersionTLS10": tls.VersionTLS10,
"VersionTLS11": tls.VersionTLS11,
"VersionTLS12": tls.VersionTLS12,
"VersionTLS13": tls.VersionTLS13,
}
// TLSPossibleVersions returns all acceptable values for TLS Version.
func TLSPossibleVersions() []string {
versionsKeys := sets.NewString()
for key := range versions {
versionsKeys.Insert(key)
}
return versionsKeys.List()
}
// TLSVersion returns the TLS Version ID for the version name passed.
func TLSVersion(versionName string) (uint16, error) {
if len(versionName) == 0 {
return DefaultTLSVersion(), nil
}
if version, ok := versions[versionName]; ok {
return version, nil
}
return 0, fmt.Errorf("unknown tls version %q", versionName)
}
// DefaultTLSVersion defines the default TLS Version.
func DefaultTLSVersion() uint16 {
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
return tls.VersionTLS12
}
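
A minimal sketch (not part of the diff) of how a caller might turn flag-provided names into a tls.Config using the functions above, assuming the package is imported under the usual cliflag alias; the cipher suite name is a real crypto/tls suite, chosen only as an example.

package main

import (
	"crypto/tls"
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	// Resolve human-readable cipher suite names to crypto/tls IDs.
	suites, err := cliflag.TLSCipherSuites([]string{"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"})
	if err != nil {
		panic(err)
	}
	// An empty version name would fall back to DefaultTLSVersion (TLS 1.2).
	minVersion, err := cliflag.TLSVersion("VersionTLS13")
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{CipherSuites: suites, MinVersion: minVersion}
	fmt.Printf("MinVersion=%#x CipherSuites=%v\n", cfg.MinVersion, cfg.CipherSuites)
}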

View File

@ -0,0 +1,114 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"sort"
"strings"
)
// ColonSeparatedMultimapStringString supports setting a map[string][]string from an encoding
// that separates keys from values with ':' and separates key-value pairs with ','.
// A key can be repeated multiple times, in which case the values are appended to a
// slice of strings associated with that key. Items in the list associated with a given
// key will appear in the order provided.
// For example: `a:hello,b:again,c:world,b:beautiful` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}`
// The first call to Set will clear the map before adding entries; subsequent calls will simply append to the map.
// This makes it possible to override default values with a command-line option rather than appending to defaults,
// while still allowing the distribution of key-value pairs across multiple flag invocations.
// For example: `--flag "a:hello" --flag "b:again" --flag "b:beautiful" --flag "c:world"` results in `{"a": ["hello"], "b": ["again", "beautiful"], "c": ["world"]}`
type ColonSeparatedMultimapStringString struct {
Multimap *map[string][]string
initialized bool // set to true after the first Set call
allowDefaultEmptyKey bool
}
// NewColonSeparatedMultimapStringString takes a pointer to a map[string][]string and returns the
// ColonSeparatedMultimapStringString flag parsing shim for that map.
func NewColonSeparatedMultimapStringString(m *map[string][]string) *ColonSeparatedMultimapStringString {
return &ColonSeparatedMultimapStringString{Multimap: m}
}
// NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey takes a pointer to a map[string][]string and returns the
// ColonSeparatedMultimapStringString flag parsing shim for that map. A pair without a colon is treated as a value for the default empty key.
func NewColonSeparatedMultimapStringStringAllowDefaultEmptyKey(m *map[string][]string) *ColonSeparatedMultimapStringString {
return &ColonSeparatedMultimapStringString{Multimap: m, allowDefaultEmptyKey: true}
}
// Set implements github.com/spf13/pflag.Value
func (m *ColonSeparatedMultimapStringString) Set(value string) error {
if m.Multimap == nil {
return fmt.Errorf("no target (nil pointer to map[string][]string)")
}
if !m.initialized || *m.Multimap == nil {
// clear default values, or allocate if no existing map
*m.Multimap = make(map[string][]string)
m.initialized = true
}
for _, pair := range strings.Split(value, ",") {
if len(pair) == 0 {
continue
}
kv := strings.SplitN(pair, ":", 2)
var k, v string
if m.allowDefaultEmptyKey && len(kv) == 1 {
v = strings.TrimSpace(kv[0])
} else {
if len(kv) != 2 {
return fmt.Errorf("malformed pair, expect string:string")
}
k = strings.TrimSpace(kv[0])
v = strings.TrimSpace(kv[1])
}
(*m.Multimap)[k] = append((*m.Multimap)[k], v)
}
return nil
}
// String implements github.com/spf13/pflag.Value
func (m *ColonSeparatedMultimapStringString) String() string {
type kv struct {
k string
v string
}
kvs := make([]kv, 0, len(*m.Multimap))
for k, vs := range *m.Multimap {
for i := range vs {
kvs = append(kvs, kv{k: k, v: vs[i]})
}
}
// stable sort by keys, order of values should be preserved
sort.SliceStable(kvs, func(i, j int) bool {
return kvs[i].k < kvs[j].k
})
pairs := make([]string, 0, len(kvs))
for i := range kvs {
pairs = append(pairs, fmt.Sprintf("%s:%s", kvs[i].k, kvs[i].v))
}
return strings.Join(pairs, ",")
}
// Type implements github.com/spf13/pflag.Value
func (m *ColonSeparatedMultimapStringString) Type() string {
return "colonSeparatedMultimapStringString"
}
// Empty implements OmitEmpty
func (m *ColonSeparatedMultimapStringString) Empty() bool {
return len(*m.Multimap) == 0
}
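
A short usage sketch (illustrative only): the first Set call replaces any defaults and later calls append, matching the doc comment above. The keys and values are arbitrary.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var m map[string][]string
	v := cliflag.NewColonSeparatedMultimapStringString(&m)
	_ = v.Set("a:hello,b:again") // first call clears any defaults
	_ = v.Set("b:beautiful")     // later calls append
	fmt.Println(v.String())      // a:hello,b:again,b:beautiful
	fmt.Println(m["b"])          // [again beautiful]
}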

View File

@ -0,0 +1,53 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"sort"
"strings"
)
type ConfigurationMap map[string]string
func (m *ConfigurationMap) String() string {
pairs := []string{}
for k, v := range *m {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
func (m *ConfigurationMap) Set(value string) error {
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "=", 2)
if len(arr) == 2 {
(*m)[strings.TrimSpace(arr[0])] = strings.TrimSpace(arr[1])
} else {
(*m)[strings.TrimSpace(arr[0])] = ""
}
}
return nil
}
func (*ConfigurationMap) Type() string {
return "mapStringString"
}
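
An illustrative sketch (not part of the diff) of ConfigurationMap parsing; note that a key without '=' maps to the empty string. The key names are arbitrary examples.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	m := cliflag.ConfigurationMap{}
	_ = m.Set("cpu=250m,ephemeral-storage") // a key without '=' maps to ""
	fmt.Println(m.String())                 // cpu=250m,ephemeral-storage=
}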

66
vendor/k8s.io/component-base/cli/flag/flags.go generated vendored Normal file
View File

@ -0,0 +1,66 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
goflag "flag"
"strings"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
)
var underscoreWarnings = make(map[string]bool)
// WordSepNormalizeFunc changes all flags that contain "_" separators
func WordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
if strings.Contains(name, "_") {
return pflag.NormalizedName(strings.Replace(name, "_", "-", -1))
}
return pflag.NormalizedName(name)
}
// WarnWordSepNormalizeFunc changes and warns for flags that contain "_" separators
func WarnWordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
if strings.Contains(name, "_") {
nname := strings.Replace(name, "_", "-", -1)
if _, alreadyWarned := underscoreWarnings[name]; !alreadyWarned {
klog.Warningf("using an underscore in a flag name is not supported. %s has been converted to %s.", name, nname)
underscoreWarnings[name] = true
}
return pflag.NormalizedName(nname)
}
return pflag.NormalizedName(name)
}
// InitFlags normalizes, parses, then logs the command line flags
func InitFlags() {
pflag.CommandLine.SetNormalizeFunc(WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
pflag.Parse()
pflag.VisitAll(func(flag *pflag.Flag) {
klog.V(2).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
})
}
// PrintFlags logs the flags in the flagset
func PrintFlags(flags *pflag.FlagSet) {
flags.VisitAll(func(flag *pflag.Flag) {
klog.V(1).Infof("FLAG: --%s=%q", flag.Name, flag.Value)
})
}
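
A small sketch (illustrative) of wiring WordSepNormalizeFunc into a standalone pflag.FlagSet so underscore and dash spellings resolve to the same flag; the flag name and value are arbitrary.

package main

import (
	"fmt"

	"github.com/spf13/pflag"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
	fs.String("log-dir", "", "log directory")
	_ = fs.Parse([]string{"--log_dir=/tmp"}) // normalized to --log-dir
	v, _ := fs.GetString("log-dir")
	fmt.Println(v) // /tmp
}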

View File

@ -0,0 +1,82 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"sort"
"strings"
)
// LangleSeparatedMapStringString can be set from the command line with the format `--flag "string<string"`.
// Multiple comma-separated key-value pairs in a single invocation are supported. For example: `--flag "a<foo,b<bar"`.
// Multiple flag invocations are supported. For example: `--flag "a<foo" --flag "b<foo"`.
type LangleSeparatedMapStringString struct {
Map *map[string]string
initialized bool // set to true after first Set call
}
// NewLangleSeparatedMapStringString takes a pointer to a map[string]string and returns the
// LangleSeparatedMapStringString flag parsing shim for that map
func NewLangleSeparatedMapStringString(m *map[string]string) *LangleSeparatedMapStringString {
return &LangleSeparatedMapStringString{Map: m}
}
// String implements github.com/spf13/pflag.Value
func (m *LangleSeparatedMapStringString) String() string {
pairs := []string{}
for k, v := range *m.Map {
pairs = append(pairs, fmt.Sprintf("%s<%s", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
// Set implements github.com/spf13/pflag.Value
func (m *LangleSeparatedMapStringString) Set(value string) error {
if m.Map == nil {
return fmt.Errorf("no target (nil pointer to map[string]string)")
}
if !m.initialized || *m.Map == nil {
// clear default values, or allocate if no existing map
*m.Map = make(map[string]string)
m.initialized = true
}
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "<", 2)
if len(arr) != 2 {
return fmt.Errorf("malformed pair, expect string<string")
}
k := strings.TrimSpace(arr[0])
v := strings.TrimSpace(arr[1])
(*m.Map)[k] = v
}
return nil
}
// Type implements github.com/spf13/pflag.Value
func (*LangleSeparatedMapStringString) Type() string {
return "mapStringString"
}
// Empty implements OmitEmpty
func (m *LangleSeparatedMapStringString) Empty() bool {
return len(*m.Map) == 0
}
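
A minimal usage sketch (illustrative only) of the langle-separated shim; keys and values are arbitrary.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var m map[string]string
	v := cliflag.NewLangleSeparatedMapStringString(&m)
	_ = v.Set("a<foo,b<bar")
	fmt.Println(v.String()) // a<foo,b<bar
}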

View File

@ -0,0 +1,90 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"sort"
"strconv"
"strings"
)
// MapStringBool can be set from the command line with the format `--flag "string=bool"`.
// Multiple comma-separated key-value pairs in a single invocation are supported. For example: `--flag "a=true,b=false"`.
// Multiple flag invocations are supported. For example: `--flag "a=true" --flag "b=false"`.
type MapStringBool struct {
Map *map[string]bool
initialized bool
}
// NewMapStringBool takes a pointer to a map[string]string and returns the
// MapStringBool flag parsing shim for that map
func NewMapStringBool(m *map[string]bool) *MapStringBool {
return &MapStringBool{Map: m}
}
// String implements github.com/spf13/pflag.Value
func (m *MapStringBool) String() string {
if m == nil || m.Map == nil {
return ""
}
pairs := []string{}
for k, v := range *m.Map {
pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
// Set implements github.com/spf13/pflag.Value
func (m *MapStringBool) Set(value string) error {
if m.Map == nil {
return fmt.Errorf("no target (nil pointer to map[string]bool)")
}
if !m.initialized || *m.Map == nil {
// clear default values, or allocate if no existing map
*m.Map = make(map[string]bool)
m.initialized = true
}
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "=", 2)
if len(arr) != 2 {
return fmt.Errorf("malformed pair, expect string=bool")
}
k := strings.TrimSpace(arr[0])
v := strings.TrimSpace(arr[1])
boolValue, err := strconv.ParseBool(v)
if err != nil {
return fmt.Errorf("invalid value of %s: %s, err: %v", k, v, err)
}
(*m.Map)[k] = boolValue
}
return nil
}
// Type implements github.com/spf13/pflag.Value
func (*MapStringBool) Type() string {
return "mapStringBool"
}
// Empty implements OmitEmpty
func (m *MapStringBool) Empty() bool {
return len(*m.Map) == 0
}
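
A minimal usage sketch (illustrative only); the feature names are made up for the example.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var m map[string]bool
	v := cliflag.NewMapStringBool(&m)
	_ = v.Set("FeatureA=true,FeatureB=false")
	fmt.Println(m["FeatureA"], v.String()) // true FeatureA=true,FeatureB=false
}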

View File

@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"sort"
"strings"
)
// MapStringString can be set from the command line with the format `--flag "string=string"`.
// Multiple flag invocations are supported. For example: `--flag "a=foo" --flag "b=bar"`. If this should
// be the only accepted form of invocation, `NoSplit` should be set to true.
// Multiple comma-separated key-value pairs in a single invocation are supported if `NoSplit`
// is set to false. For example: `--flag "a=foo,b=bar"`.
type MapStringString struct {
Map *map[string]string
initialized bool
NoSplit bool
}
// NewMapStringString takes a pointer to a map[string]string and returns the
// MapStringString flag parsing shim for that map
func NewMapStringString(m *map[string]string) *MapStringString {
return &MapStringString{Map: m}
}
// NewMapStringStringNoSplit takes a pointer to a map[string]string, sets the `NoSplit`
// field to `true`, and returns the MapStringString flag parsing shim for that map
func NewMapStringStringNoSplit(m *map[string]string) *MapStringString {
return &MapStringString{
Map: m,
NoSplit: true,
}
}
// String implements github.com/spf13/pflag.Value
func (m *MapStringString) String() string {
if m == nil || m.Map == nil {
return ""
}
pairs := []string{}
for k, v := range *m.Map {
pairs = append(pairs, fmt.Sprintf("%s=%s", k, v))
}
sort.Strings(pairs)
return strings.Join(pairs, ",")
}
// Set implements github.com/spf13/pflag.Value
func (m *MapStringString) Set(value string) error {
if m.Map == nil {
return fmt.Errorf("no target (nil pointer to map[string]string)")
}
if !m.initialized || *m.Map == nil {
// clear default values, or allocate if no existing map
*m.Map = make(map[string]string)
m.initialized = true
}
// account for comma-separated key-value pairs in a single invocation
if !m.NoSplit {
for _, s := range strings.Split(value, ",") {
if len(s) == 0 {
continue
}
arr := strings.SplitN(s, "=", 2)
if len(arr) != 2 {
return fmt.Errorf("malformed pair, expect string=string")
}
k := strings.TrimSpace(arr[0])
v := strings.TrimSpace(arr[1])
(*m.Map)[k] = v
}
return nil
}
// account for only one key-value pair in a single invocation
arr := strings.SplitN(value, "=", 2)
if len(arr) != 2 {
return fmt.Errorf("malformed pair, expect string=string")
}
k := strings.TrimSpace(arr[0])
v := strings.TrimSpace(arr[1])
(*m.Map)[k] = v
return nil
}
// Type implements github.com/spf13/pflag.Value
func (*MapStringString) Type() string {
return "mapStringString"
}
// Empty implements OmitEmpty
func (m *MapStringString) Empty() bool {
return len(*m.Map) == 0
}
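
A sketch (illustrative only) contrasting the split and NoSplit behaviors described above; the keys and values are arbitrary.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var split, whole map[string]string
	s := cliflag.NewMapStringString(&split)
	_ = s.Set("k1=v1,k2=v2") // split on commas: two entries
	w := cliflag.NewMapStringStringNoSplit(&whole)
	_ = w.Set("selector=env=prod,tier=web") // one entry, comma preserved in the value
	fmt.Println(len(split), whole["selector"]) // 2 env=prod,tier=web
}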

View File

@ -0,0 +1,113 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"errors"
"flag"
"strings"
)
// NamedCertKey is a flag value parsing "certfile,keyfile" and "certfile,keyfile:name,name,name".
type NamedCertKey struct {
Names []string
CertFile, KeyFile string
}
var _ flag.Value = &NamedCertKey{}
func (nkc *NamedCertKey) String() string {
s := nkc.CertFile + "," + nkc.KeyFile
if len(nkc.Names) > 0 {
s = s + ":" + strings.Join(nkc.Names, ",")
}
return s
}
func (nkc *NamedCertKey) Set(value string) error {
cs := strings.SplitN(value, ":", 2)
var keycert string
if len(cs) == 2 {
var names string
keycert, names = strings.TrimSpace(cs[0]), strings.TrimSpace(cs[1])
if names == "" {
return errors.New("empty names list is not allowed")
}
nkc.Names = nil
for _, name := range strings.Split(names, ",") {
nkc.Names = append(nkc.Names, strings.TrimSpace(name))
}
} else {
nkc.Names = nil
keycert = strings.TrimSpace(cs[0])
}
cs = strings.Split(keycert, ",")
if len(cs) != 2 {
return errors.New("expected comma separated certificate and key file paths")
}
nkc.CertFile = strings.TrimSpace(cs[0])
nkc.KeyFile = strings.TrimSpace(cs[1])
return nil
}
func (*NamedCertKey) Type() string {
return "namedCertKey"
}
// NamedCertKeyArray is a flag value parsing NamedCertKeys, each passed with its own
// flag instance (in contrast to comma separated slices).
type NamedCertKeyArray struct {
value *[]NamedCertKey
changed bool
}
var _ flag.Value = &NamedCertKeyArray{}
// NewNamedCertKeyArray creates a new NamedCertKeyArray with the internal value
// pointing to p.
func NewNamedCertKeyArray(p *[]NamedCertKey) *NamedCertKeyArray {
return &NamedCertKeyArray{
value: p,
}
}
func (a *NamedCertKeyArray) Set(val string) error {
nkc := NamedCertKey{}
err := nkc.Set(val)
if err != nil {
return err
}
if !a.changed {
*a.value = []NamedCertKey{nkc}
a.changed = true
} else {
*a.value = append(*a.value, nkc)
}
return nil
}
func (a *NamedCertKeyArray) Type() string {
return "namedCertKey"
}
func (a *NamedCertKeyArray) String() string {
nkcs := make([]string, 0, len(*a.value))
for i := range *a.value {
nkcs = append(nkcs, (*a.value)[i].String())
}
return "[" + strings.Join(nkcs, ";") + "]"
}
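
A minimal parsing sketch (illustrative only); the file names and host names are placeholders.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var nck cliflag.NamedCertKey
	_ = nck.Set("tls.crt,tls.key:example.com,www.example.com")
	fmt.Println(nck.CertFile, nck.KeyFile, nck.Names)
	// tls.crt tls.key [example.com www.example.com]
}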

41
vendor/k8s.io/component-base/cli/flag/noop.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
goflag "flag"
"github.com/spf13/pflag"
)
// NoOp implements goflag.Value and pflag.Value,
// but has a no-op Set implementation
type NoOp struct{}
var _ goflag.Value = NoOp{}
var _ pflag.Value = NoOp{}
func (NoOp) String() string {
return ""
}
func (NoOp) Set(val string) error {
return nil
}
func (NoOp) Type() string {
return "NoOp"
}

24
vendor/k8s.io/component-base/cli/flag/omitempty.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
// OmitEmpty is an interface for flags to report whether their underlying value
// is "empty." If a flag implements OmitEmpty and returns true for a call to Empty(),
// it is assumed that flag may be omitted from the command line.
type OmitEmpty interface {
Empty() bool
}

105
vendor/k8s.io/component-base/cli/flag/sectioned.go generated vendored Normal file
View File

@ -0,0 +1,105 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"bytes"
"fmt"
"io"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
const (
usageFmt = "Usage:\n %s\n"
)
// NamedFlagSets stores named flag sets in the order of calling FlagSet.
type NamedFlagSets struct {
// Order is an ordered list of flag set names.
Order []string
// FlagSets stores the flag sets by name.
FlagSets map[string]*pflag.FlagSet
// NormalizeNameFunc is the normalize function used to initialize FlagSets created by NamedFlagSets.
NormalizeNameFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName
}
// FlagSet returns the flag set with the given name and adds it to the
// ordered name list if it is not in there yet.
func (nfs *NamedFlagSets) FlagSet(name string) *pflag.FlagSet {
if nfs.FlagSets == nil {
nfs.FlagSets = map[string]*pflag.FlagSet{}
}
if _, ok := nfs.FlagSets[name]; !ok {
flagSet := pflag.NewFlagSet(name, pflag.ExitOnError)
flagSet.SetNormalizeFunc(pflag.CommandLine.GetNormalizeFunc())
if nfs.NormalizeNameFunc != nil {
flagSet.SetNormalizeFunc(nfs.NormalizeNameFunc)
}
nfs.FlagSets[name] = flagSet
nfs.Order = append(nfs.Order, name)
}
return nfs.FlagSets[name]
}
// PrintSections prints the given named flag sets in sections, with the given maximal column number.
// If cols is zero, lines are not wrapped.
func PrintSections(w io.Writer, fss NamedFlagSets, cols int) {
for _, name := range fss.Order {
fs := fss.FlagSets[name]
if !fs.HasFlags() {
continue
}
wideFS := pflag.NewFlagSet("", pflag.ExitOnError)
wideFS.AddFlagSet(fs)
var zzz string
if cols > 24 {
zzz = strings.Repeat("z", cols-24)
wideFS.Int(zzz, 0, strings.Repeat("z", cols-24))
}
var buf bytes.Buffer
fmt.Fprintf(&buf, "\n%s flags:\n\n%s", strings.ToUpper(name[:1])+name[1:], wideFS.FlagUsagesWrapped(cols))
if cols > 24 {
i := strings.Index(buf.String(), zzz)
lines := strings.Split(buf.String()[:i], "\n")
fmt.Fprint(w, strings.Join(lines[:len(lines)-1], "\n"))
fmt.Fprintln(w)
} else {
fmt.Fprint(w, buf.String())
}
}
}
// SetUsageAndHelpFunc sets both the usage and help functions.
// It prints only the flag sets we need instead of all of them.
func SetUsageAndHelpFunc(cmd *cobra.Command, fss NamedFlagSets, cols int) {
cmd.SetUsageFunc(func(cmd *cobra.Command) error {
fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
PrintSections(cmd.OutOrStderr(), fss, cols)
return nil
})
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
PrintSections(cmd.OutOrStdout(), fss, cols)
})
}
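
A sketch (illustrative, not part of the diff) of grouping flags with NamedFlagSets and wiring the sectioned help into a cobra command; the command and flag names are invented for the example.

package main

import (
	"os"

	"github.com/spf13/cobra"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	cmd := &cobra.Command{Use: "demo"}
	var fss cliflag.NamedFlagSets
	fss.FlagSet("logging").Bool("verbose", false, "enable verbose output")
	fss.FlagSet("misc").String("config", "", "path to a config file")
	// Register the grouped flags on the command, then group them in --help too.
	for _, name := range fss.Order {
		cmd.Flags().AddFlagSet(fss.FlagSets[name])
	}
	cliflag.SetUsageAndHelpFunc(cmd, fss, 80)
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}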

56
vendor/k8s.io/component-base/cli/flag/string_flag.go generated vendored Normal file
View File

@ -0,0 +1,56 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
// StringFlag is a string flag compatible with flags and pflags that keeps track of whether it had a value supplied or not.
type StringFlag struct {
// If Set has been invoked this value is true
provided bool
// The exact value provided on the flag
value string
}
func NewStringFlag(defaultVal string) StringFlag {
return StringFlag{value: defaultVal}
}
func (f *StringFlag) Default(value string) {
f.value = value
}
func (f StringFlag) String() string {
return f.value
}
func (f StringFlag) Value() string {
return f.value
}
func (f *StringFlag) Set(value string) error {
f.value = value
f.provided = true
return nil
}
func (f StringFlag) Provided() bool {
return f.provided
}
func (f *StringFlag) Type() string {
return "string"
}
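
A minimal sketch (illustrative only) of the Provided tracking; file names are placeholders.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	f := cliflag.NewStringFlag("default.conf")
	fmt.Println(f.Provided(), f.Value()) // false default.conf
	_ = f.Set("custom.conf")
	fmt.Println(f.Provided(), f.Value()) // true custom.conf
}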

View File

@ -0,0 +1,62 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
goflag "flag"
"fmt"
"strings"
"github.com/spf13/pflag"
)
// StringSlice implements goflag.Value and pflag.Value,
// and allows Set to be invoked repeatedly to accumulate values.
type StringSlice struct {
value *[]string
changed bool
}
func NewStringSlice(s *[]string) *StringSlice {
return &StringSlice{value: s}
}
var _ goflag.Value = &StringSlice{}
var _ pflag.Value = &StringSlice{}
func (s *StringSlice) String() string {
if s == nil || s.value == nil {
return ""
}
return strings.Join(*s.value, " ")
}
func (s *StringSlice) Set(val string) error {
if s.value == nil {
return fmt.Errorf("no target (nil pointer to []string)")
}
if !s.changed {
*s.value = make([]string, 0)
}
*s.value = append(*s.value, val)
s.changed = true
return nil
}
func (StringSlice) Type() string {
return "sliceString"
}
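
A minimal sketch (illustrative only) of the accumulating Set behavior.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var vals []string
	s := cliflag.NewStringSlice(&vals)
	_ = s.Set("one")
	_ = s.Set("two") // repeated Set calls accumulate
	fmt.Println(vals, "/", s.String()) // [one two] / one two
}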

83
vendor/k8s.io/component-base/cli/flag/tristate.go generated vendored Normal file
View File

@ -0,0 +1,83 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"strconv"
)
// Tristate is a flag compatible with flags and pflags that
// keeps track of whether it had a value supplied or not.
type Tristate int
const (
Unset Tristate = iota // 0
True
False
)
func (f *Tristate) Default(value bool) {
*f = triFromBool(value)
}
func (f Tristate) String() string {
b := boolFromTri(f)
return fmt.Sprintf("%t", b)
}
func (f Tristate) Value() bool {
b := boolFromTri(f)
return b
}
func (f *Tristate) Set(value string) error {
boolVal, err := strconv.ParseBool(value)
if err != nil {
return err
}
*f = triFromBool(boolVal)
return nil
}
func (f Tristate) Provided() bool {
if f != Unset {
return true
}
return false
}
func (f *Tristate) Type() string {
return "tristate"
}
func boolFromTri(t Tristate) bool {
if t == True {
return true
} else {
return false
}
}
func triFromBool(b bool) Tristate {
if b {
return True
} else {
return False
}
}
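
A minimal sketch (illustrative only) showing how Tristate distinguishes a flag that was never set from one explicitly set.

package main

import (
	"fmt"

	cliflag "k8s.io/component-base/cli/flag"
)

func main() {
	var t cliflag.Tristate
	fmt.Println(t.Provided()) // false: still Unset
	_ = t.Set("true")
	fmt.Println(t.Provided(), t.Value()) // true true
}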

32
vendor/k8s.io/component-base/logs/api/v1/doc.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// Package v1 contains the configuration API for logging.
//
// The intention is to only have a single version of this API, potentially with
// new fields added over time in a backwards-compatible manner. Fields for
// alpha or beta features are allowed as long as they are defined so that not
// changing the defaults leaves those features disabled.
//
// The "v1" package name is just a reminder that API compatibility rules apply,
// not an indication of the stability of all features covered by it.
// The LoggingAlphaOptions and LoggingBetaOptions feature gates control whether
// these unstable features can get enabled. This can be used to ensure that
// command invocations do not accidentally rely on unstable features.
package v1 // import "k8s.io/component-base/logs/api/v1"

View File

@ -0,0 +1,72 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"k8s.io/component-base/featuregate"
)
const (
// owner: @pohly
// kep: https://kep.k8s.io/3077
// alpha: v1.24
// beta: v1.30
//
// Enables looking up a logger from a context.Context instead of using
// the global fallback logger and manipulating the logger that is
// used by a call chain.
ContextualLogging featuregate.Feature = "ContextualLogging"
// contextualLoggingDefault is now true because the feature reached beta
// and performance comparisons showed no relevant degradation when
// enabling it.
contextualLoggingDefault = true
// Allow fine-tuning of experimental, alpha-quality logging options.
//
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// we want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of logging options whose quality level is alpha.
// - will never graduate to beta or stable.
LoggingAlphaOptions featuregate.Feature = "LoggingAlphaOptions"
// Allow fine-tuning of experimental, beta-quality logging options.
//
// Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ
// we want to avoid a proliferation of feature gates. This feature gate:
// - will guard *a group* of logging options whose quality level is beta.
// - is thus *introduced* as beta
// - will never graduate to stable.
LoggingBetaOptions featuregate.Feature = "LoggingBetaOptions"
// Stable logging options. Always enabled.
LoggingStableOptions featuregate.Feature = "LoggingStableOptions"
)
func featureGates() map[featuregate.Feature]featuregate.FeatureSpec {
return map[featuregate.Feature]featuregate.FeatureSpec{
ContextualLogging: {Default: contextualLoggingDefault, PreRelease: featuregate.Beta},
LoggingAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
LoggingBetaOptions: {Default: true, PreRelease: featuregate.Beta},
}
}
// AddFeatureGates adds all feature gates used by this package.
func AddFeatureGates(mutableFeatureGate featuregate.MutableFeatureGate) error {
return mutableFeatureGate.Add(featureGates())
}
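
A sketch (illustrative only) of registering these gates on a mutable feature gate and querying one of them.

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
	logsapi "k8s.io/component-base/logs/api/v1"
)

func main() {
	gate := featuregate.NewFeatureGate()
	if err := logsapi.AddFeatureGates(gate); err != nil {
		panic(err)
	}
	// ContextualLogging defaults to true now that the feature is beta.
	fmt.Println(gate.Enabled(logsapi.ContextualLogging))
}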

449
vendor/k8s.io/component-base/logs/api/v1/options.go generated vendored Normal file
View File

@ -0,0 +1,449 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"errors"
"flag"
"fmt"
"io"
"math"
"os"
"strings"
"sync/atomic"
"time"
"github.com/google/go-cmp/cmp"
"github.com/spf13/pflag"
"k8s.io/klog/v2"
"k8s.io/klog/v2/textlogger"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/validation/field"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/component-base/featuregate"
"k8s.io/component-base/logs/internal/setverbositylevel"
"k8s.io/component-base/logs/klogflags"
)
const (
// LogFlushFreqDefault is the default for the corresponding command line
// parameter.
LogFlushFreqDefault = 5 * time.Second
)
const (
// LogFlushFreqFlagName is the name of the command line parameter.
// Depending on how flags get added, it is either a stand-alone
// value (logs.AddFlags) or part of LoggingConfiguration.
LogFlushFreqFlagName = "log-flush-frequency"
)
// NewLoggingConfiguration returns a struct holding the default logging configuration.
func NewLoggingConfiguration() *LoggingConfiguration {
c := LoggingConfiguration{}
SetRecommendedLoggingConfiguration(&c)
return &c
}
// Applying configurations multiple times is not safe unless it's guaranteed that there
// are no goroutines which might call logging functions. The default for ValidateAndApply
// and ValidateAndApplyWithOptions is to return an error when called more than once.
// Binaries and unit tests can override that behavior.
var ReapplyHandling = ReapplyHandlingError
type ReapplyHandlingType int
const (
// ReapplyHandlingError is the default: calling ValidateAndApply or
// ValidateAndApplyWithOptions again returns an error.
ReapplyHandlingError ReapplyHandlingType = iota
// ReapplyHandlingIgnoreUnchanged silently ignores any additional calls of
// ValidateAndApply or ValidateAndApplyWithOptions if the configuration
// is unchanged, otherwise they return an error.
ReapplyHandlingIgnoreUnchanged
)
// ValidateAndApply combines validation and application of the logging configuration.
// This should be invoked as early as possible because then the rest of the program
// startup (including validation of other options) will already run with the final
// logging configuration.
//
// The optional FeatureGate controls logging features. If nil, the default for
// these features is used.
//
// Logging options must be applied as early as possible during the program
// startup. Some changes are global and cannot be done safely when there are
// already goroutines running.
func ValidateAndApply(c *LoggingConfiguration, featureGate featuregate.FeatureGate) error {
return validateAndApply(c, nil, featureGate, nil)
}
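
A sketch (illustrative, not part of the diff) of the recommended early-startup flow described above: build the configuration, bind its flags, parse, then validate and apply before other work. The flag set name is arbitrary; AddFlags is defined later in this file.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
	"k8s.io/klog/v2"

	logsapi "k8s.io/component-base/logs/api/v1"
)

func main() {
	c := logsapi.NewLoggingConfiguration()
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
	logsapi.AddFlags(c, fs)
	_ = fs.Parse(os.Args[1:])
	// Apply as early as possible; a nil FeatureGate uses the defaults.
	if err := logsapi.ValidateAndApply(c, nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	klog.InfoS("logging configured", "format", c.Format)
}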
// ValidateAndApplyWithOptions is a variant of ValidateAndApply which accepts
// additional options beyond those that can be configured through the API. This
// is meant for testing.
//
// Logging options must be applied as early as possible during the program
// startup. Some changes are global and cannot be done safely when there are
// already goroutines running.
func ValidateAndApplyWithOptions(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate) error {
return validateAndApply(c, options, featureGate, nil)
}
// +k8s:deepcopy-gen=false
// LoggingOptions can be used with ValidateAndApplyWithOptions to override
// certain global defaults.
type LoggingOptions struct {
// ErrorStream can be used to override the os.Stderr default.
ErrorStream io.Writer
// InfoStream can be used to override the os.Stdout default.
InfoStream io.Writer
}
// ValidateAndApplyAsField is a variant of ValidateAndApply that should be used
// when the LoggingConfiguration is embedded in some larger configuration
// structure.
func ValidateAndApplyAsField(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) error {
return validateAndApply(c, nil, featureGate, fldPath)
}
func validateAndApply(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate, fldPath *field.Path) error {
errs := Validate(c, featureGate, fldPath)
if len(errs) > 0 {
return errs.ToAggregate()
}
return apply(c, options, featureGate)
}
// Validate can be used to check for invalid settings without applying them.
// Most binaries should validate and apply the logging configuration as soon
// as possible via ValidateAndApply. The field path is optional: nil
// can be passed when the struct is not embedded in some larger struct.
func Validate(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList {
errs := field.ErrorList{}
if c.Format != DefaultLogFormat {
// WordSepNormalizeFunc is just a guess. Commands should use it,
// but we cannot know for sure.
allFlags := unsupportedLoggingFlags(cliflag.WordSepNormalizeFunc)
for _, f := range allFlags {
if f.DefValue != f.Value.String() {
errs = append(errs, field.Invalid(fldPath.Child("format"), c.Format, fmt.Sprintf("Non-default format doesn't honor flag: %s", f.Name)))
}
}
}
format, err := logRegistry.get(c.Format)
if err != nil {
errs = append(errs, field.Invalid(fldPath.Child("format"), c.Format, "Unsupported log format"))
} else if format != nil {
if format.feature != LoggingStableOptions {
enabled := featureGates()[format.feature].Default
if featureGate != nil {
enabled = featureGate.Enabled(format.feature)
}
if !enabled {
errs = append(errs, field.Forbidden(fldPath.Child("format"), fmt.Sprintf("Log format %s is disabled, see %s feature", c.Format, format.feature)))
}
}
}
// The type in our struct is uint32, but klog only accepts positive int32.
if c.Verbosity > math.MaxInt32 {
errs = append(errs, field.Invalid(fldPath.Child("verbosity"), c.Verbosity, fmt.Sprintf("Must be <= %d", math.MaxInt32)))
}
vmoduleFldPath := fldPath.Child("vmodule")
if len(c.VModule) > 0 && c.Format != "" && c.Format != "text" {
errs = append(errs, field.Forbidden(vmoduleFldPath, "Only supported for text log format"))
}
for i, item := range c.VModule {
if item.FilePattern == "" {
errs = append(errs, field.Required(vmoduleFldPath.Index(i), "File pattern must not be empty"))
}
if strings.ContainsAny(item.FilePattern, "=,") {
errs = append(errs, field.Invalid(vmoduleFldPath.Index(i), item.FilePattern, "File pattern must not contain equal sign or comma"))
}
if item.Verbosity > math.MaxInt32 {
errs = append(errs, field.Invalid(vmoduleFldPath.Index(i), item.Verbosity, fmt.Sprintf("Must be <= %d", math.MaxInt32)))
}
}
errs = append(errs, validateFormatOptions(c, featureGate, fldPath.Child("options"))...)
return errs
}
func validateFormatOptions(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList {
errs := field.ErrorList{}
errs = append(errs, validateTextOptions(c, featureGate, fldPath.Child("text"))...)
errs = append(errs, validateJSONOptions(c, featureGate, fldPath.Child("json"))...)
return errs
}
func validateTextOptions(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList {
errs := field.ErrorList{}
if gate := LoggingAlphaOptions; c.Options.Text.SplitStream && !featureEnabled(featureGate, gate) {
errs = append(errs, field.Forbidden(fldPath.Child("splitStream"), fmt.Sprintf("Feature %s is disabled", gate)))
}
if gate := LoggingAlphaOptions; c.Options.Text.InfoBufferSize.Value() != 0 && !featureEnabled(featureGate, gate) {
errs = append(errs, field.Forbidden(fldPath.Child("infoBufferSize"), fmt.Sprintf("Feature %s is disabled", gate)))
}
return errs
}
func validateJSONOptions(c *LoggingConfiguration, featureGate featuregate.FeatureGate, fldPath *field.Path) field.ErrorList {
errs := field.ErrorList{}
if gate := LoggingAlphaOptions; c.Options.JSON.SplitStream && !featureEnabled(featureGate, gate) {
errs = append(errs, field.Forbidden(fldPath.Child("splitStream"), fmt.Sprintf("Feature %s is disabled", gate)))
}
if gate := LoggingAlphaOptions; c.Options.JSON.InfoBufferSize.Value() != 0 && !featureEnabled(featureGate, gate) {
errs = append(errs, field.Forbidden(fldPath.Child("infoBufferSize"), fmt.Sprintf("Feature %s is disabled", gate)))
}
return errs
}
func featureEnabled(featureGate featuregate.FeatureGate, feature featuregate.Feature) bool {
enabled := false
if featureGate != nil {
enabled = featureGate.Enabled(feature)
}
return enabled
}
func apply(c *LoggingConfiguration, options *LoggingOptions, featureGate featuregate.FeatureGate) error {
p := &parameters{
C: c,
Options: options,
ContextualLoggingEnabled: contextualLoggingDefault,
}
if featureGate != nil {
p.ContextualLoggingEnabled = featureGate.Enabled(ContextualLogging)
}
oldP := applyParameters.Load()
if oldP != nil {
switch ReapplyHandling {
case ReapplyHandlingError:
return errors.New("logging configuration was already applied earlier, changing it is not allowed")
case ReapplyHandlingIgnoreUnchanged:
if diff := cmp.Diff(oldP, p); diff != "" {
return fmt.Errorf("the logging configuration should not be changed after setting it once (- old setting, + new setting):\n%s", diff)
}
return nil
default:
return fmt.Errorf("invalid value %d for ReapplyHandling", ReapplyHandling)
}
}
applyParameters.Store(p)
// If the log format does not provide a factory, fall back to klog's default logger.
format, _ := logRegistry.get(c.Format)
if format.factory == nil {
klog.ClearLogger()
} else {
if options == nil {
options = &LoggingOptions{
ErrorStream: os.Stderr,
InfoStream: os.Stdout,
}
}
log, control := format.factory.Create(*c, *options)
if control.SetVerbosityLevel != nil {
setverbositylevel.Mutex.Lock()
defer setverbositylevel.Mutex.Unlock()
setverbositylevel.Callbacks = append(setverbositylevel.Callbacks, control.SetVerbosityLevel)
}
opts := []klog.LoggerOption{
klog.ContextualLogger(p.ContextualLoggingEnabled),
klog.FlushLogger(control.Flush),
}
if writer, ok := log.GetSink().(textlogger.KlogBufferWriter); ok {
opts = append(opts, klog.WriteKlogBuffer(writer.WriteKlogBuffer))
}
klog.SetLoggerWithOptions(log, opts...)
}
if err := loggingFlags.Lookup("v").Value.Set(VerbosityLevelPflag(&c.Verbosity).String()); err != nil {
return fmt.Errorf("internal error while setting klog verbosity: %v", err)
}
if err := loggingFlags.Lookup("vmodule").Value.Set(VModuleConfigurationPflag(&c.VModule).String()); err != nil {
return fmt.Errorf("internal error while setting klog vmodule: %v", err)
}
setSlogDefaultLogger()
klog.StartFlushDaemon(c.FlushFrequency.Duration.Duration)
klog.EnableContextualLogging(p.ContextualLoggingEnabled)
return nil
}
type parameters struct {
C *LoggingConfiguration
Options *LoggingOptions
ContextualLoggingEnabled bool
}
var applyParameters atomic.Pointer[parameters]
// ResetForTest restores the default settings. This is not thread-safe and should only
// be used when there are no goroutines running. The intended users are unit
// tests in other packages.
func ResetForTest(featureGate featuregate.FeatureGate) error {
oldP := applyParameters.Load()
if oldP == nil {
// Nothing to do.
return nil
}
// This makes it possible to call apply again without triggering errors.
applyParameters.Store(nil)
// Restore defaults. Shouldn't fail, but check anyway.
config := NewLoggingConfiguration()
if err := ValidateAndApply(config, featureGate); err != nil {
return fmt.Errorf("apply default configuration: %v", err)
}
// And again...
applyParameters.Store(nil)
return nil
}
// AddFlags adds command line flags for the configuration.
func AddFlags(c *LoggingConfiguration, fs *pflag.FlagSet) {
addFlags(c, fs)
}
// AddGoFlags is a variant of AddFlags for a standard FlagSet.
func AddGoFlags(c *LoggingConfiguration, fs *flag.FlagSet) {
addFlags(c, goFlagSet{FlagSet: fs})
}
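// Editor's sketch (illustrative, not part of the vendored file): a typical
// caller allocates a configuration, binds the flags, parses them, and then
// validates and applies the result; NewLoggingConfiguration and
// ValidateAndApply are the exported entry points used elsewhere in this
// package.
//
//	c := NewLoggingConfiguration()
//	AddFlags(c, pflag.CommandLine)
//	pflag.Parse()
//	if err := ValidateAndApply(c, nil); err != nil { // nil feature gate: defaults apply
//		panic(err)
//	}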
// flagSet is the interface implemented by pflag.FlagSet, with
// just those methods defined which are needed by addFlags.
type flagSet interface {
BoolVar(p *bool, name string, value bool, usage string)
DurationVar(p *time.Duration, name string, value time.Duration, usage string)
StringVar(p *string, name string, value string, usage string)
Var(value pflag.Value, name string, usage string)
VarP(value pflag.Value, name, shorthand, usage string)
}
// goFlagSet implements flagSet for a stdlib flag.FlagSet.
type goFlagSet struct {
*flag.FlagSet
}
func (fs goFlagSet) Var(value pflag.Value, name string, usage string) {
fs.FlagSet.Var(value, name, usage)
}
func (fs goFlagSet) VarP(value pflag.Value, name, shorthand, usage string) {
// Ignore shorthand, it's not needed and not supported.
fs.FlagSet.Var(value, name, usage)
}
// addFlags can be used with both flag.FlagSet and pflag.FlagSet. The internal
// interface definition avoids duplicating this code.
func addFlags(c *LoggingConfiguration, fs flagSet) {
formats := logRegistry.list()
fs.StringVar(&c.Format, "logging-format", c.Format, fmt.Sprintf("Sets the log format. Permitted formats: %s.", formats))
// No new log formats should be added after the flag options have been generated.
logRegistry.freeze()
fs.DurationVar(&c.FlushFrequency.Duration.Duration, LogFlushFreqFlagName, c.FlushFrequency.Duration.Duration, "Maximum number of seconds between log flushes")
fs.VarP(VerbosityLevelPflag(&c.Verbosity), "v", "v", "number for the log level verbosity")
fs.Var(VModuleConfigurationPflag(&c.VModule), "vmodule", "comma-separated list of pattern=N settings for file-filtered logging (only works for text log format)")
fs.BoolVar(&c.Options.Text.SplitStream, "log-text-split-stream", false, "[Alpha] In text format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.")
fs.Var(&c.Options.Text.InfoBufferSize, "log-text-info-buffer-size", "[Alpha] In text format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.")
// JSON options. We only register them if "json" is a valid format. The
// config file API, however, always has them.
if _, err := logRegistry.get("json"); err == nil {
fs.BoolVar(&c.Options.JSON.SplitStream, "log-json-split-stream", false, "[Alpha] In JSON format, write error messages to stderr and info messages to stdout. The default is to write a single stream to stdout. Enable the LoggingAlphaOptions feature gate to use this.")
fs.Var(&c.Options.JSON.InfoBufferSize, "log-json-info-buffer-size", "[Alpha] In JSON format with split output streams, the info messages can be buffered for a while to increase performance. The default value of zero bytes disables buffering. The size can be specified as number of bytes (512), multiples of 1000 (1K), multiples of 1024 (2Ki), or powers of those (3M, 4G, 5Mi, 6Gi). Enable the LoggingAlphaOptions feature gate to use this.")
}
}
// SetRecommendedLoggingConfiguration sets the default logging configuration
// for fields that are unset.
//
// Consumers who embed LoggingConfiguration in their own configuration structs
// may set custom defaults and then should call this function to add the
// global defaults.
func SetRecommendedLoggingConfiguration(c *LoggingConfiguration) {
if c.Format == "" {
c.Format = "text"
}
if c.FlushFrequency.Duration.Duration == 0 {
c.FlushFrequency.Duration.Duration = LogFlushFreqDefault
c.FlushFrequency.SerializeAsString = true
}
setRecommendedOutputRouting(&c.Options.Text.OutputRoutingOptions)
setRecommendedOutputRouting(&c.Options.JSON.OutputRoutingOptions)
}
func setRecommendedOutputRouting(o *OutputRoutingOptions) {
var empty resource.QuantityValue
if o.InfoBufferSize == empty {
o.InfoBufferSize = resource.QuantityValue{
// This is similar to, but not quite the same as, a
// default-constructed instance.
Quantity: *resource.NewQuantity(0, resource.DecimalSI),
}
// This sets the unexported Quantity.s which will be compared
// by reflect.DeepEqual in some tests.
_ = o.InfoBufferSize.String()
}
}
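// Editor's sketch (illustrative): applying the recommended defaults to an
// empty configuration fills in the format and flush frequency.
//
//	c := &LoggingConfiguration{}
//	SetRecommendedLoggingConfiguration(c)
//	// c.Format == "text", c.FlushFrequency.Duration.Duration == LogFlushFreqDefault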
// loggingFlags captures the state of the logging flags, in particular their default value
// before flag parsing. It is used by unsupportedLoggingFlags.
var loggingFlags pflag.FlagSet
func init() {
var fs flag.FlagSet
klogflags.Init(&fs)
loggingFlags.AddGoFlagSet(&fs)
}
// List of logs (k8s.io/klog + k8s.io/component-base/logs) flags supported by all logging formats
var supportedLogsFlags = map[string]struct{}{
"v": {},
}
// unsupportedLoggingFlags lists unsupported logging flags. The normalize
// function is optional.
func unsupportedLoggingFlags(normalizeFunc func(f *pflag.FlagSet, name string) pflag.NormalizedName) []*pflag.Flag {
// k8s.io/component-base/logs and klog flags
pfs := &pflag.FlagSet{}
loggingFlags.VisitAll(func(flag *pflag.Flag) {
if _, found := supportedLogsFlags[flag.Name]; !found {
// Normalization changes flag.Name, so make a copy.
clone := *flag
pfs.AddFlag(&clone)
}
})
// Apply normalization.
pfs.SetNormalizeFunc(normalizeFunc)
var allFlags []*pflag.Flag
pfs.VisitAll(func(flag *pflag.Flag) {
allFlags = append(allFlags, flag)
})
return allFlags
}

View File

@ -0,0 +1,24 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
func setSlogDefaultLogger() {
// Do nothing when built with Go < 1.21.
}

View File

@ -0,0 +1,37 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"log/slog"
"github.com/go-logr/logr"
"k8s.io/klog/v2"
)
// setSlogDefaultLogger sets the global slog default logger to the same default
// that klog currently uses.
func setSlogDefaultLogger() {
// klog.Background() always returns a valid logr.Logger, regardless of
// how logging was configured. We just need to turn it into a
// slog.Handler. SetDefault then needs a slog.Logger.
handler := logr.ToSlogHandler(klog.Background())
slog.SetDefault(slog.New(handler))
}
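// Editor's note (illustrative): once the slog default has been set, plain
// slog calls from any dependency end up in klog's configured backend, e.g.
//
//	slog.Info("pod synced", "pod", "kube-system/coredns-xyz")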

113
vendor/k8s.io/component-base/logs/api/v1/pflags.go generated vendored Normal file
View File

@ -0,0 +1,113 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"strconv"
"strings"
"github.com/spf13/pflag"
)
// VModuleConfigurationPflag implements the pflag.Value interface for a
// VModuleConfiguration. The value pointer must not be nil.
func VModuleConfigurationPflag(value *VModuleConfiguration) pflag.Value {
return vmoduleConfigurationPFlag{value}
}
type vmoduleConfigurationPFlag struct {
value *VModuleConfiguration
}
// String returns the -vmodule parameter (comma-separated list of pattern=N).
func (wrapper vmoduleConfigurationPFlag) String() string {
if wrapper.value == nil {
return ""
}
var patterns []string
for _, item := range *wrapper.value {
patterns = append(patterns, fmt.Sprintf("%s=%d", item.FilePattern, item.Verbosity))
}
return strings.Join(patterns, ",")
}
// Set parses the -vmodule parameter (comma-separated list of pattern=N).
func (wrapper vmoduleConfigurationPFlag) Set(value string) error {
// This code mirrors https://github.com/kubernetes/klog/blob/9ad246211af1ed84621ee94a26fcce0038b69cd1/klog.go#L287-L313
for _, pat := range strings.Split(value, ",") {
if len(pat) == 0 {
// Empty strings such as from a trailing comma can be ignored.
continue
}
patLev := strings.Split(pat, "=")
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
return fmt.Errorf("%q does not have the pattern=N format", pat)
}
pattern := patLev[0]
// 31 instead of 32 to ensure that it also fits into int32.
v, err := strconv.ParseUint(patLev[1], 10, 31)
if err != nil {
return fmt.Errorf("parsing verbosity in %q: %v", pat, err)
}
*wrapper.value = append(*wrapper.value, VModuleItem{FilePattern: pattern, Verbosity: VerbosityLevel(v)})
}
return nil
}
func (wrapper vmoduleConfigurationPFlag) Type() string {
return "pattern=N,..."
}
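// Editor's sketch (illustrative): Set and String round-trip the klog
// -vmodule syntax.
//
//	var cfg VModuleConfiguration
//	v := VModuleConfigurationPflag(&cfg)
//	_ = v.Set("gc*=3,kubelet=5")
//	fmt.Println(v.String()) // gc*=3,kubelet=5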
// VerbosityLevelPflag implements the pflag.Value interface for a verbosity
// level value.
func VerbosityLevelPflag(value *VerbosityLevel) pflag.Value {
return verbosityLevelPflag{value}
}
type verbosityLevelPflag struct {
value *VerbosityLevel
}
func (wrapper verbosityLevelPflag) String() string {
if wrapper.value == nil {
return "0"
}
return strconv.FormatInt(int64(*wrapper.value), 10)
}
func (wrapper verbosityLevelPflag) Get() interface{} {
if wrapper.value == nil {
return VerbosityLevel(0)
}
return *wrapper.value
}
func (wrapper verbosityLevelPflag) Set(value string) error {
// Limited to int32 for compatibility with klog.
v, err := strconv.ParseUint(value, 10, 31)
if err != nil {
return err
}
*wrapper.value = VerbosityLevel(v)
return nil
}
func (wrapper verbosityLevelPflag) Type() string {
return "Level"
}

135
vendor/k8s.io/component-base/logs/api/v1/registry.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"fmt"
"sort"
"strings"
"sync"
"github.com/go-logr/logr"
"k8s.io/component-base/featuregate"
)
var logRegistry = newLogFormatRegistry()
// logFormatRegistry stores factories for all supported logging formats.
type logFormatRegistry struct {
mutex sync.Mutex
registry map[string]logFormat
frozen bool
}
type logFormat struct {
factory LogFormatFactory
feature featuregate.Feature
}
// +k8s:deepcopy-gen=false
// RuntimeControl provides operations that aren't available through the normal
// Logger or LogSink API.
type RuntimeControl struct {
// Flush ensures that all in-memory data is written.
// May be nil.
Flush func()
// SetVerbosityLevel changes the level for all Logger instances
// derived from the initial one. May be nil.
//
// The parameter is intentionally a plain uint32 instead of
// VerbosityLevel to enable implementations that don't need to import
// the API (helps avoid circular dependencies).
SetVerbosityLevel func(v uint32) error
}
// LogFormatFactory provides support for a certain additional,
// non-default log format.
type LogFormatFactory interface {
// Create returns a logger with the requested configuration.
Create(c LoggingConfiguration, o LoggingOptions) (logr.Logger, RuntimeControl)
}
// RegisterLogFormat registers support for a new logging format. This must be called
// before using any of the methods in LoggingConfiguration. The feature must
// be one of those defined in this package (typically LoggingAlphaOptions,
// LoggingBetaOptions or LoggingStableOptions).
func RegisterLogFormat(name string, factory LogFormatFactory, feature featuregate.Feature) error {
return logRegistry.register(name, logFormat{factory, feature})
}
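// Editor's sketch (illustrative): a hypothetical "logfmt" format could be
// registered from an init function; logfmtFactory is invented here and
// would have to implement LogFormatFactory.
//
//	func init() {
//		if err := RegisterLogFormat("logfmt", logfmtFactory{}, LoggingBetaOptions); err != nil {
//			panic(err)
//		}
//	}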
func newLogFormatRegistry() *logFormatRegistry {
registry := &logFormatRegistry{
registry: make(map[string]logFormat),
frozen: false,
}
_ = registry.register(DefaultLogFormat, logFormat{factory: textFactory{}, feature: LoggingStableOptions})
return registry
}
// register adds a new log format. It's an error to modify an existing one.
func (lfr *logFormatRegistry) register(name string, format logFormat) error {
lfr.mutex.Lock()
defer lfr.mutex.Unlock()
if lfr.frozen {
return fmt.Errorf("log format registry is frozen, unable to register log format %s", name)
}
if _, ok := lfr.registry[name]; ok {
return fmt.Errorf("log format: %s already exists", name)
}
if _, ok := featureGates()[format.feature]; !ok && format.feature != LoggingStableOptions {
return fmt.Errorf("log format %s: unsupported feature gate %s", name, format.feature)
}
lfr.registry[name] = format
return nil
}
// get returns the log format registered under the given name.
func (lfr *logFormatRegistry) get(name string) (*logFormat, error) {
lfr.mutex.Lock()
defer lfr.mutex.Unlock()
format, ok := lfr.registry[name]
if !ok {
return nil, fmt.Errorf("log format: %s does not exist", name)
}
return &format, nil
}
// list names of registered log formats, including feature gates (sorted)
func (lfr *logFormatRegistry) list() string {
lfr.mutex.Lock()
defer lfr.mutex.Unlock()
formats := make([]string, 0, len(lfr.registry))
for name, format := range lfr.registry {
item := fmt.Sprintf(`"%s"`, name)
if format.feature != LoggingStableOptions {
item += fmt.Sprintf(" (gated by %s)", format.feature)
}
formats = append(formats, item)
}
sort.Strings(formats)
return strings.Join(formats, ", ")
}
// freeze prevents further modifications of the registered log formats.
func (lfr *logFormatRegistry) freeze() {
lfr.mutex.Lock()
defer lfr.mutex.Unlock()
lfr.frozen = true
}

142
vendor/k8s.io/component-base/logs/api/v1/text.go generated vendored Normal file
View File

@ -0,0 +1,142 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"bufio"
"fmt"
"io"
"sync"
"github.com/go-logr/logr"
"k8s.io/component-base/featuregate"
"k8s.io/klog/v2/textlogger"
)
// textFactory produces klog text logger instances.
type textFactory struct{}
var _ LogFormatFactory = textFactory{}
func (f textFactory) Feature() featuregate.Feature {
return LoggingStableOptions
}
func (f textFactory) Create(c LoggingConfiguration, o LoggingOptions) (logr.Logger, RuntimeControl) {
output := o.ErrorStream
var flush func()
if c.Options.Text.SplitStream {
r := &klogMsgRouter{
info: o.InfoStream,
error: o.ErrorStream,
}
size := c.Options.Text.InfoBufferSize.Value()
if size > 0 {
// Prevent integer overflow.
if size > 2*1024*1024*1024 {
size = 2 * 1024 * 1024 * 1024
}
info := newBufferedWriter(r.info, int(size))
flush = info.Flush
r.info = info
}
output = r
}
options := []textlogger.ConfigOption{
textlogger.Verbosity(int(c.Verbosity)),
textlogger.Output(output),
}
loggerConfig := textlogger.NewConfig(options...)
// This should never fail; we produce a valid string here.
_ = loggerConfig.VModule().Set(VModuleConfigurationPflag(&c.VModule).String())
return textlogger.NewLogger(loggerConfig),
RuntimeControl{
SetVerbosityLevel: func(v uint32) error {
return loggerConfig.Verbosity().Set(fmt.Sprintf("%d", v))
},
Flush: flush,
}
}
type klogMsgRouter struct {
info, error io.Writer
}
var _ io.Writer = &klogMsgRouter{}
// Write redirects the message into either the info or error
// stream, depending on its type as indicated in text format
// by the first byte.
func (r *klogMsgRouter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
if p[0] == 'I' {
return r.info.Write(p)
}
return r.error.Write(p)
}
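// Editor's note (illustrative): klog text lines start with a severity
// letter, so only "I..." lines reach the info stream; warnings and errors
// go to the error stream.
//
//	r := &klogMsgRouter{info: os.Stdout, error: os.Stderr}
//	_, _ = r.Write([]byte("I0723 10:00:00.000000 1 main.go:10] started\n")) // stdout
//	_, _ = r.Write([]byte("E0723 10:00:01.000000 1 main.go:12] failed\n"))  // stderr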
// bufferedWriter is an io.Writer that buffers writes in-memory before
// flushing them to a wrapped io.Writer after reaching some limit
// or getting flushed.
type bufferedWriter struct {
mu sync.Mutex
writer *bufio.Writer
out io.Writer
}
func newBufferedWriter(out io.Writer, size int) *bufferedWriter {
return &bufferedWriter{
writer: bufio.NewWriterSize(out, size),
out: out,
}
}
func (b *bufferedWriter) Write(p []byte) (int, error) {
b.mu.Lock()
defer b.mu.Unlock()
// To avoid partial writes into the underlying writer, we ensure that
// the entire new data fits into the buffer or flush first.
if len(p) > b.writer.Available() && b.writer.Buffered() > 0 {
if err := b.writer.Flush(); err != nil {
return 0, err
}
}
// If it still doesn't fit, then we bypass the now empty buffer
// and write directly.
if len(p) > b.writer.Available() {
return b.out.Write(p)
}
// This goes into the buffer.
return b.writer.Write(p)
}
func (b *bufferedWriter) Flush() {
b.mu.Lock()
defer b.mu.Unlock()
_ = b.writer.Flush()
}

146
vendor/k8s.io/component-base/logs/api/v1/types.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Supported output formats.
const (
// DefaultLogFormat is the traditional klog output format.
DefaultLogFormat = "text"
// JSONLogFormat emits each log message as a JSON struct.
JSONLogFormat = "json"
)
// The alpha or beta level of structs is the highest stability level of any field
// inside it. Feature gates will get checked during LoggingConfiguration.ValidateAndApply.
// LoggingConfiguration contains logging options.
type LoggingConfiguration struct {
// Format specifies the structure of log messages.
// The default value is `text`.
Format string `json:"format,omitempty"`
// Maximum time between log flushes.
// If a string, parsed as a duration (i.e. "1s")
// If an int, the maximum number of nanoseconds (i.e. 1s = 1000000000).
// Ignored if the selected logging backend writes log messages without buffering.
FlushFrequency TimeOrMetaDuration `json:"flushFrequency"`
// Verbosity is the threshold that determines which log messages are
// logged. Default is zero which logs only the most important
// messages. Higher values enable additional messages. Error messages
// are always logged.
Verbosity VerbosityLevel `json:"verbosity"`
// VModule overrides the verbosity threshold for individual files.
// Only supported for "text" log format.
VModule VModuleConfiguration `json:"vmodule,omitempty"`
// [Alpha] Options holds additional parameters that are specific
// to the different logging formats. Only the options for the selected
// format get used, but all of them get validated.
// Only available when the LoggingAlphaOptions feature gate is enabled.
Options FormatOptions `json:"options,omitempty"`
}
// TimeOrMetaDuration is present only for backwards compatibility for the
// flushFrequency field, and new fields should use metav1.Duration.
type TimeOrMetaDuration struct {
// Duration holds the duration
Duration metav1.Duration
// SerializeAsString controls whether the value is serialized as a string or an integer
SerializeAsString bool `json:"-"`
}
func (t TimeOrMetaDuration) MarshalJSON() ([]byte, error) {
if t.SerializeAsString {
return t.Duration.MarshalJSON()
} else {
// Marshal as integer for backwards compatibility
return json.Marshal(t.Duration.Duration)
}
}
func (t *TimeOrMetaDuration) UnmarshalJSON(b []byte) error {
if len(b) > 0 && b[0] == '"' {
// string values unmarshal as metav1.Duration
t.SerializeAsString = true
return json.Unmarshal(b, &t.Duration)
}
t.SerializeAsString = false
if err := json.Unmarshal(b, &t.Duration.Duration); err != nil {
return fmt.Errorf("invalid duration %q: %w", string(b), err)
}
return nil
}
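// Editor's sketch (illustrative): both encodings unmarshal to the same
// duration, differing only in how they will be re-serialized.
//
//	var a, b TimeOrMetaDuration
//	_ = json.Unmarshal([]byte(`"5s"`), &a)       // a.SerializeAsString == true
//	_ = json.Unmarshal([]byte(`5000000000`), &b) // 5s in nanoseconds, b.SerializeAsString == false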
// FormatOptions contains options for the different logging formats.
type FormatOptions struct {
// [Alpha] Text contains options for logging format "text".
// Only available when the LoggingAlphaOptions feature gate is enabled.
Text TextOptions `json:"text,omitempty"`
// [Alpha] JSON contains options for logging format "json".
// Only available when the LoggingAlphaOptions feature gate is enabled.
JSON JSONOptions `json:"json,omitempty"`
}
// TextOptions contains options for logging format "text".
type TextOptions struct {
OutputRoutingOptions `json:",inline"`
}
// JSONOptions contains options for logging format "json".
type JSONOptions struct {
OutputRoutingOptions `json:",inline"`
}
// OutputRoutingOptions contains options that are supported by both "text" and "json".
type OutputRoutingOptions struct {
// [Alpha] SplitStream redirects error messages to stderr while
// info messages go to stdout, with buffering. The default is to write
// both to stdout, without buffering. Only available when
// the LoggingAlphaOptions feature gate is enabled.
SplitStream bool `json:"splitStream,omitempty"`
// [Alpha] InfoBufferSize sets the size of the info stream when
// using split streams. The default is zero, which disables buffering.
// Only available when the LoggingAlphaOptions feature gate is enabled.
InfoBufferSize resource.QuantityValue `json:"infoBufferSize,omitempty"`
}
// VModuleConfiguration is a collection of individual file names or patterns
// and the corresponding verbosity threshold.
type VModuleConfiguration []VModuleItem
// VModuleItem defines verbosity for one or more files which match a certain
// glob pattern.
type VModuleItem struct {
// FilePattern is a base file name (i.e. minus the ".go" suffix and
// directory) or a "glob" pattern for such a name. It must not contain
// comma and equal signs because those are separators for the
// corresponding klog command line argument.
FilePattern string `json:"filePattern"`
// Verbosity is the threshold for log messages emitted inside files
// that match the pattern.
Verbosity VerbosityLevel `json:"verbosity"`
}
// VerbosityLevel represents a klog or logr verbosity threshold.
type VerbosityLevel uint32

View File

@ -0,0 +1,167 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FormatOptions) DeepCopyInto(out *FormatOptions) {
*out = *in
in.Text.DeepCopyInto(&out.Text)
in.JSON.DeepCopyInto(&out.JSON)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FormatOptions.
func (in *FormatOptions) DeepCopy() *FormatOptions {
if in == nil {
return nil
}
out := new(FormatOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *JSONOptions) DeepCopyInto(out *JSONOptions) {
*out = *in
in.OutputRoutingOptions.DeepCopyInto(&out.OutputRoutingOptions)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONOptions.
func (in *JSONOptions) DeepCopy() *JSONOptions {
if in == nil {
return nil
}
out := new(JSONOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoggingConfiguration) DeepCopyInto(out *LoggingConfiguration) {
*out = *in
out.FlushFrequency = in.FlushFrequency
if in.VModule != nil {
in, out := &in.VModule, &out.VModule
*out = make(VModuleConfiguration, len(*in))
copy(*out, *in)
}
in.Options.DeepCopyInto(&out.Options)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoggingConfiguration.
func (in *LoggingConfiguration) DeepCopy() *LoggingConfiguration {
if in == nil {
return nil
}
out := new(LoggingConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OutputRoutingOptions) DeepCopyInto(out *OutputRoutingOptions) {
*out = *in
in.InfoBufferSize.DeepCopyInto(&out.InfoBufferSize)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutputRoutingOptions.
func (in *OutputRoutingOptions) DeepCopy() *OutputRoutingOptions {
if in == nil {
return nil
}
out := new(OutputRoutingOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TextOptions) DeepCopyInto(out *TextOptions) {
*out = *in
in.OutputRoutingOptions.DeepCopyInto(&out.OutputRoutingOptions)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TextOptions.
func (in *TextOptions) DeepCopy() *TextOptions {
if in == nil {
return nil
}
out := new(TextOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TimeOrMetaDuration) DeepCopyInto(out *TimeOrMetaDuration) {
*out = *in
out.Duration = in.Duration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeOrMetaDuration.
func (in *TimeOrMetaDuration) DeepCopy() *TimeOrMetaDuration {
if in == nil {
return nil
}
out := new(TimeOrMetaDuration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in VModuleConfiguration) DeepCopyInto(out *VModuleConfiguration) {
{
in := &in
*out = make(VModuleConfiguration, len(*in))
copy(*out, *in)
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleConfiguration.
func (in VModuleConfiguration) DeepCopy() VModuleConfiguration {
if in == nil {
return nil
}
out := new(VModuleConfiguration)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VModuleItem) DeepCopyInto(out *VModuleItem) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VModuleItem.
func (in *VModuleItem) DeepCopy() *VModuleItem {
if in == nil {
return nil
}
out := new(VModuleItem)
in.DeepCopyInto(out)
return out
}

View File

@ -0,0 +1,34 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package setverbositylevel stores callbacks that will be invoked by logs.GlogLevel.
//
// This is a separate package to avoid a dependency from
// k8s.io/component-base/logs (uses the callbacks) to
// k8s.io/component-base/logs/api/v1 (adds them). Not all users of the logs
// package also use the API.
package setverbositylevel
import (
"sync"
)
var (
// Mutex controls access to the callbacks.
Mutex sync.Mutex
Callbacks []func(v uint32) error
)

View File

@ -0,0 +1,41 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package klogflags
import (
"flag"
"k8s.io/klog/v2"
)
// Init is a replacement for klog.InitFlags which only adds those flags
// that are still supported for Kubernetes components (i.e. -v and -vmodule).
// See
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components.
func Init(fs *flag.FlagSet) {
var allFlags flag.FlagSet
klog.InitFlags(&allFlags)
if fs == nil {
fs = flag.CommandLine
}
allFlags.VisitAll(func(f *flag.Flag) {
switch f.Name {
case "v", "vmodule":
fs.Var(f.Value, f.Name, f.Usage)
}
})
}
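// Editor's sketch (illustrative): a caller in another package that wants a
// clean FlagSet with only the supported klog flags can do
//
//	var fs flag.FlagSet
//	klogflags.Init(&fs)
//	// fs now defines exactly -v and -vmodule.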

303
vendor/k8s.io/klog/v2/internal/verbosity/verbosity.go generated vendored Normal file
View File

@ -0,0 +1,303 @@
/*
Copyright 2013 Google Inc. All Rights Reserved.
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package verbosity
import (
"bytes"
"errors"
"flag"
"fmt"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
)
// New returns a struct that implements -v and -vmodule support. Changing and
// checking these settings is thread-safe, with all concurrency issues handled
// internally.
func New() *VState {
vs := new(VState)
// The two fields must have a pointer to the overall struct for their
// implementation of Set.
vs.vmodule.vs = vs
vs.verbosity.vs = vs
return vs
}
// Value is an extension that makes it possible to use the values in pflag.
type Value interface {
flag.Value
Type() string
}
func (vs *VState) V() Value {
return &vs.verbosity
}
func (vs *VState) VModule() Value {
return &vs.vmodule
}
// VState contains settings and state. Some of its fields can be accessed
// through atomic read/writes, in other cases a mutex must be held.
type VState struct {
mu sync.Mutex
// These flags are modified only under lock, although verbosity may be fetched
// safely using atomic.LoadInt32.
vmodule moduleSpec // The state of the -vmodule flag.
verbosity levelSpec // V logging level, the value of the -v flag.
// pcs is used in V to avoid an allocation when computing the caller's PC.
pcs [1]uintptr
// vmap is a cache of the V Level for each V() call site, identified by PC.
// It is wiped whenever the vmodule flag changes state.
vmap map[uintptr]Level
// filterLength stores the length of the vmodule filter chain. If greater
// than zero, it means vmodule is enabled. It may be read safely
// using atomic.LoadInt32, but is only modified under mu.
filterLength int32
}
// Level must be an int32 to support atomic read/writes.
type Level int32
type levelSpec struct {
vs *VState
l Level
}
// get returns the value of the level.
func (l *levelSpec) get() Level {
return Level(atomic.LoadInt32((*int32)(&l.l)))
}
// set sets the value of the level.
func (l *levelSpec) set(val Level) {
atomic.StoreInt32((*int32)(&l.l), int32(val))
}
// String is part of the flag.Value interface.
func (l *levelSpec) String() string {
return strconv.FormatInt(int64(l.l), 10)
}
// Get is part of the flag.Getter interface. It returns the
// verbosity level as int32.
func (l *levelSpec) Get() interface{} {
return int32(l.l)
}
// Type is part of pflag.Value.
func (l *levelSpec) Type() string {
return "Level"
}
// Set is part of the flag.Value interface.
func (l *levelSpec) Set(value string) error {
v, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return err
}
l.vs.mu.Lock()
defer l.vs.mu.Unlock()
l.vs.set(Level(v), l.vs.vmodule.filter, false)
return nil
}
// moduleSpec represents the setting of the -vmodule flag.
type moduleSpec struct {
vs *VState
filter []modulePat
}
// modulePat contains a filter for the -vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
pattern string
literal bool // The pattern is a literal string
level Level
}
// match reports whether the file matches the pattern. It uses a string
// comparison if the pattern contains no metacharacters.
func (m *modulePat) match(file string) bool {
if m.literal {
return file == m.pattern
}
match, _ := filepath.Match(m.pattern, file)
return match
}
func (m *moduleSpec) String() string {
// Lock because the type is not atomic. TODO: clean this up.
// Empty instances don't have and don't need a lock (can
// happen when flag uses introspection).
if m.vs != nil {
m.vs.mu.Lock()
defer m.vs.mu.Unlock()
}
var b bytes.Buffer
for i, f := range m.filter {
if i > 0 {
b.WriteRune(',')
}
fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
}
return b.String()
}
// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
// struct is not exported.
func (m *moduleSpec) Get() interface{} {
return nil
}
// Type is part of pflag.Value
func (m *moduleSpec) Type() string {
return "pattern=N,..."
}
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
// Set sets the vmodule value.
// Syntax: -vmodule=recordio=2,file=1,gfs*=3
func (m *moduleSpec) Set(value string) error {
var filter []modulePat
for _, pat := range strings.Split(value, ",") {
if len(pat) == 0 {
// Empty strings such as from a trailing comma can be ignored.
continue
}
patLev := strings.Split(pat, "=")
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
return errVmoduleSyntax
}
pattern := patLev[0]
v, err := strconv.ParseInt(patLev[1], 10, 32)
if err != nil {
return errors.New("syntax error: expect comma-separated list of filename=N")
}
if v < 0 {
return errors.New("negative value for vmodule level")
}
if v == 0 {
continue // Ignore. It's harmless but no point in paying the overhead.
}
// TODO: check syntax of filter?
filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
}
m.vs.mu.Lock()
defer m.vs.mu.Unlock()
m.vs.set(m.vs.verbosity.l, filter, true)
return nil
}
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
// that require filepath.Match to be called to match the pattern.
func isLiteral(pattern string) bool {
return !strings.ContainsAny(pattern, `\*?[]`)
}
// set sets a consistent state for V logging.
// The mutex must be held.
func (vs *VState) set(l Level, filter []modulePat, setFilter bool) {
// Turn verbosity off so V will not fire while we are in transition.
vs.verbosity.set(0)
// Ditto for filter length.
atomic.StoreInt32(&vs.filterLength, 0)
// Set the new filters and wipe the pc->Level map if the filter has changed.
if setFilter {
vs.vmodule.filter = filter
vs.vmap = make(map[uintptr]Level)
}
// Things are consistent now, so enable filtering and verbosity.
// They are enabled in order opposite to that in V.
atomic.StoreInt32(&vs.filterLength, int32(len(filter)))
vs.verbosity.set(l)
}
// Enabled checks whether logging is enabled at the given level. This must be
// called with depth=0 when the caller of enabled will do the logging and
// higher values when more stack levels need to be skipped.
//
// The mutex will be locked only if needed.
func (vs *VState) Enabled(level Level, depth int) bool {
// This function tries hard to be cheap unless there's work to do.
// The fast path is two atomic loads and compares.
// Here is a cheap but safe test to see if V logging is enabled globally.
if vs.verbosity.get() >= level {
return true
}
// It's off globally but vmodule may still be set.
// Here is another cheap but safe test to see if vmodule is enabled.
if atomic.LoadInt32(&vs.filterLength) > 0 {
// Now we need a proper lock to use the logging structure. The pcs field
// is shared so we must lock before accessing it. This is fairly expensive,
// but if V logging is enabled we're slow anyway.
vs.mu.Lock()
defer vs.mu.Unlock()
if runtime.Callers(depth+2, vs.pcs[:]) == 0 {
return false
}
// runtime.Callers returns "return PCs", but we want
// to look up the symbolic information for the call,
// so subtract 1 from the PC. runtime.CallersFrames
// would be cleaner, but allocates.
pc := vs.pcs[0] - 1
v, ok := vs.vmap[pc]
if !ok {
v = vs.setV(pc)
}
return v >= level
}
return false
}
// setV computes and remembers the V level for a given PC
// when vmodule is enabled.
// File pattern matching takes the basename of the file, stripped
// of its .go suffix, and uses filepath.Match, which is a little more
// general than the *? matching used in C++.
// Mutex is held.
func (vs *VState) setV(pc uintptr) Level {
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
// The file is something like /a/b/c/d.go. We want just the d.
file = strings.TrimSuffix(file, ".go")
if slash := strings.LastIndex(file, "/"); slash >= 0 {
file = file[slash+1:]
}
for _, filter := range vs.vmodule.filter {
if filter.match(file) {
vs.vmap[pc] = filter.level
return filter.level
}
}
vs.vmap[pc] = 0
return 0
}
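// Editor's sketch (illustrative): a VState is driven through its flag.Value
// wrappers and queried with Enabled.
//
//	vs := New()
//	_ = vs.V().Set("2")
//	_ = vs.VModule().Set("kubelet=5")
//	enabled := vs.Enabled(3, 0) // true only when the call site is in a file matching "kubelet"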

154
vendor/k8s.io/klog/v2/textlogger/options.go generated vendored Normal file
View File

@ -0,0 +1,154 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package textlogger
import (
"flag"
"io"
"os"
"strconv"
"time"
"k8s.io/klog/v2/internal/verbosity"
)
// Config influences logging in a text logger. To make this configurable via
// command line flags, instantiate this once per program and use AddFlags to
// bind command line flags to the instance before passing it to NewLogger.
//
// Must be constructed with NewConfig.
type Config struct {
vstate *verbosity.VState
co configOptions
}
// Verbosity returns a value instance that can be used to query (via String) or
// modify (via Set) the verbosity threshold. This is thread-safe and can be
// done at runtime.
func (c *Config) Verbosity() flag.Value {
return c.vstate.V()
}
// VModule returns a value instance that can be used to query (via String) or
// modify (via Set) the vmodule settings. This is thread-safe and can be done
// at runtime.
func (c *Config) VModule() flag.Value {
return c.vstate.VModule()
}
// ConfigOption implements functional parameters for NewConfig.
type ConfigOption func(co *configOptions)
type configOptions struct {
verbosityFlagName string
vmoduleFlagName string
verbosityDefault int
fixedTime *time.Time
unwind func(int) (string, int)
output io.Writer
}
// VerbosityFlagName overrides the default -v for the verbosity level.
func VerbosityFlagName(name string) ConfigOption {
return func(co *configOptions) {
co.verbosityFlagName = name
}
}
// VModuleFlagName overrides the default -vmodule for the per-module
// verbosity levels.
func VModuleFlagName(name string) ConfigOption {
return func(co *configOptions) {
co.vmoduleFlagName = name
}
}
// Verbosity overrides the default verbosity level of 0.
// See https://github.com/kubernetes/community/blob/9406b4352fe2d5810cb21cc3cb059ce5886de157/contributors/devel/sig-instrumentation/logging.md#logging-conventions
// for log level conventions in Kubernetes.
func Verbosity(level int) ConfigOption {
return func(co *configOptions) {
co.verbosityDefault = level
}
}
// Output overrides stderr as the output stream.
func Output(output io.Writer) ConfigOption {
return func(co *configOptions) {
co.output = output
}
}
// FixedTime overrides the actual time with a fixed time. Useful only for testing.
//
// # Experimental
//
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func FixedTime(ts time.Time) ConfigOption {
return func(co *configOptions) {
co.fixedTime = &ts
}
}
// Backtrace overrides the default mechanism for determining the call site.
// The callback is invoked with the number of function calls between itself
// and the call site. It must return the file name and line number. An empty
// file name indicates that the information is unknown.
//
// # Experimental
//
// Notice: This function is EXPERIMENTAL and may be changed or removed in a
// later release.
func Backtrace(unwind func(skip int) (filename string, line int)) ConfigOption {
return func(co *configOptions) {
co.unwind = unwind
}
}
// NewConfig returns a configuration with recommended defaults and optional
// modifications. Command line flags are not bound to any FlagSet yet.
func NewConfig(opts ...ConfigOption) *Config {
c := &Config{
vstate: verbosity.New(),
co: configOptions{
verbosityFlagName: "v",
vmoduleFlagName: "vmodule",
verbosityDefault: 0,
unwind: runtimeBacktrace,
output: os.Stderr,
},
}
for _, opt := range opts {
opt(&c.co)
}
// Cannot fail for this input.
_ = c.Verbosity().Set(strconv.FormatInt(int64(c.co.verbosityDefault), 10))
return c
}
// AddFlags registers the command line flags that control the configuration.
//
// The default flag names are the same as in klog, so unless those defaults
// are changed, either klog.InitFlags or Config.AddFlags can be used for the
// same flag set, but not both.
func (c *Config) AddFlags(fs *flag.FlagSet) {
fs.Var(c.Verbosity(), c.co.verbosityFlagName, "number for the log level verbosity of the testing logger")
fs.Var(c.VModule(), c.co.vmoduleFlagName, "comma-separated list of pattern=N log level settings for files matching the patterns")
}
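// Editor's sketch (illustrative): a self-contained logger with its own
// verbosity settings, independent of klog's global flags.
//
//	config := NewConfig(Verbosity(2), Output(os.Stdout))
//	config.AddFlags(flag.CommandLine)
//	flag.Parse()
//	logger := NewLogger(config) // NewLogger is defined in textlogger.go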

187
vendor/k8s.io/klog/v2/textlogger/textlogger.go generated vendored Normal file
View File

@ -0,0 +1,187 @@
/*
Copyright 2019 The Kubernetes Authors.
Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package textlogger contains an implementation of the logr interface which is
// producing the exact same output as klog. It does not route output through
// klog (i.e. ignores [k8s.io/klog/v2.InitFlags]). Instead, all settings must be
// configured through its own [NewConfig] and [Config.AddFlags].
package textlogger
import (
"runtime"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
"k8s.io/klog/v2/internal/buffer"
"k8s.io/klog/v2/internal/serialize"
"k8s.io/klog/v2/internal/severity"
"k8s.io/klog/v2/internal/verbosity"
)
var (
// TimeNow is used to retrieve the current time. May be changed for testing.
TimeNow = time.Now
)
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
)
// NewLogger constructs a new logger.
//
// Verbosity can be modified at any time through the Config.Verbosity and
// Config.VModule APIs.
func NewLogger(c *Config) logr.Logger {
return logr.New(&tlogger{
values: nil,
config: c,
})
}
type tlogger struct {
callDepth int
// hasPrefix is true if the first entry in values is the special
// nameKey key/value. Such an entry gets added and later updated in
// WithName.
hasPrefix bool
values []interface{}
groups string
config *Config
}
func (l *tlogger) Init(info logr.RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *tlogger) WithCallDepth(depth int) logr.LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *tlogger) Enabled(level int) bool {
return l.config.vstate.Enabled(verbosity.Level(level), 1+l.callDepth)
}
func (l *tlogger) Info(_ int, msg string, kvList ...interface{}) {
l.print(nil, severity.InfoLog, msg, kvList)
}
func (l *tlogger) Error(err error, msg string, kvList ...interface{}) {
l.print(err, severity.ErrorLog, msg, kvList)
}
func (l *tlogger) print(err error, s severity.Severity, msg string, kvList []interface{}) {
// Determine caller.
// +1 for this frame, +1 for Info/Error.
skip := l.callDepth + 2
file, line := l.config.co.unwind(skip)
if file == "" {
file = "???"
line = 1
} else if slash := strings.LastIndex(file, "/"); slash >= 0 {
file = file[slash+1:]
}
l.printWithInfos(file, line, time.Now(), err, s, msg, kvList)
}
func runtimeBacktrace(skip int) (string, int) {
_, file, line, ok := runtime.Caller(skip + 1)
if !ok {
return "", 0
}
return file, line
}
func (l *tlogger) printWithInfos(file string, line int, now time.Time, err error, s severity.Severity, msg string, kvList []interface{}) {
// Only create a new buffer if we don't have one cached.
b := buffer.GetBuffer()
defer buffer.PutBuffer(b)
// Format header.
if l.config.co.fixedTime != nil {
now = *l.config.co.fixedTime
}
b.FormatHeader(s, file, line, now)
// The message is always quoted, even if it contains line breaks.
// If developers want multi-line output, they should use a small, fixed
// message and put the multi-line output into a value.
b.WriteString(strconv.Quote(msg))
if err != nil {
serialize.KVFormat(&b.Buffer, "err", err)
}
serialize.MergeAndFormatKVs(&b.Buffer, l.values, kvList)
if b.Len() == 0 || b.Bytes()[b.Len()-1] != '\n' {
b.WriteByte('\n')
}
_, _ = l.config.co.output.Write(b.Bytes())
}
func (l *tlogger) WriteKlogBuffer(data []byte) {
_, _ = l.config.co.output.Write(data)
}
// WithName returns a new logr.Logger with the specified name appended. klogr
// uses '/' characters to separate name elements. Callers should not pass '/'
// in the provided name string, but this library does not actually enforce that.
func (l *tlogger) WithName(name string) logr.LogSink {
clone := *l
if l.hasPrefix {
// Copy slice and modify value. No length checks and type
// assertions are needed because hasPrefix is only true if the
// first two elements exist and are key/value strings.
v := make([]interface{}, 0, len(l.values))
v = append(v, l.values...)
prefix, _ := v[1].(string)
v[1] = prefix + "." + name
clone.values = v
} else {
// Prepend a new key/value pair.
v := make([]interface{}, 0, 2+len(l.values))
v = append(v, nameKey, name)
v = append(v, l.values...)
clone.values = v
clone.hasPrefix = true
}
return &clone
}
func (l *tlogger) WithValues(kvList ...interface{}) logr.LogSink {
clone := *l
clone.values = serialize.WithValues(l.values, kvList)
return &clone
}
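// Editor's note (illustrative): WithName segments accumulate with "."
// separators and WithValues pairs are appended to every message, roughly:
//
//	logger := NewLogger(NewConfig())
//	logger = logger.WithName("controller").WithValues("pod", "kube-system/coredns")
//	logger.Info("synced") // ... "synced" logger="controller" pod="kube-system/coredns"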
// KlogBufferWriter is implemented by the textlogger LogSink.
type KlogBufferWriter interface {
// WriteKlogBuffer takes a pre-formatted buffer prepared by klog and
// writes it unchanged to the output stream. Can be used with
// klog.WriteKlogBuffer when setting a logger through
// klog.SetLoggerWithOptions.
WriteKlogBuffer([]byte)
}
var _ logr.LogSink = &tlogger{}
var _ logr.CallDepthLogSink = &tlogger{}
var _ KlogBufferWriter = &tlogger{}

52
vendor/k8s.io/klog/v2/textlogger/textlogger_slog.go generated vendored Normal file
View File

@ -0,0 +1,52 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package textlogger
import (
"context"
"log/slog"
"github.com/go-logr/logr"
"k8s.io/klog/v2/internal/serialize"
"k8s.io/klog/v2/internal/sloghandler"
)
func (l *tlogger) Handle(ctx context.Context, record slog.Record) error {
return sloghandler.Handle(ctx, record, l.groups, l.printWithInfos)
}
func (l *tlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
clone := *l
clone.values = serialize.WithValues(l.values, sloghandler.Attrs2KVList(l.groups, attrs))
return &clone
}
func (l *tlogger) WithGroup(name string) logr.SlogSink {
clone := *l
if clone.groups != "" {
clone.groups += "." + name
} else {
clone.groups = name
}
return &clone
}
var _ logr.SlogSink = &tlogger{}

202
vendor/k8s.io/kubelet/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

21
vendor/k8s.io/kubelet/config/v1beta1/doc.go generated vendored Normal file

@@ -0,0 +1,21 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +groupName=kubelet.config.k8s.io
package v1beta1 // import "k8s.io/kubelet/config/v1beta1"

45
vendor/k8s.io/kubelet/config/v1beta1/register.go generated vendored Normal file

@@ -0,0 +1,45 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// GroupName is the group name used in this package
const GroupName = "kubelet.config.k8s.io"
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
var (
// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// addKnownTypes registers known types to the given scheme
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&KubeletConfiguration{},
&SerializedNodeConfigSource{},
&CredentialProviderConfig{},
)
return nil
}
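
Not part of the vendored file: the registration above enables scheme/codec-based decoding, but with the vendored sigs.k8s.io/yaml a direct unmarshal is enough for the purpose stated in the commit message (reading the default kubelet config). A minimal sketch, assuming the default config path and the `CgroupRoot`/`CgroupDriver` fields of `KubeletConfiguration`:

```go
package main

import (
	"fmt"
	"os"

	kubeletv1beta1 "k8s.io/kubelet/config/v1beta1"
	"sigs.k8s.io/yaml"
)

func main() {
	data, err := os.ReadFile("/var/lib/kubelet/config.yaml")
	if err != nil {
		panic(err)
	}

	var cfg kubeletv1beta1.KubeletConfiguration
	// sigs.k8s.io/yaml converts YAML to JSON first, so the JSON struct
	// tags on KubeletConfiguration apply to the YAML keys as well.
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}

	// These fields determine where the kubelet roots pod cgroups.
	fmt.Println("cgroupRoot:", cfg.CgroupRoot, "cgroupDriver:", cfg.CgroupDriver)
}
```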

1017
vendor/k8s.io/kubelet/config/v1beta1/types.go generated vendored Normal file

File diff suppressed because it is too large

634
vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,634 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
apiv1 "k8s.io/component-base/tracing/api/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProvider) DeepCopyInto(out *CredentialProvider) {
*out = *in
if in.MatchImages != nil {
in, out := &in.MatchImages, &out.MatchImages
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.DefaultCacheDuration != nil {
in, out := &in.DefaultCacheDuration, &out.DefaultCacheDuration
*out = new(v1.Duration)
**out = **in
}
if in.Args != nil {
in, out := &in.Args, &out.Args
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Env != nil {
in, out := &in.Env, &out.Env
*out = make([]ExecEnvVar, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProvider.
func (in *CredentialProvider) DeepCopy() *CredentialProvider {
if in == nil {
return nil
}
out := new(CredentialProvider)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CredentialProviderConfig) DeepCopyInto(out *CredentialProviderConfig) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.Providers != nil {
in, out := &in.Providers, &out.Providers
*out = make([]CredentialProvider, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialProviderConfig.
func (in *CredentialProviderConfig) DeepCopy() *CredentialProviderConfig {
if in == nil {
return nil
}
out := new(CredentialProviderConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *CredentialProviderConfig) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar.
func (in *ExecEnvVar) DeepCopy() *ExecEnvVar {
if in == nil {
return nil
}
out := new(ExecEnvVar)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAnonymousAuthentication) DeepCopyInto(out *KubeletAnonymousAuthentication) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAnonymousAuthentication.
func (in *KubeletAnonymousAuthentication) DeepCopy() *KubeletAnonymousAuthentication {
if in == nil {
return nil
}
out := new(KubeletAnonymousAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthentication) DeepCopyInto(out *KubeletAuthentication) {
*out = *in
out.X509 = in.X509
in.Webhook.DeepCopyInto(&out.Webhook)
in.Anonymous.DeepCopyInto(&out.Anonymous)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthentication.
func (in *KubeletAuthentication) DeepCopy() *KubeletAuthentication {
if in == nil {
return nil
}
out := new(KubeletAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletAuthorization) DeepCopyInto(out *KubeletAuthorization) {
*out = *in
out.Webhook = in.Webhook
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletAuthorization.
func (in *KubeletAuthorization) DeepCopy() *KubeletAuthorization {
if in == nil {
return nil
}
out := new(KubeletAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.EnableServer != nil {
in, out := &in.EnableServer, &out.EnableServer
*out = new(bool)
**out = **in
}
out.SyncFrequency = in.SyncFrequency
out.FileCheckFrequency = in.FileCheckFrequency
out.HTTPCheckFrequency = in.HTTPCheckFrequency
if in.StaticPodURLHeader != nil {
in, out := &in.StaticPodURLHeader, &out.StaticPodURLHeader
*out = make(map[string][]string, len(*in))
for key, val := range *in {
var outVal []string
if val == nil {
(*out)[key] = nil
} else {
in, out := &val, &outVal
*out = make([]string, len(*in))
copy(*out, *in)
}
(*out)[key] = outVal
}
}
if in.TLSCipherSuites != nil {
in, out := &in.TLSCipherSuites, &out.TLSCipherSuites
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Authentication.DeepCopyInto(&out.Authentication)
out.Authorization = in.Authorization
if in.RegistryPullQPS != nil {
in, out := &in.RegistryPullQPS, &out.RegistryPullQPS
*out = new(int32)
**out = **in
}
if in.EventRecordQPS != nil {
in, out := &in.EventRecordQPS, &out.EventRecordQPS
*out = new(int32)
**out = **in
}
if in.EnableDebuggingHandlers != nil {
in, out := &in.EnableDebuggingHandlers, &out.EnableDebuggingHandlers
*out = new(bool)
**out = **in
}
if in.HealthzPort != nil {
in, out := &in.HealthzPort, &out.HealthzPort
*out = new(int32)
**out = **in
}
if in.OOMScoreAdj != nil {
in, out := &in.OOMScoreAdj, &out.OOMScoreAdj
*out = new(int32)
**out = **in
}
if in.ClusterDNS != nil {
in, out := &in.ClusterDNS, &out.ClusterDNS
*out = make([]string, len(*in))
copy(*out, *in)
}
out.StreamingConnectionIdleTimeout = in.StreamingConnectionIdleTimeout
out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
out.NodeStatusReportFrequency = in.NodeStatusReportFrequency
out.ImageMinimumGCAge = in.ImageMinimumGCAge
out.ImageMaximumGCAge = in.ImageMaximumGCAge
if in.ImageGCHighThresholdPercent != nil {
in, out := &in.ImageGCHighThresholdPercent, &out.ImageGCHighThresholdPercent
*out = new(int32)
**out = **in
}
if in.ImageGCLowThresholdPercent != nil {
in, out := &in.ImageGCLowThresholdPercent, &out.ImageGCLowThresholdPercent
*out = new(int32)
**out = **in
}
out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
if in.CgroupsPerQOS != nil {
in, out := &in.CgroupsPerQOS, &out.CgroupsPerQOS
*out = new(bool)
**out = **in
}
if in.CPUManagerPolicyOptions != nil {
in, out := &in.CPUManagerPolicyOptions, &out.CPUManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
if in.TopologyManagerPolicyOptions != nil {
in, out := &in.TopologyManagerPolicyOptions, &out.TopologyManagerPolicyOptions
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.QOSReserved != nil {
in, out := &in.QOSReserved, &out.QOSReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
if in.PodPidsLimit != nil {
in, out := &in.PodPidsLimit, &out.PodPidsLimit
*out = new(int64)
**out = **in
}
if in.ResolverConfig != nil {
in, out := &in.ResolverConfig, &out.ResolverConfig
*out = new(string)
**out = **in
}
if in.CPUCFSQuota != nil {
in, out := &in.CPUCFSQuota, &out.CPUCFSQuota
*out = new(bool)
**out = **in
}
if in.CPUCFSQuotaPeriod != nil {
in, out := &in.CPUCFSQuotaPeriod, &out.CPUCFSQuotaPeriod
*out = new(v1.Duration)
**out = **in
}
if in.NodeStatusMaxImages != nil {
in, out := &in.NodeStatusMaxImages, &out.NodeStatusMaxImages
*out = new(int32)
**out = **in
}
if in.KubeAPIQPS != nil {
in, out := &in.KubeAPIQPS, &out.KubeAPIQPS
*out = new(int32)
**out = **in
}
if in.SerializeImagePulls != nil {
in, out := &in.SerializeImagePulls, &out.SerializeImagePulls
*out = new(bool)
**out = **in
}
if in.MaxParallelImagePulls != nil {
in, out := &in.MaxParallelImagePulls, &out.MaxParallelImagePulls
*out = new(int32)
**out = **in
}
if in.EvictionHard != nil {
in, out := &in.EvictionHard, &out.EvictionHard
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoft != nil {
in, out := &in.EvictionSoft, &out.EvictionSoft
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EvictionSoftGracePeriod != nil {
in, out := &in.EvictionSoftGracePeriod, &out.EvictionSoftGracePeriod
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
out.EvictionPressureTransitionPeriod = in.EvictionPressureTransitionPeriod
if in.EvictionMinimumReclaim != nil {
in, out := &in.EvictionMinimumReclaim, &out.EvictionMinimumReclaim
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EnableControllerAttachDetach != nil {
in, out := &in.EnableControllerAttachDetach, &out.EnableControllerAttachDetach
*out = new(bool)
**out = **in
}
if in.MakeIPTablesUtilChains != nil {
in, out := &in.MakeIPTablesUtilChains, &out.MakeIPTablesUtilChains
*out = new(bool)
**out = **in
}
if in.IPTablesMasqueradeBit != nil {
in, out := &in.IPTablesMasqueradeBit, &out.IPTablesMasqueradeBit
*out = new(int32)
**out = **in
}
if in.IPTablesDropBit != nil {
in, out := &in.IPTablesDropBit, &out.IPTablesDropBit
*out = new(int32)
**out = **in
}
if in.FeatureGates != nil {
in, out := &in.FeatureGates, &out.FeatureGates
*out = make(map[string]bool, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.FailSwapOn != nil {
in, out := &in.FailSwapOn, &out.FailSwapOn
*out = new(bool)
**out = **in
}
out.MemorySwap = in.MemorySwap
if in.ContainerLogMaxFiles != nil {
in, out := &in.ContainerLogMaxFiles, &out.ContainerLogMaxFiles
*out = new(int32)
**out = **in
}
if in.SystemReserved != nil {
in, out := &in.SystemReserved, &out.SystemReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.KubeReserved != nil {
in, out := &in.KubeReserved, &out.KubeReserved
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.EnforceNodeAllocatable != nil {
in, out := &in.EnforceNodeAllocatable, &out.EnforceNodeAllocatable
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowedUnsafeSysctls != nil {
in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls
*out = make([]string, len(*in))
copy(*out, *in)
}
in.Logging.DeepCopyInto(&out.Logging)
if in.EnableSystemLogHandler != nil {
in, out := &in.EnableSystemLogHandler, &out.EnableSystemLogHandler
*out = new(bool)
**out = **in
}
if in.EnableSystemLogQuery != nil {
in, out := &in.EnableSystemLogQuery, &out.EnableSystemLogQuery
*out = new(bool)
**out = **in
}
out.ShutdownGracePeriod = in.ShutdownGracePeriod
out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
if in.ShutdownGracePeriodByPodPriority != nil {
in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
copy(*out, *in)
}
if in.ReservedMemory != nil {
in, out := &in.ReservedMemory, &out.ReservedMemory
*out = make([]MemoryReservation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.EnableProfilingHandler != nil {
in, out := &in.EnableProfilingHandler, &out.EnableProfilingHandler
*out = new(bool)
**out = **in
}
if in.EnableDebugFlagsHandler != nil {
in, out := &in.EnableDebugFlagsHandler, &out.EnableDebugFlagsHandler
*out = new(bool)
**out = **in
}
if in.SeccompDefault != nil {
in, out := &in.SeccompDefault, &out.SeccompDefault
*out = new(bool)
**out = **in
}
if in.MemoryThrottlingFactor != nil {
in, out := &in.MemoryThrottlingFactor, &out.MemoryThrottlingFactor
*out = new(float64)
**out = **in
}
if in.RegisterWithTaints != nil {
in, out := &in.RegisterWithTaints, &out.RegisterWithTaints
*out = make([]corev1.Taint, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.RegisterNode != nil {
in, out := &in.RegisterNode, &out.RegisterNode
*out = new(bool)
**out = **in
}
if in.Tracing != nil {
in, out := &in.Tracing, &out.Tracing
*out = new(apiv1.TracingConfiguration)
(*in).DeepCopyInto(*out)
}
if in.LocalStorageCapacityIsolation != nil {
in, out := &in.LocalStorageCapacityIsolation, &out.LocalStorageCapacityIsolation
*out = new(bool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConfiguration.
func (in *KubeletConfiguration) DeepCopy() *KubeletConfiguration {
if in == nil {
return nil
}
out := new(KubeletConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *KubeletConfiguration) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthentication) DeepCopyInto(out *KubeletWebhookAuthentication) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
out.CacheTTL = in.CacheTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthentication.
func (in *KubeletWebhookAuthentication) DeepCopy() *KubeletWebhookAuthentication {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletWebhookAuthorization) DeepCopyInto(out *KubeletWebhookAuthorization) {
*out = *in
out.CacheAuthorizedTTL = in.CacheAuthorizedTTL
out.CacheUnauthorizedTTL = in.CacheUnauthorizedTTL
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletWebhookAuthorization.
func (in *KubeletWebhookAuthorization) DeepCopy() *KubeletWebhookAuthorization {
if in == nil {
return nil
}
out := new(KubeletWebhookAuthorization)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeletX509Authentication) DeepCopyInto(out *KubeletX509Authentication) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletX509Authentication.
func (in *KubeletX509Authentication) DeepCopy() *KubeletX509Authentication {
if in == nil {
return nil
}
out := new(KubeletX509Authentication)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemoryReservation) DeepCopyInto(out *MemoryReservation) {
*out = *in
if in.Limits != nil {
in, out := &in.Limits, &out.Limits
*out = make(corev1.ResourceList, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemoryReservation.
func (in *MemoryReservation) DeepCopy() *MemoryReservation {
if in == nil {
return nil
}
out := new(MemoryReservation)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MemorySwapConfiguration) DeepCopyInto(out *MemorySwapConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemorySwapConfiguration.
func (in *MemorySwapConfiguration) DeepCopy() *MemorySwapConfiguration {
if in == nil {
return nil
}
out := new(MemorySwapConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SerializedNodeConfigSource) DeepCopyInto(out *SerializedNodeConfigSource) {
*out = *in
out.TypeMeta = in.TypeMeta
in.Source.DeepCopyInto(&out.Source)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedNodeConfigSource.
func (in *SerializedNodeConfigSource) DeepCopy() *SerializedNodeConfigSource {
if in == nil {
return nil
}
out := new(SerializedNodeConfigSource)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
if in == nil {
return nil
}
out := new(ShutdownGracePeriodByPodPriority)
in.DeepCopyInto(out)
return out
}
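
Not part of the vendored file: a minimal sketch of why the generated `DeepCopy` helpers matter here. When adjusting the pod cgroup prefix, copying first keeps the caller's configuration unaliased; the field and value below are illustrative assumptions.

```go
package main

import (
	"fmt"

	kubeletv1beta1 "k8s.io/kubelet/config/v1beta1"
)

// withCgroupRoot returns a modified copy of the configuration.
// DeepCopy duplicates every pointer, slice, and map field, so the
// mutation below can never leak back into the input.
func withCgroupRoot(in *kubeletv1beta1.KubeletConfiguration, root string) *kubeletv1beta1.KubeletConfiguration {
	out := in.DeepCopy()
	out.CgroupRoot = root
	return out
}

func main() {
	base := &kubeletv1beta1.KubeletConfiguration{CgroupRoot: "/"}
	updated := withCgroupRoot(base, "/kubepods-prefix")
	fmt.Println(base.CgroupRoot, updated.CgroupRoot) // "/" "/kubepods-prefix"
}
```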

21
vendor/modules.txt vendored

@@ -263,6 +263,9 @@ github.com/grafana/grafana-plugin-sdk-go/data/utils/jsoniter
github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
github.com/grpc-ecosystem/grpc-gateway/v2/runtime
github.com/grpc-ecosystem/grpc-gateway/v2/utilities
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
# github.com/josharian/native v1.1.0
## explicit; go 1.13
github.com/josharian/native
@@ -387,6 +390,9 @@ github.com/shirou/gopsutil/process
# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/spf13/cobra v1.8.1
## explicit; go 1.15
github.com/spf13/cobra
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
@@ -499,6 +505,9 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# go.yaml.in/yaml/v2 v2.4.2
## explicit; go 1.15
go.yaml.in/yaml/v2
# golang.org/x/arch v0.12.0
## explicit; go 1.18
golang.org/x/arch/x86/x86asm
@@ -735,7 +744,11 @@ k8s.io/client-go/util/connrotation
k8s.io/client-go/util/workqueue
# k8s.io/component-base v0.31.3
## explicit; go 1.22.0
k8s.io/component-base/cli/flag
k8s.io/component-base/featuregate
k8s.io/component-base/logs/api/v1
k8s.io/component-base/logs/internal/setverbositylevel
k8s.io/component-base/logs/klogflags
k8s.io/component-base/logs/logreduction
k8s.io/component-base/metrics
k8s.io/component-base/metrics/legacyregistry
@@ -762,6 +775,11 @@ k8s.io/klog/v2/internal/dbg
k8s.io/klog/v2/internal/serialize
k8s.io/klog/v2/internal/severity
k8s.io/klog/v2/internal/sloghandler
k8s.io/klog/v2/internal/verbosity
k8s.io/klog/v2/textlogger
# k8s.io/kubelet v0.29.0
## explicit; go 1.21
k8s.io/kubelet/config/v1beta1
# k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
## explicit; go 1.18
k8s.io/utils/clock
@@ -778,3 +796,6 @@ sigs.k8s.io/json/internal/golang/encoding/json
# sigs.k8s.io/structured-merge-diff/v4 v4.4.1
## explicit; go 1.13
sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.5.0
## explicit; go 1.22
sigs.k8s.io/yaml

24
vendor/sigs.k8s.io/yaml/.gitignore generated vendored Normal file

@@ -0,0 +1,24 @@
# OSX leaves these everywhere on SMB shares
._*
# Eclipse files
.classpath
.project
.settings/**
# Idea files
.idea/**
.idea/
# Emacs save files
*~
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# Go test binaries
*.test

31
vendor/sigs.k8s.io/yaml/CONTRIBUTING.md generated vendored Normal file

@@ -0,0 +1,31 @@
# Contributing Guidelines
Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
## Getting Started
We have full documentation on how to get started contributing here:
<!---
If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
-->
- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing)
- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers
## Mentorship
- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
<!---
Custom Information - if you're copying this template for the first time you can add custom content here, for example:
## Contact Information
- [Slack channel](https://kubernetes.slack.com/messages/kubernetes-users) - Replace `kubernetes-users` with your slack channel string, this will send users directly to your channel.
- [Mailing list](URL)
-->

306
vendor/sigs.k8s.io/yaml/LICENSE generated vendored Normal file

@@ -0,0 +1,306 @@
The MIT License (MIT)
Copyright (c) 2014 Sam Ghods
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# The forked go-yaml.v3 library under this project is covered by two
different licenses (MIT and Apache):
#### MIT License ####
The following files were ported to Go from C files of libyaml, and thus
are still covered by their original MIT license, with the additional
copyright starting in 2011 when the project was ported over:
apic.go emitterc.go parserc.go readerc.go scannerc.go
writerc.go yamlh.go yamlprivateh.go
Copyright (c) 2006-2010 Kirill Simonov
Copyright (c) 2006-2011 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
### Apache License ###
All the remaining project files are covered by the Apache license:
Copyright (c) 2011-2019 Canonical Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
# The forked go-yaml.v2 library under the project is covered by an
Apache license:
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

23
vendor/sigs.k8s.io/yaml/OWNERS generated vendored Normal file

@@ -0,0 +1,23 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dims
- jpbetz
- smarterclayton
- deads2k
- sttts
- liggitt
reviewers:
- dims
- thockin
- jpbetz
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- mikedanese
- liggitt
- sttts
- tallclair
labels:
- sig/api-machinery

123
vendor/sigs.k8s.io/yaml/README.md generated vendored Normal file

@@ -0,0 +1,123 @@
# YAML marshaling and unmarshaling support for Go
[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml)
kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml).
## Introduction
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON`, unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
## Caveats
**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
```
BAD:
exampleKey: !!binary gIGC
GOOD:
exampleKey: gIGC
... and decode the base64 data in your code.
```
**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support such keys. The same error occurs in `Unmarshal`, because map keys cannot be unmarshaled anyway: struct fields can't be keys.
## Installation and usage
To install, run:
```
$ go get sigs.k8s.io/yaml
```
And import using:
```
import "sigs.k8s.io/yaml"
```
Usage is very similar to the JSON library:
```go
package main
import (
"fmt"
"sigs.k8s.io/yaml"
)
type Person struct {
Name string `json:"name"` // Affects YAML field names too.
Age int `json:"age"`
}
func main() {
// Marshal a Person struct to YAML.
p := Person{"John", 30}
y, err := yaml.Marshal(p)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
age: 30
name: John
*/
// Unmarshal the YAML back into a Person struct.
var p2 Person
err = yaml.Unmarshal(y, &p2)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(p2)
/* Output:
{John 30}
*/
}
```
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
```go
package main
import (
"fmt"
"sigs.k8s.io/yaml"
)
func main() {
j := []byte(`{"name": "John", "age": 30}`)
y, err := yaml.JSONToYAML(j)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(y))
/* Output:
age: 30
name: John
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
fmt.Printf("err: %v\n", err)
return
}
fmt.Println(string(j2))
/* Output:
{"age":30,"name":"John"}
*/
}
```

9
vendor/sigs.k8s.io/yaml/RELEASE.md generated vendored Normal file

@@ -0,0 +1,9 @@
# Release Process
The `yaml` Project is released on an as-needed basis. The process is as follows:
1. An issue is opened proposing a new release, with a changelog since the last release
1. All [OWNERS](OWNERS) must LGTM this release
1. An OWNER runs `git tag -s $VERSION`, inserts the changelog, and pushes the tag with `git push $VERSION`
1. The release issue is closed
1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released`

17
vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS generated vendored Normal file

@@ -0,0 +1,17 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Team to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
cjcullen
jessfraz
liggitt
philips
tallclair

3
vendor/sigs.k8s.io/yaml/code-of-conduct.md generated vendored Normal file

@@ -0,0 +1,3 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

501
vendor/sigs.k8s.io/yaml/fields.go generated vendored Normal file

@@ -0,0 +1,501 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package yaml
import (
"bytes"
"encoding"
"encoding/json"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
// indirect walks down 'value' allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
// If 'value' is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() {
value = value.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if value.Kind() == reflect.Interface && !value.IsNil() {
element := value.Elem()
if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) {
value = element
continue
}
}
if value.Kind() != reflect.Ptr {
break
}
if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() {
break
}
if value.IsNil() {
if value.CanSet() {
value.Set(reflect.New(value.Type().Elem()))
} else {
value = reflect.New(value.Type().Elem())
}
}
if value.Type().NumMethod() > 0 {
if u, ok := value.Interface().(json.Unmarshaler); ok {
return u, nil, reflect.Value{}
}
if u, ok := value.Interface().(encoding.TextUnmarshaler); ok {
return nil, u, reflect.Value{}
}
}
value = value.Elem()
}
return nil, nil, value
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
tag bool
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
func fillField(f field) field {
f.nameBytes = []byte(f.name)
f.equalFold = foldFunc(f.nameBytes)
return f
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
var count map[reflect.Type]int
var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, fillField(field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: opts.Contains("string"),
}))
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
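// Example (illustrative sketch of the flattening above; types are hypothetical):
//
//	type Base struct {
//		ID string `json:"id"`
//	}
//	type Widget struct {
//		Base
//		Name string `json:"name"`
//	}
//
// typeFields(reflect.TypeOf(Widget{})) yields the fields "id" (index [0 0])
// and "name" (index [1]): the embedded Base is queued at the first level and
// its fields are folded in on the next BFS round.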
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
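// Example (illustrative): given
//
//	type Inner struct {
//		Name string
//	}
//	type Outer struct {
//		Inner
//		Name string
//	}
//
// both candidates are named "Name", but Outer.Name has the shorter index
// (depth 1 vs. 2), so it dominates and Inner.Name is dropped. If two fields
// with the same name sit at the same depth and neither (or both) is tagged,
// dominantField reports false and the name is omitted entirely.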
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
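// ANDing an ASCII letter with caseMask clears the 0x20 bit, mapping
// lowercase onto uppercase: 'a'&caseMask == 'A' and 'A'&caseMask == 'A'.
// The fold functions below rely on this to compare letters case-insensitively.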
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A 'K' Kelvin sign
//
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
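// Example (illustrative): foldFunc([]byte("Kind")) returns equalFoldRight
// (the 'K' can fold to the Kelvin sign); foldFunc([]byte("field_name"))
// returns asciiEqualFold (the underscore is a non-letter);
// foldFunc([]byte("name")) returns simpleLetterEqualFold; and any
// non-ASCII byte in the key falls back to bytes.EqualFold.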
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
return len(t) == 0
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether the comma-separated list of options
// contains optionName. The name must appear as a whole item,
// bounded by the start or end of the string or by commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
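// Example (illustrative): for a field tagged `json:"name,omitempty,string"`,
// parseTag returns ("name", tagOptions("omitempty,string")); then
// Contains("omitempty") and Contains("string") report true, while
// Contains("omit") reports false, because options only match whole
// comma-separated items.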

426
vendor/sigs.k8s.io/yaml/yaml.go generated vendored Normal file
View File

@ -0,0 +1,426 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package yaml
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
"strconv"
"go.yaml.in/yaml/v2"
)
// Marshal marshals obj into JSON using stdlib json.Marshal, and then converts JSON to YAML using JSONToYAML (see that method for more reference)
func Marshal(obj interface{}) ([]byte, error) {
jsonBytes, err := json.Marshal(obj)
if err != nil {
return nil, fmt.Errorf("error marshaling into JSON: %w", err)
}
return JSONToYAML(jsonBytes)
}
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj. Options for the
// standard library json.Decoder can be optionally specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer.
//
// Important notes about the Unmarshal logic:
//
// - Decoding is case-insensitive, unlike the rest of Kubernetes API machinery, as this is using the stdlib json library. This might be confusing to users.
// - This decodes any number into a float64 when the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}, even if the number is an integer. This means integers above +/- 2^53 will lose precision when round-tripping. Pass a JSONOpt that calls d.UseNumber() to avoid this.
// - Duplicate fields, including case-insensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to be. See UnmarshalStrict for an alternative.
// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override.
// - As per the YAML 1.1 specification, which the underlying yaml.v2 library implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly.
// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML to JSON conversion process.
// - There are no compatibility guarantees for returned error values.
func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...)
}
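// Example (illustrative sketch; the Config type and inputs are hypothetical):
//
//	type Config struct {
//		Name     string `json:"name"`
//		Replicas int    `json:"replicas"`
//	}
//
//	var c Config
//	err := Unmarshal([]byte("name: demo\nreplicas: 3\n"), &c)
//	// c == Config{Name: "demo", Replicas: 3}
//
// To keep large integers exact when decoding into an untyped target, pass a
// JSONOpt that switches the decoder to json.Number:
//
//	useNumber := func(d *json.Decoder) *json.Decoder { d.UseNumber(); return d }
//	var v interface{}
//	err = Unmarshal(yamlBytes, &v, useNumber)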
// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions:
//
// - Duplicate fields in an object yield an error. This is according to the YAML specification.
// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error.
func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...)
}
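// Example (illustrative, reusing the hypothetical Config sketched above):
//
//	err := UnmarshalStrict([]byte("name: a\nname: b\n"), &c) // duplicate key: error
//	err = UnmarshalStrict([]byte("unknown: x\n"), &c)        // unknown field: error
//
// Plain Unmarshal would accept both inputs silently.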
// unmarshal unmarshals the given YAML byte stream into the given interface,
// optionally performing the unmarshalling strictly
func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error {
jsonTarget := reflect.ValueOf(obj)
jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn)
if err != nil {
return fmt.Errorf("error converting YAML to JSON: %w", err)
}
err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...)
if err != nil {
return fmt.Errorf("error unmarshaling JSON: %w", err)
}
return nil
}
// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
d := json.NewDecoder(reader)
for _, opt := range opts {
d = opt(d)
}
if err := d.Decode(&obj); err != nil {
return fmt.Errorf("while decoding JSON: %w", err)
}
return nil
}
// JSONToYAML converts JSON to YAML. Notable implementation details:
//
// - Duplicate fields are case-sensitively ignored in an undefined order.
// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name.
// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
var jsonObj interface{}
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}, it just picks float64
// universally. go-yaml does go through the effort of picking the right
// number type, so we can preserve number type throughout this process.
err := yaml.Unmarshal(j, &jsonObj)
if err != nil {
return nil, err
}
// Marshal this object into YAML.
yamlBytes, err := yaml.Marshal(jsonObj)
if err != nil {
return nil, err
}
return yamlBytes, nil
}
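// Example (illustrative; keys come out in yaml.v2's sorted order):
//
//	y, err := JSONToYAML([]byte(`{"name":"demo","replicas":3}`))
//	// y == []byte("name: demo\nreplicas: 3\n"), err == nil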
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Some things YAML can do that are not supported by JSON:
// - In YAML you can have binary and null keys in your maps; these are invalid
// in JSON and cause an error during conversion. Int, bool and float keys, by contrast, are converted to strings implicitly.
// - Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
// - And more... read the YAML specification for more details.
//
// Notable about the implementation:
//
// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to be. See YAMLToJSONStrict for an alternative.
// - As per the YAML 1.1 specification, which the underlying yaml.v2 library implements, literal 'yes' and 'no' strings without quotation marks will be converted to true/false implicitly.
// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
// - There are no compatibility guarantees for returned error values.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSONTarget(y, nil, yaml.Unmarshal)
}
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict)
}
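// Example (illustrative): non-string keys and YAML 1.1 booleans are
// normalized during the conversion:
//
//	j, err := YAMLToJSON([]byte("1: one\nenabled: yes\n"))
//	// j == []byte(`{"1":"one","enabled":true}`), err == nil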
func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
err := unmarshalFn(yamlBytes, &yamlObj)
if err != nil {
return nil, err
}
// YAML objects are not completely compatible with JSON objects (e.g. you
// can have non-string keys in YAML). So, convert the YAML-compatible object
// to a JSON-compatible object, failing with an error if irrecoverable
// incompatibilities happen along the way.
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
if err != nil {
return nil, err
}
// Convert this object to JSON and return the data.
jsonBytes, err := json.Marshal(jsonObj)
if err != nil {
return nil, err
}
return jsonBytes, nil
}
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
var err error
// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
// interface). We pass decodingNull as false because we're not actually
// decoding into the value, we're just checking if the ultimate target is a
// string.
if jsonTarget != nil {
jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false)
// We have a JSON or Text Unmarshaler at this level, so we can't be trying
// to decode into a string.
if jsonUnmarshaler != nil || textUnmarshaler != nil {
jsonTarget = nil
} else {
jsonTarget = &pointerValue
}
}
// If yamlObj is a number or a boolean, check if jsonTarget is a string -
// if so, coerce. Else return normal.
// If yamlObj is a map or array, find the field that each key is
// unmarshaling to, and when you recurse pass the reflect.Value for that
// field back into this function.
switch typedYAMLObj := yamlObj.(type) {
case map[interface{}]interface{}:
// JSON does not support arbitrary keys in a map, so we must convert
// these keys to strings.
//
// From my reading of go-yaml v2 (specifically the resolve function),
// keys can only have the types string, int, int64, float64, binary
// (unsupported), or null (unsupported).
strMap := make(map[string]interface{})
for k, v := range typedYAMLObj {
// Resolve the key to a string first.
var keyString string
switch typedKey := k.(type) {
case string:
keyString = typedKey
case int:
keyString = strconv.Itoa(typedKey)
case int64:
// go-yaml will only return an int64 as a key if the system
// architecture is 32-bit and the key's value does not fit in a
// 32-bit int. Otherwise the key type will simply be int.
keyString = strconv.FormatInt(typedKey, 10)
case float64:
// Use the same float-to-string conversion that the go-yaml
// library itself applies when marshaling, so float keys
// round-trip identically.
s := strconv.FormatFloat(typedKey, 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"
case "-Inf":
s = "-.inf"
case "NaN":
s = ".nan"
}
keyString = s
case bool:
if typedKey {
keyString = "true"
} else {
keyString = "false"
}
default:
return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v",
reflect.TypeOf(k), k, v)
}
// jsonTarget should be a struct or a map. If it's a struct, find
// the field it's going to map to and pass its reflect.Value. If
// it's a map, find the element type of the map and pass the
// reflect.Value created from that type. If it's neither, just pass
// nil - JSON conversion will error for us if it's a real issue.
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Struct {
keyBytes := []byte(keyString)
// Find the field that the JSON library would use.
var f *field
fields := cachedTypeFields(t.Type())
for i := range fields {
ff := &fields[i]
if bytes.Equal(ff.nameBytes, keyBytes) {
f = ff
break
}
// Do case-insensitive comparison.
if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
f = ff
}
}
if f != nil {
// Find the reflect.Value of the most preferential
// struct field.
jtf := t.Field(f.index[0])
strMap[keyString], err = convertToJSONableObject(v, &jtf)
if err != nil {
return nil, err
}
continue
}
} else if t.Kind() == reflect.Map {
// Create a zero value of the map's element type to use as
// the JSON target.
jtv := reflect.Zero(t.Type().Elem())
strMap[keyString], err = convertToJSONableObject(v, &jtv)
if err != nil {
return nil, err
}
continue
}
}
strMap[keyString], err = convertToJSONableObject(v, nil)
if err != nil {
return nil, err
}
}
return strMap, nil
case []interface{}:
// We need to recurse into arrays in case there are any
// map[interface{}]interface{}'s inside and to convert any
// numbers to strings.
// If jsonTarget is a slice (which it really should be), find the
// thing it's going to map to. If it's not a slice, just pass nil
// - JSON conversion will error for us if it's a real issue.
var jsonSliceElemValue *reflect.Value
if jsonTarget != nil {
t := *jsonTarget
if t.Kind() == reflect.Slice {
// By default slices point to nil, but we need a reflect.Value
// pointing to a value of the slice type, so we create one here.
ev := reflect.Indirect(reflect.New(t.Type().Elem()))
jsonSliceElemValue = &ev
}
}
// Make and use a new array.
arr := make([]interface{}, len(typedYAMLObj))
for i, v := range typedYAMLObj {
arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
if err != nil {
return nil, err
}
}
return arr, nil
default:
// If the target type is a string and the YAML type is a number,
// convert the YAML type to a string.
if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
// Based on my reading of go-yaml, it may return int, int64,
// float64, or uint64.
var s string
switch typedVal := typedYAMLObj.(type) {
case int:
s = strconv.FormatInt(int64(typedVal), 10)
case int64:
s = strconv.FormatInt(typedVal, 10)
case float64:
s = strconv.FormatFloat(typedVal, 'g', -1, 32)
case uint64:
s = strconv.FormatUint(typedVal, 10)
case bool:
if typedVal {
s = "true"
} else {
s = "false"
}
}
if len(s) > 0 {
yamlObj = interface{}(s)
}
}
return yamlObj, nil
}
}
// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice,
// without going through a byte representation. A nil or empty map[string]interface{} input is
// converted to yaml.MapSlice(nil).
//
// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice.
//
// int64 and float64 are down-cast following the logic of github.com/go-yaml/yaml:
// - float64s are down-cast as far as possible without data loss to int, int64, or uint64.
// - int64s are down-cast to int if possible without data loss.
//
// Big int/int64/uint64 values do not lose precision here, unlike in the JSON-YAML byte round-tripping case.
//
// string, bool and any other types are unchanged.
func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice {
if len(j) == 0 {
return nil
}
ret := make(yaml.MapSlice, 0, len(j))
for k, v := range j {
ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)})
}
return ret
}
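// Example (illustrative):
//
//	ms := JSONObjectToYAMLObject(map[string]interface{}{"replicas": float64(3)})
//	// ms == yaml.MapSlice{{Key: "replicas", Value: 3}}: the float64 produced
//	// by JSON decoding is down-cast to a plain int because it carries no
//	// fractional part.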
func jsonToYAMLValue(j interface{}) interface{} {
switch j := j.(type) {
case map[string]interface{}:
if j == nil {
return interface{}(nil)
}
return JSONObjectToYAMLObject(j)
case []interface{}:
if j == nil {
return interface{}(nil)
}
ret := make([]interface{}, len(j))
for i := range j {
ret[i] = jsonToYAMLValue(j[i])
}
return ret
case float64:
// replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151
if i64 := int64(j); j == float64(i64) {
if i := int(i64); i64 == int64(i) {
return i
}
return i64
}
if ui64 := uint64(j); j == float64(ui64) {
return ui64
}
return j
case int64:
if i := int(j); j == int64(i) {
return i
}
return j
}
return j
}
// DisallowUnknownFields configures the JSON decoder to error out if unknown
// fields come along, instead of dropping them by default.
func DisallowUnknownFields(d *json.Decoder) *json.Decoder {
d.DisallowUnknownFields()
return d
}
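// Example (illustrative): DisallowUnknownFields has the JSONOpt signature, so
// it can be passed straight to Unmarshal, which is exactly how UnmarshalStrict
// uses it:
//
//	var c Config // hypothetical struct from the Unmarshal example above
//	err := Unmarshal(yamlBytes, &c, DisallowUnknownFields)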