diff --git a/go.mod b/go.mod
index e73e53e76..37cb1d9f2 100644
--- a/go.mod
+++ b/go.mod
@@ -34,7 +34,7 @@ require (
 	github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8
 	github.com/digitalocean/godo v1.11.1
 	github.com/dylanmei/iso8601 v0.1.0 // indirect
-	github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
+	github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08 // indirect
 	github.com/exoscale/egoscale v0.18.1
 	github.com/fatih/camelcase v1.0.0
 	github.com/fatih/structtag v1.0.0
@@ -42,11 +42,11 @@ require (
 	github.com/go-ole/go-ole v1.2.4 // indirect
 	github.com/go-resty/resty/v2 v2.3.0
 	github.com/gobwas/glob v0.2.3
-	github.com/gofrs/flock v0.7.3
+	github.com/gofrs/flock v0.7.3 // indirect
 	github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
 	github.com/google/go-cmp v0.5.2
 	github.com/google/go-querystring v1.0.0 // indirect
-	github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
+	github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 // indirect
 	github.com/google/uuid v1.1.1
 	github.com/gophercloud/gophercloud v0.12.0
 	github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c
@@ -54,13 +54,13 @@ require (
 	github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
 	github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
 	github.com/hashicorp/aws-sdk-go-base v0.6.0
-	github.com/hashicorp/consul/api v1.4.0
+	github.com/hashicorp/consul/api v1.4.0 // indirect
 	github.com/hashicorp/errwrap v1.0.0
 	github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
 	github.com/hashicorp/go-cleanhttp v0.5.1
 	github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
-	github.com/hashicorp/go-getter/gcs/v2 v2.0.0-20200604122502-a6995fa1edad
-	github.com/hashicorp/go-getter/s3/v2 v2.0.0-20200604122502-a6995fa1edad
+	github.com/hashicorp/go-getter/gcs/v2 v2.0.0-20200604122502-a6995fa1edad // indirect
+	github.com/hashicorp/go-getter/s3/v2 v2.0.0-20200604122502-a6995fa1edad // indirect
 	github.com/hashicorp/go-getter/v2 v2.0.0-20200604122502-a6995fa1edad
 	github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
@@ -73,11 +73,11 @@ require (
 	github.com/hashicorp/packer-plugin-sdk v0.0.4
 	github.com/hashicorp/serf v0.9.2 // indirect
 	github.com/hashicorp/vault/api v1.0.4
-	github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
+	github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
 	github.com/hetznercloud/hcloud-go v1.15.1
 	github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4
 	github.com/jdcloud-api/jdcloud-sdk-go v1.9.1-0.20190605102154-3d81a50ca961
-	github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869
+	github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 // indirect
 	github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62
 	github.com/json-iterator/go v1.1.6 // indirect
 	github.com/jtolds/gls v4.2.1+incompatible // indirect
@@ -91,12 +91,12 @@ require (
 	github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88
 	github.com/mattn/go-tty v0.0.0-20191112051231-74040eebce08
 	github.com/mitchellh/cli v1.1.0
-	github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7
+	github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7 // indirect
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/mitchellh/go-testing-interface v1.0.3 // indirect
 	github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed
 	github.com/mitchellh/gox v1.0.1 // indirect
-	github.com/mitchellh/iochan v1.0.0
+	github.com/mitchellh/iochan v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.2.3
 	github.com/mitchellh/panicwrap v1.0.0
 	github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784
@@ -107,13 +107,13 @@ require (
 	github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b
 	github.com/oracle/oci-go-sdk v18.0.0+incompatible
 	github.com/outscale/osc-sdk-go/osc v0.0.0-20200722135656-d654809d0699
-	github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a
+	github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a // indirect
 	github.com/pierrec/lz4 v2.0.5+incompatible
 	github.com/pkg/errors v0.9.1
-	github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca
+	github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca // indirect
 	github.com/posener/complete v1.2.3
 	github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
-	github.com/ryanuber/go-glob v1.0.0
+	github.com/ryanuber/go-glob v1.0.0 // indirect
 	github.com/satori/go.uuid v1.2.0 // indirect
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7
 	github.com/shirou/gopsutil v2.18.12+incompatible
@@ -125,7 +125,7 @@ require (
 	github.com/tencentcloud/tencentcloud-sdk-go v3.0.222+incompatible
 	github.com/ucloud/ucloud-sdk-go v0.16.3
 	github.com/ufilesdk-dev/ufile-gosdk v0.0.0-20190830075812-b4dbc4ef43a6
-	github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1
+	github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1 // indirect
 	github.com/ulikunitz/xz v0.5.5
 	github.com/vmware/govmomi v0.23.1
 	github.com/xanzy/go-cloudstack v0.0.0-20190526095453-42f262b63ed0

(See the editor's note at the end of this section for what the `// indirect` markers added above mean.)

diff --git a/vendor/github.com/antchfx/xpath/.gitignore b/vendor/github.com/antchfx/xpath/.gitignore
deleted file mode 100644
index 4d5d27b1d..000000000
--- a/vendor/github.com/antchfx/xpath/.gitignore
+++ /dev/null
@@ -1,32 +0,0 @@
-# vscode
-.vscode
-debug
-*.test
-
-./build
-
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/.travis.yml b/vendor/github.com/antchfx/xpath/.travis.yml
deleted file mode 100644
index 126848c8e..000000000
--- a/vendor/github.com/antchfx/xpath/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
-  - 1.6
-  - 1.7
-  - 1.8
-
-install:
-  - go get github.com/mattn/goveralls
-
-script:
-  - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/LICENSE b/vendor/github.com/antchfx/xpath/LICENSE
deleted file mode 100644
index e14c37141..000000000
--- a/vendor/github.com/antchfx/xpath/LICENSE
+++ /dev/null
@@ -1,17 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/README.md b/vendor/github.com/antchfx/xpath/README.md
deleted file mode 100644
index 41bf8c6ff..000000000
--- a/vendor/github.com/antchfx/xpath/README.md
+++ /dev/null
@@ -1,119 +0,0 @@
-XPath
-====
-[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
-[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
-[![Build Status](https://travis-ci.org/antchfx/xpath.svg?branch=master)](https://travis-ci.org/antchfx/xpath)
-[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
-
-XPath is a Go package that provides node selection from XML, HTML, and other documents using XPath expressions.
-
-[XQuery](https://github.com/antchfx/xquery) : lets you extract data from HTML/XML documents using this XPath package.
-
-### Features
-
-#### The basic XPath patterns.
-
-> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
-
-- `node` : Selects all child elements with nodeName of node.
-
-- `*` : Selects all child elements.
-
-- `@attr` : Selects the attribute attr.
-
-- `@*` : Selects all attributes.
-
-- `node()` : Matches a node of any type.
-
-- `text()` : Matches a text node.
-
-- `comment()` : Matches a comment.
-
-- `.` : Selects the current node.
-
-- `..` : Selects the parent of the current node.
-
-- `/` : Selects the document node.
-
-- `a[expr]` : Selects only those nodes matching a which also satisfy the expression expr.
-
-- `a[n]` : Selects the nth node matching a. When a filter's expression is a number, XPath selects based on position.
-
-- `a/b` : For each node matching a, add the nodes matching b to the result.
-
-- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
-
-- `//b` : Returns elements in the entire document matching b.
-
-- `a|b` : All nodes matching a or b.
-
-#### Node Axes
-
-- `child::*` : The child axis selects children of the current node.
-
-- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
-
-- `descendant-or-self::*` : Selects descendants including the current node.
-
-- `attribute::*` : Selects attributes of the current element. It is equivalent to @*.
-
-- `following-sibling::*` : Selects nodes after the current node.
-
-- `preceding-sibling::*` : Selects nodes before the current node.
-
-- `following::*` : Selects nodes following the current node in document order, excluding descendants.
-
-- `preceding::*` : Selects nodes preceding the current node in document order, excluding ancestors.
-
-- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
-
-- `ancestor::*` : Selects matching ancestors.
-
-- `ancestor-or-self::*` : Selects ancestors including the current node.
-
-- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
-
-#### Expressions
-
- The package supports three types: number, boolean, and string.
-
-- `path` : Selects nodes based on the path.
-
-- `a = b` : Standard comparisons.
-
-    * a = b    True if a equals b.
-    * a != b   True if a is not equal to b.
-    * a < b    True if a is less than b.
-    * a <= b   True if a is less than or equal to b.
-    * a > b    True if a is greater than b.
-    * a >= b   True if a is greater than or equal to b.
-
-- `a + b` : Arithmetic expressions.
-
-    * `- a`    Unary minus
-    * a + b    Add
-    * a - b    Subtract
-    * a * b    Multiply
-    * a div b  Divide
-    * a mod b  Floating point mod, like Java.
-
-- `(expr)` : Parenthesized expressions.
-
-- `fun(arg1, ..., argn)` : Function calls.
-
-    * position()
-    * last()
-    * count( node-set )
-    * name()
-    * starts-with( string, string )
-    * normalize-space( string )
-    * substring( string , start [, length] )
-    * not( expression )
-    * string-length( [string] )
-    * contains( string, string )
-    * sum( node-set )
-    * concat( string1 , string2 [, stringn]* )
-
-- `a or b` : Boolean or.
-
-- `a and b` : Boolean and.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xpath/build.go b/vendor/github.com/antchfx/xpath/build.go
deleted file mode 100644
index 544e0d49c..000000000
--- a/vendor/github.com/antchfx/xpath/build.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package xpath
-
-import (
-	"errors"
-	"fmt"
-)
-
-type flag int
-
-const (
-	noneFlag flag = iota
-	filterFlag
-)
-
-// builder builds an XPath expression into a query tree.
-type builder struct {
-	depth      int
-	flag       flag
-	firstInput query
-}
-
-// axisPredicate creates a predicate function for the given axis node.
-func axisPredicate(root *axisNode) func(NodeNavigator) bool {
-	// Get the current axis node type.
-	typ := ElementNode
-	if root.AxeType == "attribute" {
-		typ = AttributeNode
-	} else {
-		switch root.Prop {
-		case "comment":
-			typ = CommentNode
-		case "text":
-			typ = TextNode
-			// case "processing-instruction":
-			// 	typ = ProcessingInstructionNode
-		case "node":
-			typ = ElementNode
-		}
-	}
-	predicate := func(n NodeNavigator) bool {
-		if typ == n.NodeType() || typ == TextNode {
-			if root.LocalName == "" || (root.LocalName == n.LocalName() && root.Prefix == n.Prefix()) {
-				return true
-			}
-		}
-		return false
-	}
-
-	return predicate
-}
-
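Editor's aside, not part of this diff: the README removed above documents the expression syntax but never shows the package in use. The minimal sketch below assumes the sibling package github.com/antchfx/xmlquery, which provides a NodeNavigator over parsed XML documents; the XML literal and everything in main are illustrative only.

package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xmlquery" // assumed sibling package implementing NodeNavigator for XML
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(
		`<library><book id="1"><title>Go</title></book><book id="2"><title>XPath</title></book></library>`))
	if err != nil {
		panic(err)
	}
	// The `//b` pattern from the README: every <book> element in the document.
	for _, book := range xmlquery.Find(doc, "//book") {
		fmt.Println(book.SelectAttr("id")) // prints 1, then 2
	}
	// The `a[expr]` predicate form combined with a child step.
	if title := xmlquery.FindOne(doc, "//book[@id='2']/title"); title != nil {
		fmt.Println(title.InnerText()) // prints XPath
	}
}

The deleted build.go is the compiler for such expressions: parse() produces a node tree and the builder below lowers it into query objects.
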
-// processAxisNode processes a query for the XPath axis node.
-func (b *builder) processAxisNode(root *axisNode) (query, error) {
-	var (
-		err       error
-		qyInput   query
-		qyOutput  query
-		predicate = axisPredicate(root)
-	)
-
-	if root.Input == nil {
-		qyInput = &contextQuery{}
-	} else {
-		if b.flag&filterFlag == 0 {
-			if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
-				if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
-					var qyGrandInput query
-					if input.Input != nil {
-						qyGrandInput, _ = b.processNode(input.Input)
-					} else {
-						qyGrandInput = &contextQuery{}
-					}
-					qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
-					return qyOutput, nil
-				}
-			}
-		}
-		qyInput, err = b.processNode(root.Input)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	switch root.AxeType {
-	case "ancestor":
-		qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate}
-	case "ancestor-or-self":
-		qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true}
-	case "attribute":
-		qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate}
-	case "child":
-		filter := func(n NodeNavigator) bool {
-			v := predicate(n)
-			switch root.Prop {
-			case "text":
-				v = v && n.NodeType() == TextNode
-			case "node":
-				v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
-			case "comment":
-				v = v && n.NodeType() == CommentNode
-			}
-			return v
-		}
-		qyOutput = &childQuery{Input: qyInput, Predicate: filter}
-	case "descendant":
-		qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate}
-	case "descendant-or-self":
-		qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true}
-	case "following":
-		qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
-	case "following-sibling":
-		qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
-	case "parent":
-		qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
-	case "preceding":
-		qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
-	case "preceding-sibling":
-		qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
-	case "self":
-		qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
-	case "namespace":
-		// The namespace axis is not supported; this case intentionally produces no query.
-	default:
-		err = fmt.Errorf("unknown axe type: %s", root.AxeType)
-		return nil, err
-	}
-	return qyOutput, nil
-}
-
-// processFilterNode builds a query for the XPath filter predicate.
-func (b *builder) processFilterNode(root *filterNode) (query, error) {
-	b.flag |= filterFlag
-
-	qyInput, err := b.processNode(root.Input)
-	if err != nil {
-		return nil, err
-	}
-	qyCond, err := b.processNode(root.Condition)
-	if err != nil {
-		return nil, err
-	}
-	qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond}
-	return qyOutput, nil
-}
-
-// processFunctionNode processes a query for the XPath function node.
-func (b *builder) processFunctionNode(root *functionNode) (query, error) { - var qyOutput query - switch root.FuncName { - case "starts-with": - arg1, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - arg2, err := b.processNode(root.Args[1]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)} - case "contains": - arg1, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - arg2, err := b.processNode(root.Args[1]) - if err != nil { - return nil, err - } - - qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)} - case "substring": - //substring( string , start [, length] ) - if len(root.Args) < 2 { - return nil, errors.New("xpath: substring function must have at least two parameter") - } - var ( - arg1, arg2, arg3 query - err error - ) - if arg1, err = b.processNode(root.Args[0]); err != nil { - return nil, err - } - if arg2, err = b.processNode(root.Args[1]); err != nil { - return nil, err - } - if len(root.Args) == 3 { - if arg3, err = b.processNode(root.Args[2]); err != nil { - return nil, err - } - } - qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)} - case "string-length": - // string-length( [string] ) - if len(root.Args) < 1 { - return nil, errors.New("xpath: string-length function must have at least one parameter") - } - arg1, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)} - case "normalize-space": - if len(root.Args) == 0 { - return nil, errors.New("xpath: normalize-space function must have at least one parameter") - } - argQuery, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc} - case "not": - if len(root.Args) == 0 { - return nil, errors.New("xpath: not function must have at least one parameter") - } - argQuery, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: argQuery, Func: notFunc} - case "name": - qyOutput = &functionQuery{Input: b.firstInput, Func: nameFunc} - case "last": - qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc} - case "position": - qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc} - case "count": - //if b.firstInput == nil { - // return nil, errors.New("xpath: expression must evaluate to node-set") - //} - if len(root.Args) == 0 { - return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets") - } - argQuery, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: argQuery, Func: countFunc} - case "sum": - if len(root.Args) == 0 { - return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets") - } - argQuery, err := b.processNode(root.Args[0]) - if err != nil { - return nil, err - } - qyOutput = &functionQuery{Input: argQuery, Func: sumFunc} - case "concat": - if len(root.Args) < 2 { - return nil, fmt.Errorf("xpath: concat() must have at least two arguments") - } - var args []query - for _, v := range root.Args { - q, err := b.processNode(v) - if err != nil { - return nil, err - } - args = append(args, q) - } - qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)} - default: - return nil, fmt.Errorf("not yet support this function %s()", root.FuncName) - } - 
return qyOutput, nil -} - -func (b *builder) processOperatorNode(root *operatorNode) (query, error) { - left, err := b.processNode(root.Left) - if err != nil { - return nil, err - } - right, err := b.processNode(root.Right) - if err != nil { - return nil, err - } - var qyOutput query - switch root.Op { - case "+", "-", "div", "mod": // Numeric operator - var exprFunc func(interface{}, interface{}) interface{} - switch root.Op { - case "+": - exprFunc = plusFunc - case "-": - exprFunc = minusFunc - case "div": - exprFunc = divFunc - case "mod": - exprFunc = modFunc - } - qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc} - case "=", ">", ">=", "<", "<=", "!=": - var exprFunc func(iterator, interface{}, interface{}) interface{} - switch root.Op { - case "=": - exprFunc = eqFunc - case ">": - exprFunc = gtFunc - case ">=": - exprFunc = geFunc - case "<": - exprFunc = ltFunc - case "<=": - exprFunc = leFunc - case "!=": - exprFunc = neFunc - } - qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc} - case "or", "and", "|": - isOr := false - if root.Op == "or" || root.Op == "|" { - isOr = true - } - qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr} - } - return qyOutput, nil -} - -func (b *builder) processNode(root node) (q query, err error) { - if b.depth = b.depth + 1; b.depth > 1024 { - err = errors.New("the xpath expressions is too complex") - return - } - - switch root.Type() { - case nodeConstantOperand: - n := root.(*operandNode) - q = &constantQuery{Val: n.Val} - case nodeRoot: - q = &contextQuery{Root: true} - case nodeAxis: - q, err = b.processAxisNode(root.(*axisNode)) - b.firstInput = q - case nodeFilter: - q, err = b.processFilterNode(root.(*filterNode)) - case nodeFunction: - q, err = b.processFunctionNode(root.(*functionNode)) - case nodeOperator: - q, err = b.processOperatorNode(root.(*operatorNode)) - } - return -} - -// build builds a specified XPath expressions expr. -func build(expr string) (q query, err error) { - defer func() { - if e := recover(); e != nil { - switch x := e.(type) { - case string: - err = errors.New(x) - case error: - err = x - default: - err = errors.New("unknown panic") - } - } - }() - root := parse(expr) - b := &builder{} - return b.processNode(root) -} diff --git a/vendor/github.com/antchfx/xpath/func.go b/vendor/github.com/antchfx/xpath/func.go deleted file mode 100644 index e28563401..000000000 --- a/vendor/github.com/antchfx/xpath/func.go +++ /dev/null @@ -1,254 +0,0 @@ -package xpath - -import ( - "errors" - "strconv" - "strings" -) - -// The XPath function list. - -func predicate(q query) func(NodeNavigator) bool { - type Predicater interface { - Test(NodeNavigator) bool - } - if p, ok := q.(Predicater); ok { - return p.Test - } - return func(NodeNavigator) bool { return true } -} - -// positionFunc is a XPath Node Set functions position(). -func positionFunc(q query, t iterator) interface{} { - var ( - count = 1 - node = t.Current() - ) - test := predicate(q) - for node.MoveToPrevious() { - if test(node) { - count++ - } - } - return float64(count) -} - -// lastFunc is a XPath Node Set functions last(). -func lastFunc(q query, t iterator) interface{} { - var ( - count = 0 - node = t.Current() - ) - node.MoveToFirst() - test := predicate(q) - for { - if test(node) { - count++ - } - if !node.MoveToNext() { - break - } - } - return float64(count) -} - -// countFunc is a XPath Node Set functions count(node-set). 
-func countFunc(q query, t iterator) interface{} {
-	var count = 0
-	test := predicate(q)
-	switch typ := q.Evaluate(t).(type) {
-	case query:
-		for node := typ.Select(t); node != nil; node = typ.Select(t) {
-			if test(node) {
-				count++
-			}
-		}
-	}
-	return float64(count)
-}
-
-// sumFunc is the XPath node-set function sum(node-set).
-func sumFunc(q query, t iterator) interface{} {
-	var sum float64
-	switch typ := q.Evaluate(t).(type) {
-	case query:
-		for node := typ.Select(t); node != nil; node = typ.Select(t) {
-			if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
-				sum += v
-			}
-		}
-	case float64:
-		sum = typ
-	case string:
-		// Only use the value if it parses as a number.
-		if v, err := strconv.ParseFloat(typ, 64); err == nil {
-			sum = v
-		}
-	}
-	return sum
-}
-
-// nameFunc is the XPath function name([node-set]).
-func nameFunc(q query, t iterator) interface{} {
-	return t.Current().LocalName()
-}
-
-// startwithFunc is the XPath function starts-with(string, string).
-func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
-	return func(q query, t iterator) interface{} {
-		var (
-			m, n string
-			ok   bool
-		)
-		switch typ := arg1.Evaluate(t).(type) {
-		case string:
-			m = typ
-		case query:
-			node := typ.Select(t)
-			if node == nil {
-				return false
-			}
-			m = node.Value()
-		default:
-			panic(errors.New("starts-with() function argument type must be string"))
-		}
-		n, ok = arg2.Evaluate(t).(string)
-		if !ok {
-			panic(errors.New("starts-with() function argument type must be string"))
-		}
-		return strings.HasPrefix(m, n)
-	}
-}
-
-// containsFunc is the XPath function contains(string or @attr, string).
-func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
-	return func(q query, t iterator) interface{} {
-		var (
-			m, n string
-			ok   bool
-		)
-
-		switch typ := arg1.Evaluate(t).(type) {
-		case string:
-			m = typ
-		case query:
-			node := typ.Select(t)
-			if node == nil {
-				return false
-			}
-			m = node.Value()
-		default:
-			panic(errors.New("contains() function argument type must be string"))
-		}
-
-		n, ok = arg2.Evaluate(t).(string)
-		if !ok {
-			panic(errors.New("contains() function argument type must be string"))
-		}
-
-		return strings.Contains(m, n)
-	}
-}
-
-// normalizespaceFunc is the XPath function normalize-space(string?).
-func normalizespaceFunc(q query, t iterator) interface{} {
-	var m string
-	switch typ := q.Evaluate(t).(type) {
-	case string:
-		m = typ
-	case query:
-		node := typ.Select(t)
-		if node == nil {
-			return false
-		}
-		m = node.Value()
-	}
-	return strings.TrimSpace(m)
-}
-
-// substringFunc is the XPath substring() function; it returns part of a given string.
-func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} { - return func(q query, t iterator) interface{} { - var m string - switch typ := arg1.Evaluate(t).(type) { - case string: - m = typ - case query: - node := typ.Select(t) - if node == nil { - return false - } - m = node.Value() - } - - var start, length float64 - var ok bool - - if start, ok = arg2.Evaluate(t).(float64); !ok { - panic(errors.New("substring() function first argument type must be int")) - } - if arg3 != nil { - if length, ok = arg3.Evaluate(t).(float64); !ok { - panic(errors.New("substring() function second argument type must be int")) - } - } - if (len(m) - int(start)) < int(length) { - panic(errors.New("substring() function start and length argument out of range")) - } - if length > 0 { - return m[int(start):int(length+start)] - } - return m[int(start):] - } -} - -// stringLengthFunc is XPATH string-length( [string] ) function that returns a number -// equal to the number of characters in a given string. -func stringLengthFunc(arg1 query) func(query, iterator) interface{} { - return func(q query, t iterator) interface{} { - switch v := arg1.Evaluate(t).(type) { - case string: - return float64(len(v)) - case query: - node := v.Select(t) - if node == nil { - break - } - return float64(len(node.Value())) - } - return float64(0) - } -} - -// notFunc is XPATH functions not(expression) function operation. -func notFunc(q query, t iterator) interface{} { - switch v := q.Evaluate(t).(type) { - case bool: - return !v - case query: - node := v.Select(t) - return node == nil - default: - return false - } -} - -// concatFunc is the concat function concatenates two or more -// strings and returns the resulting string. -// concat( string1 , string2 [, stringn]* ) -func concatFunc(args ...query) func(query, iterator) interface{} { - return func(q query, t iterator) interface{} { - var a []string - for _, v := range args { - switch v := v.Evaluate(t).(type) { - case string: - a = append(a, v) - case query: - node := v.Select(t) - if node != nil { - a = append(a, node.Value()) - } - } - } - return strings.Join(a, "") - } -} diff --git a/vendor/github.com/antchfx/xpath/operator.go b/vendor/github.com/antchfx/xpath/operator.go deleted file mode 100644 index 308d3cbcc..000000000 --- a/vendor/github.com/antchfx/xpath/operator.go +++ /dev/null @@ -1,295 +0,0 @@ -package xpath - -import ( - "fmt" - "reflect" - "strconv" -) - -// The XPath number operator function list. - -// valueType is a return value type. 
-type valueType int - -const ( - booleanType valueType = iota - numberType - stringType - nodeSetType -) - -func getValueType(i interface{}) valueType { - v := reflect.ValueOf(i) - switch v.Kind() { - case reflect.Float64: - return numberType - case reflect.String: - return stringType - case reflect.Bool: - return booleanType - default: - if _, ok := i.(query); ok { - return nodeSetType - } - } - panic(fmt.Errorf("xpath unknown value type: %v", v.Kind())) -} - -type logical func(iterator, string, interface{}, interface{}) bool - -var logicalFuncs = [][]logical{ - {cmpBooleanBoolean, nil, nil, nil}, - {nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet}, - {nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet}, - {nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet}, -} - -// number vs number -func cmpNumberNumberF(op string, a, b float64) bool { - switch op { - case "=": - return a == b - case ">": - return a > b - case "<": - return a < b - case ">=": - return a >= b - case "<=": - return a <= b - case "!=": - return a != b - } - return false -} - -// string vs string -func cmpStringStringF(op string, a, b string) bool { - switch op { - case "=": - return a == b - case ">": - return a > b - case "<": - return a < b - case ">=": - return a >= b - case "<=": - return a <= b - case "!=": - return a != b - } - return false -} - -func cmpBooleanBooleanF(op string, a, b bool) bool { - switch op { - case "or": - return a || b - case "and": - return a && b - } - return false -} - -func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool { - a := m.(float64) - b := n.(float64) - return cmpNumberNumberF(op, a, b) -} - -func cmpNumericString(t iterator, op string, m, n interface{}) bool { - a := m.(float64) - b := n.(string) - num, err := strconv.ParseFloat(b, 64) - if err != nil { - panic(err) - } - return cmpNumberNumberF(op, a, num) -} - -func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool { - a := m.(float64) - b := n.(query) - - for { - node := b.Select(t) - if node == nil { - break - } - num, err := strconv.ParseFloat(node.Value(), 64) - if err != nil { - panic(err) - } - if cmpNumberNumberF(op, a, num) { - return true - } - } - return false -} - -func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool { - a := m.(query) - b := n.(float64) - for { - node := a.Select(t) - if node == nil { - break - } - num, err := strconv.ParseFloat(node.Value(), 64) - if err != nil { - panic(err) - } - if cmpNumberNumberF(op, num, b) { - return true - } - } - return false -} - -func cmpNodeSetString(t iterator, op string, m, n interface{}) bool { - a := m.(query) - b := n.(string) - for { - node := a.Select(t) - if node == nil { - break - } - if cmpStringStringF(op, b, node.Value()) { - return true - } - } - return false -} - -func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool { - return false -} - -func cmpStringNumeric(t iterator, op string, m, n interface{}) bool { - a := m.(string) - b := n.(float64) - num, err := strconv.ParseFloat(a, 64) - if err != nil { - panic(err) - } - return cmpNumberNumberF(op, b, num) -} - -func cmpStringString(t iterator, op string, m, n interface{}) bool { - a := m.(string) - b := n.(string) - return cmpStringStringF(op, a, b) -} - -func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool { - a := m.(string) - b := n.(query) - for { - node := b.Select(t) - if node == nil { - break - } - if cmpStringStringF(op, a, node.Value()) { - return true - } - } - return false -} - -func 
cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool { - a := m.(bool) - b := n.(bool) - return cmpBooleanBooleanF(op, a, b) -} - -// eqFunc is an `=` operator. -func eqFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, "=", m, n) -} - -// gtFunc is an `>` operator. -func gtFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, ">", m, n) -} - -// geFunc is an `>=` operator. -func geFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, ">=", m, n) -} - -// ltFunc is an `<` operator. -func ltFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, "<", m, n) -} - -// leFunc is an `<=` operator. -func leFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, "<=", m, n) -} - -// neFunc is an `!=` operator. -func neFunc(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, "!=", m, n) -} - -// orFunc is an `or` operator. -var orFunc = func(t iterator, m, n interface{}) interface{} { - t1 := getValueType(m) - t2 := getValueType(n) - return logicalFuncs[t1][t2](t, "or", m, n) -} - -func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 { - typ := reflect.TypeOf(float64(0)) - a := reflect.ValueOf(m).Convert(typ) - b := reflect.ValueOf(n).Convert(typ) - return cb(a.Float(), b.Float()) -} - -// plusFunc is an `+` operator. -var plusFunc = func(m, n interface{}) interface{} { - return numericExpr(m, n, func(a, b float64) float64 { - return a + b - }) -} - -// minusFunc is an `-` operator. -var minusFunc = func(m, n interface{}) interface{} { - return numericExpr(m, n, func(a, b float64) float64 { - return a - b - }) -} - -// mulFunc is an `*` operator. -var mulFunc = func(m, n interface{}) interface{} { - return numericExpr(m, n, func(a, b float64) float64 { - return a * b - }) -} - -// divFunc is an `DIV` operator. -var divFunc = func(m, n interface{}) interface{} { - return numericExpr(m, n, func(a, b float64) float64 { - return a / b - }) -} - -// modFunc is an 'MOD' operator. -var modFunc = func(m, n interface{}) interface{} { - return numericExpr(m, n, func(a, b float64) float64 { - return float64(int(a) % int(b)) - }) -} diff --git a/vendor/github.com/antchfx/xpath/parse.go b/vendor/github.com/antchfx/xpath/parse.go deleted file mode 100644 index 6103131cb..000000000 --- a/vendor/github.com/antchfx/xpath/parse.go +++ /dev/null @@ -1,1164 +0,0 @@ -package xpath - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "unicode" -) - -// A XPath expression token type. -type itemType int - -const ( - itemComma itemType = iota // ',' - itemSlash // '/' - itemAt // '@' - itemDot // '.' - itemLParens // '(' - itemRParens // ')' - itemLBracket // '[' - itemRBracket // ']' - itemStar // '*' - itemPlus // '+' - itemMinus // '-' - itemEq // '=' - itemLt // '<' - itemGt // '>' - itemBang // '!' - itemDollar // '$' - itemApos // '\'' - itemQuote // '"' - itemUnion // '|' - itemNe // '!=' - itemLe // '<=' - itemGe // '>=' - itemAnd // '&&' - itemOr // '||' - itemDotDot // '..' 
- itemSlashSlash // '//' - itemName // XML Name - itemString // Quoted string constant - itemNumber // Number constant - itemAxe // Axe (like child::) - itemEof // END -) - -// A node is an XPath node in the parse tree. -type node interface { - Type() nodeType -} - -// nodeType identifies the type of a parse tree node. -type nodeType int - -func (t nodeType) Type() nodeType { - return t -} - -const ( - nodeRoot nodeType = iota - nodeAxis - nodeFilter - nodeFunction - nodeOperator - nodeVariable - nodeConstantOperand -) - -type parser struct { - r *scanner - d int -} - -// newOperatorNode returns new operator node OperatorNode. -func newOperatorNode(op string, left, right node) node { - return &operatorNode{nodeType: nodeOperator, Op: op, Left: left, Right: right} -} - -// newOperand returns new constant operand node OperandNode. -func newOperandNode(v interface{}) node { - return &operandNode{nodeType: nodeConstantOperand, Val: v} -} - -// newAxisNode returns new axis node AxisNode. -func newAxisNode(axeTyp, localName, prefix, prop string, n node) node { - return &axisNode{ - nodeType: nodeAxis, - LocalName: localName, - Prefix: prefix, - AxeType: axeTyp, - Prop: prop, - Input: n, - } -} - -// newVariableNode returns new variable node VariableNode. -func newVariableNode(prefix, name string) node { - return &variableNode{nodeType: nodeVariable, Name: name, Prefix: prefix} -} - -// newFilterNode returns a new filter node FilterNode. -func newFilterNode(n, m node) node { - return &filterNode{nodeType: nodeFilter, Input: n, Condition: m} -} - -// newRootNode returns a root node. -func newRootNode(s string) node { - return &rootNode{nodeType: nodeRoot, slash: s} -} - -// newFunctionNode returns function call node. -func newFunctionNode(name, prefix string, args []node) node { - return &functionNode{nodeType: nodeFunction, Prefix: prefix, FuncName: name, Args: args} -} - -// testOp reports whether current item name is an operand op. -func testOp(r *scanner, op string) bool { - return r.typ == itemName && r.prefix == "" && r.name == op -} - -func isPrimaryExpr(r *scanner) bool { - switch r.typ { - case itemString, itemNumber, itemDollar, itemLParens: - return true - case itemName: - return r.canBeFunc && !isNodeType(r) - } - return false -} - -func isNodeType(r *scanner) bool { - switch r.name { - case "node", "text", "processing-instruction", "comment": - return r.prefix == "" - } - return false -} - -func isStep(item itemType) bool { - switch item { - case itemDot, itemDotDot, itemAt, itemAxe, itemStar, itemName: - return true - } - return false -} - -func checkItem(r *scanner, typ itemType) { - if r.typ != typ { - panic(fmt.Sprintf("%s has an invalid token", r.text)) - } -} - -// parseExpression parsing the expression with input node n. -func (p *parser) parseExpression(n node) node { - if p.d = p.d + 1; p.d > 200 { - panic("the xpath query is too complex(depth > 200)") - } - n = p.parseOrExpr(n) - p.d-- - return n -} - -// next scanning next item on forward. 
-func (p *parser) next() bool { - return p.r.nextItem() -} - -func (p *parser) skipItem(typ itemType) { - checkItem(p.r, typ) - p.next() -} - -// OrExpr ::= AndExpr | OrExpr 'or' AndExpr -func (p *parser) parseOrExpr(n node) node { - opnd := p.parseAndExpr(n) - for { - if !testOp(p.r, "or") { - break - } - p.next() - opnd = newOperatorNode("or", opnd, p.parseAndExpr(n)) - } - return opnd -} - -// AndExpr ::= EqualityExpr | AndExpr 'and' EqualityExpr -func (p *parser) parseAndExpr(n node) node { - opnd := p.parseEqualityExpr(n) - for { - if !testOp(p.r, "and") { - break - } - p.next() - opnd = newOperatorNode("and", opnd, p.parseEqualityExpr(n)) - } - return opnd -} - -// EqualityExpr ::= RelationalExpr | EqualityExpr '=' RelationalExpr | EqualityExpr '!=' RelationalExpr -func (p *parser) parseEqualityExpr(n node) node { - opnd := p.parseRelationalExpr(n) -Loop: - for { - var op string - switch p.r.typ { - case itemEq: - op = "=" - case itemNe: - op = "!=" - default: - break Loop - } - p.next() - opnd = newOperatorNode(op, opnd, p.parseRelationalExpr(n)) - } - return opnd -} - -// RelationalExpr ::= AdditiveExpr | RelationalExpr '<' AdditiveExpr | RelationalExpr '>' AdditiveExpr -// | RelationalExpr '<=' AdditiveExpr -// | RelationalExpr '>=' AdditiveExpr -func (p *parser) parseRelationalExpr(n node) node { - opnd := p.parseAdditiveExpr(n) -Loop: - for { - var op string - switch p.r.typ { - case itemLt: - op = "<" - case itemGt: - op = ">" - case itemLe: - op = "<=" - case itemGe: - op = ">=" - default: - break Loop - } - p.next() - opnd = newOperatorNode(op, opnd, p.parseAdditiveExpr(n)) - } - return opnd -} - -// AdditiveExpr ::= MultiplicativeExpr | AdditiveExpr '+' MultiplicativeExpr | AdditiveExpr '-' MultiplicativeExpr -func (p *parser) parseAdditiveExpr(n node) node { - opnd := p.parseMultiplicativeExpr(n) -Loop: - for { - var op string - switch p.r.typ { - case itemPlus: - op = "+" - case itemMinus: - op = "-" - default: - break Loop - } - p.next() - opnd = newOperatorNode(op, opnd, p.parseMultiplicativeExpr(n)) - } - return opnd -} - -// MultiplicativeExpr ::= UnaryExpr | MultiplicativeExpr MultiplyOperator(*) UnaryExpr -// | MultiplicativeExpr 'div' UnaryExpr | MultiplicativeExpr 'mod' UnaryExpr -func (p *parser) parseMultiplicativeExpr(n node) node { - opnd := p.parseUnaryExpr(n) -Loop: - for { - var op string - if p.r.typ == itemStar { - op = "*" - } else if testOp(p.r, "div") || testOp(p.r, "mod") { - op = p.r.name - } else { - break Loop - } - p.next() - opnd = newOperatorNode(op, opnd, p.parseUnaryExpr(n)) - } - return opnd -} - -// UnaryExpr ::= UnionExpr | '-' UnaryExpr -func (p *parser) parseUnaryExpr(n node) node { - minus := false - // ignore '-' sequence - for p.r.typ == itemMinus { - p.next() - minus = !minus - } - opnd := p.parseUnionExpr(n) - if minus { - opnd = newOperatorNode("*", opnd, newOperandNode(float64(-1))) - } - return opnd -} - -// UnionExpr ::= PathExpr | UnionExpr '|' PathExpr -func (p *parser) parseUnionExpr(n node) node { - opnd := p.parsePathExpr(n) -Loop: - for { - if p.r.typ != itemUnion { - break Loop - } - p.next() - opnd2 := p.parsePathExpr(n) - // Checking the node type that must be is node set type? 
- opnd = newOperatorNode("|", opnd, opnd2) - } - return opnd -} - -// PathExpr ::= LocationPath | FilterExpr | FilterExpr '/' RelativeLocationPath | FilterExpr '//' RelativeLocationPath -func (p *parser) parsePathExpr(n node) node { - var opnd node - if isPrimaryExpr(p.r) { - opnd = p.parseFilterExpr(n) - switch p.r.typ { - case itemSlash: - p.next() - opnd = p.parseRelativeLocationPath(opnd) - case itemSlashSlash: - p.next() - opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd)) - } - } else { - opnd = p.parseLocationPath(nil) - } - return opnd -} - -// FilterExpr ::= PrimaryExpr | FilterExpr Predicate -func (p *parser) parseFilterExpr(n node) node { - opnd := p.parsePrimaryExpr(n) - if p.r.typ == itemLBracket { - opnd = newFilterNode(opnd, p.parsePredicate(opnd)) - } - return opnd -} - -// Predicate ::= '[' PredicateExpr ']' -func (p *parser) parsePredicate(n node) node { - p.skipItem(itemLBracket) - opnd := p.parseExpression(n) - p.skipItem(itemRBracket) - return opnd -} - -// LocationPath ::= RelativeLocationPath | AbsoluteLocationPath -func (p *parser) parseLocationPath(n node) (opnd node) { - switch p.r.typ { - case itemSlash: - p.next() - opnd = newRootNode("/") - if isStep(p.r.typ) { - opnd = p.parseRelativeLocationPath(opnd) // ?? child:: or self ?? - } - case itemSlashSlash: - p.next() - opnd = newRootNode("//") - opnd = p.parseRelativeLocationPath(newAxisNode("descendant-or-self", "", "", "", opnd)) - default: - opnd = p.parseRelativeLocationPath(n) - } - return opnd -} - -// RelativeLocationPath ::= Step | RelativeLocationPath '/' Step | AbbreviatedRelativeLocationPath -func (p *parser) parseRelativeLocationPath(n node) node { - opnd := n -Loop: - for { - opnd = p.parseStep(opnd) - switch p.r.typ { - case itemSlashSlash: - p.next() - opnd = newAxisNode("descendant-or-self", "", "", "", opnd) - case itemSlash: - p.next() - default: - break Loop - } - } - return opnd -} - -// Step ::= AxisSpecifier NodeTest Predicate* | AbbreviatedStep -func (p *parser) parseStep(n node) node { - axeTyp := "child" // default axes value. 
- if p.r.typ == itemDot || p.r.typ == itemDotDot { - if p.r.typ == itemDot { - axeTyp = "self" - } else { - axeTyp = "parent" - } - p.next() - return newAxisNode(axeTyp, "", "", "", n) - } - switch p.r.typ { - case itemAt: - p.next() - axeTyp = "attribute" - case itemAxe: - axeTyp = p.r.name - p.next() - } - opnd := p.parseNodeTest(n, axeTyp) - for p.r.typ == itemLBracket { - opnd = newFilterNode(opnd, p.parsePredicate(opnd)) - } - return opnd -} - -// NodeTest ::= NameTest | nodeType '(' ')' | 'processing-instruction' '(' Literal ')' -func (p *parser) parseNodeTest(n node, axeTyp string) (opnd node) { - switch p.r.typ { - case itemName: - if p.r.canBeFunc && isNodeType(p.r) { - var prop string - switch p.r.name { - case "comment", "text", "processing-instruction", "node": - prop = p.r.name - } - var name string - p.next() - p.skipItem(itemLParens) - if prop == "processing-instruction" && p.r.typ != itemRParens { - checkItem(p.r, itemString) - name = p.r.strval - p.next() - } - p.skipItem(itemRParens) - opnd = newAxisNode(axeTyp, name, "", prop, n) - } else { - prefix := p.r.prefix - name := p.r.name - p.next() - if p.r.name == "*" { - name = "" - } - opnd = newAxisNode(axeTyp, name, prefix, "", n) - } - case itemStar: - opnd = newAxisNode(axeTyp, "", "", "", n) - p.next() - default: - panic("expression must evaluate to a node-set") - } - return opnd -} - -// PrimaryExpr ::= VariableReference | '(' Expr ')' | Literal | Number | FunctionCall -func (p *parser) parsePrimaryExpr(n node) (opnd node) { - switch p.r.typ { - case itemString: - opnd = newOperandNode(p.r.strval) - p.next() - case itemNumber: - opnd = newOperandNode(p.r.numval) - p.next() - case itemDollar: - p.next() - checkItem(p.r, itemName) - opnd = newVariableNode(p.r.prefix, p.r.name) - p.next() - case itemLParens: - p.next() - opnd = p.parseExpression(n) - p.skipItem(itemRParens) - case itemName: - if p.r.canBeFunc && !isNodeType(p.r) { - opnd = p.parseMethod(nil) - } - } - return opnd -} - -// FunctionCall ::= FunctionName '(' ( Argument ( ',' Argument )* )? ')' -func (p *parser) parseMethod(n node) node { - var args []node - name := p.r.name - prefix := p.r.prefix - - p.skipItem(itemName) - p.skipItem(itemLParens) - if p.r.typ != itemRParens { - for { - args = append(args, p.parseExpression(n)) - if p.r.typ == itemRParens { - break - } - p.skipItem(itemComma) - } - } - p.skipItem(itemRParens) - return newFunctionNode(name, prefix, args) -} - -// Parse parsing the XPath express string expr and returns a tree node. -func parse(expr string) node { - r := &scanner{text: expr} - r.nextChar() - r.nextItem() - p := &parser{r: r} - return p.parseExpression(nil) -} - -// rootNode holds a top-level node of tree. -type rootNode struct { - nodeType - slash string -} - -func (r *rootNode) String() string { - return r.slash -} - -// operatorNode holds two Nodes operator. -type operatorNode struct { - nodeType - Op string - Left, Right node -} - -func (o *operatorNode) String() string { - return fmt.Sprintf("%v%s%v", o.Left, o.Op, o.Right) -} - -// axisNode holds a location step. -type axisNode struct { - nodeType - Input node - Prop string // node-test name.[comment|text|processing-instruction|node] - AxeType string // name of the axes.[attribute|ancestor|child|....] - LocalName string // local part name of node. - Prefix string // prefix name of node. 
-} - -func (a *axisNode) String() string { - var b bytes.Buffer - if a.AxeType != "" { - b.Write([]byte(a.AxeType + "::")) - } - if a.Prefix != "" { - b.Write([]byte(a.Prefix + ":")) - } - b.Write([]byte(a.LocalName)) - if a.Prop != "" { - b.Write([]byte("/" + a.Prop + "()")) - } - return b.String() -} - -// operandNode holds a constant operand. -type operandNode struct { - nodeType - Val interface{} -} - -func (o *operandNode) String() string { - return fmt.Sprintf("%v", o.Val) -} - -// filterNode holds a condition filter. -type filterNode struct { - nodeType - Input, Condition node -} - -func (f *filterNode) String() string { - return fmt.Sprintf("%s[%s]", f.Input, f.Condition) -} - -// variableNode holds a variable. -type variableNode struct { - nodeType - Name, Prefix string -} - -func (v *variableNode) String() string { - if v.Prefix == "" { - return v.Name - } - return fmt.Sprintf("%s:%s", v.Prefix, v.Name) -} - -// functionNode holds a function call. -type functionNode struct { - nodeType - Args []node - Prefix string - FuncName string // function name -} - -func (f *functionNode) String() string { - var b bytes.Buffer - // fun(arg1, ..., argn) - b.Write([]byte(f.FuncName)) - b.Write([]byte("(")) - for i, arg := range f.Args { - if i > 0 { - b.Write([]byte(",")) - } - b.Write([]byte(fmt.Sprintf("%s", arg))) - } - b.Write([]byte(")")) - return b.String() -} - -type scanner struct { - text, name, prefix string - - pos int - curr rune - typ itemType - strval string // text value at current pos - numval float64 // number value at current pos - canBeFunc bool -} - -func (s *scanner) nextChar() bool { - if s.pos >= len(s.text) { - s.curr = rune(0) - return false - } - s.curr = rune(s.text[s.pos]) - s.pos += 1 - return true -} - -func (s *scanner) nextItem() bool { - s.skipSpace() - switch s.curr { - case 0: - s.typ = itemEof - return false - case ',', '@', '(', ')', '|', '*', '[', ']', '+', '-', '=', '#', '$': - s.typ = asItemType(s.curr) - s.nextChar() - case '<': - s.typ = itemLt - s.nextChar() - if s.curr == '=' { - s.typ = itemLe - s.nextChar() - } - case '>': - s.typ = itemGt - s.nextChar() - if s.curr == '=' { - s.typ = itemGe - s.nextChar() - } - case '!': - s.typ = itemBang - s.nextChar() - if s.curr == '=' { - s.typ = itemNe - s.nextChar() - } - case '.': - s.typ = itemDot - s.nextChar() - if s.curr == '.' 
{ - s.typ = itemDotDot - s.nextChar() - } else if isDigit(s.curr) { - s.typ = itemNumber - s.numval = s.scanFraction() - } - case '/': - s.typ = itemSlash - s.nextChar() - if s.curr == '/' { - s.typ = itemSlashSlash - s.nextChar() - } - case '"', '\'': - s.typ = itemString - s.strval = s.scanString() - default: - if isDigit(s.curr) { - s.typ = itemNumber - s.numval = s.scanNumber() - } else if isName(s.curr) { - s.typ = itemName - s.name = s.scanName() - s.prefix = "" - // "foo:bar" is one itemem not three because it doesn't allow spaces in between - // We should distinct it from "foo::" and need process "foo ::" as well - if s.curr == ':' { - s.nextChar() - // can be "foo:bar" or "foo::" - if s.curr == ':' { - // "foo::" - s.nextChar() - s.typ = itemAxe - } else { // "foo:*", "foo:bar" or "foo: " - s.prefix = s.name - if s.curr == '*' { - s.nextChar() - s.name = "*" - } else if isName(s.curr) { - s.name = s.scanName() - } else { - panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) - } - } - } else { - s.skipSpace() - if s.curr == ':' { - s.nextChar() - // it can be "foo ::" or just "foo :" - if s.curr == ':' { - s.nextChar() - s.typ = itemAxe - } else { - panic(fmt.Sprintf("%s has an invalid qualified name.", s.text)) - } - } - } - s.skipSpace() - s.canBeFunc = s.curr == '(' - } else { - panic(fmt.Sprintf("%s has an invalid token.", s.text)) - } - } - return true -} - -func (s *scanner) skipSpace() { -Loop: - for { - if !unicode.IsSpace(s.curr) || !s.nextChar() { - break Loop - } - } -} - -func (s *scanner) scanFraction() float64 { - var ( - i = s.pos - 2 - c = 1 // '.' - ) - for isDigit(s.curr) { - s.nextChar() - c++ - } - v, err := strconv.ParseFloat(s.text[i:i+c], 64) - if err != nil { - panic(fmt.Errorf("xpath: scanFraction parse float got error: %v", err)) - } - return v -} - -func (s *scanner) scanNumber() float64 { - var ( - c int - i = s.pos - 1 - ) - for isDigit(s.curr) { - s.nextChar() - c++ - } - if s.curr == '.' 
{ - s.nextChar() - c++ - for isDigit(s.curr) { - s.nextChar() - c++ - } - } - v, err := strconv.ParseFloat(s.text[i:i+c], 64) - if err != nil { - panic(fmt.Errorf("xpath: scanNumber parse float got error: %v", err)) - } - return v -} - -func (s *scanner) scanString() string { - var ( - c = 0 - end = s.curr - ) - s.nextChar() - i := s.pos - 1 - for s.curr != end { - if !s.nextChar() { - panic(errors.New("xpath: scanString got unclosed string")) - } - c++ - } - s.nextChar() - return s.text[i : i+c] -} - -func (s *scanner) scanName() string { - var ( - c int - i = s.pos - 1 - ) - for isName(s.curr) { - c++ - if !s.nextChar() { - break - } - } - return s.text[i : i+c] -} - -func isName(r rune) bool { - return string(r) != ":" && string(r) != "/" && - (unicode.Is(first, r) || unicode.Is(second, r) || string(r) == "*") -} - -func isDigit(r rune) bool { - return unicode.IsDigit(r) -} - -func asItemType(r rune) itemType { - switch r { - case ',': - return itemComma - case '@': - return itemAt - case '(': - return itemLParens - case ')': - return itemRParens - case '|': - return itemUnion - case '*': - return itemStar - case '[': - return itemLBracket - case ']': - return itemRBracket - case '+': - return itemPlus - case '-': - return itemMinus - case '=': - return itemEq - case '$': - return itemDollar - } - panic(fmt.Errorf("unknown item: %v", r)) -} - -var first = &unicode.RangeTable{ - R16: []unicode.Range16{ - {0x003A, 0x003A, 1}, - {0x0041, 0x005A, 1}, - {0x005F, 0x005F, 1}, - {0x0061, 0x007A, 1}, - {0x00C0, 0x00D6, 1}, - {0x00D8, 0x00F6, 1}, - {0x00F8, 0x00FF, 1}, - {0x0100, 0x0131, 1}, - {0x0134, 0x013E, 1}, - {0x0141, 0x0148, 1}, - {0x014A, 0x017E, 1}, - {0x0180, 0x01C3, 1}, - {0x01CD, 0x01F0, 1}, - {0x01F4, 0x01F5, 1}, - {0x01FA, 0x0217, 1}, - {0x0250, 0x02A8, 1}, - {0x02BB, 0x02C1, 1}, - {0x0386, 0x0386, 1}, - {0x0388, 0x038A, 1}, - {0x038C, 0x038C, 1}, - {0x038E, 0x03A1, 1}, - {0x03A3, 0x03CE, 1}, - {0x03D0, 0x03D6, 1}, - {0x03DA, 0x03E0, 2}, - {0x03E2, 0x03F3, 1}, - {0x0401, 0x040C, 1}, - {0x040E, 0x044F, 1}, - {0x0451, 0x045C, 1}, - {0x045E, 0x0481, 1}, - {0x0490, 0x04C4, 1}, - {0x04C7, 0x04C8, 1}, - {0x04CB, 0x04CC, 1}, - {0x04D0, 0x04EB, 1}, - {0x04EE, 0x04F5, 1}, - {0x04F8, 0x04F9, 1}, - {0x0531, 0x0556, 1}, - {0x0559, 0x0559, 1}, - {0x0561, 0x0586, 1}, - {0x05D0, 0x05EA, 1}, - {0x05F0, 0x05F2, 1}, - {0x0621, 0x063A, 1}, - {0x0641, 0x064A, 1}, - {0x0671, 0x06B7, 1}, - {0x06BA, 0x06BE, 1}, - {0x06C0, 0x06CE, 1}, - {0x06D0, 0x06D3, 1}, - {0x06D5, 0x06D5, 1}, - {0x06E5, 0x06E6, 1}, - {0x0905, 0x0939, 1}, - {0x093D, 0x093D, 1}, - {0x0958, 0x0961, 1}, - {0x0985, 0x098C, 1}, - {0x098F, 0x0990, 1}, - {0x0993, 0x09A8, 1}, - {0x09AA, 0x09B0, 1}, - {0x09B2, 0x09B2, 1}, - {0x09B6, 0x09B9, 1}, - {0x09DC, 0x09DD, 1}, - {0x09DF, 0x09E1, 1}, - {0x09F0, 0x09F1, 1}, - {0x0A05, 0x0A0A, 1}, - {0x0A0F, 0x0A10, 1}, - {0x0A13, 0x0A28, 1}, - {0x0A2A, 0x0A30, 1}, - {0x0A32, 0x0A33, 1}, - {0x0A35, 0x0A36, 1}, - {0x0A38, 0x0A39, 1}, - {0x0A59, 0x0A5C, 1}, - {0x0A5E, 0x0A5E, 1}, - {0x0A72, 0x0A74, 1}, - {0x0A85, 0x0A8B, 1}, - {0x0A8D, 0x0A8D, 1}, - {0x0A8F, 0x0A91, 1}, - {0x0A93, 0x0AA8, 1}, - {0x0AAA, 0x0AB0, 1}, - {0x0AB2, 0x0AB3, 1}, - {0x0AB5, 0x0AB9, 1}, - {0x0ABD, 0x0AE0, 0x23}, - {0x0B05, 0x0B0C, 1}, - {0x0B0F, 0x0B10, 1}, - {0x0B13, 0x0B28, 1}, - {0x0B2A, 0x0B30, 1}, - {0x0B32, 0x0B33, 1}, - {0x0B36, 0x0B39, 1}, - {0x0B3D, 0x0B3D, 1}, - {0x0B5C, 0x0B5D, 1}, - {0x0B5F, 0x0B61, 1}, - {0x0B85, 0x0B8A, 1}, - {0x0B8E, 0x0B90, 1}, - {0x0B92, 0x0B95, 1}, - {0x0B99, 0x0B9A, 1}, - {0x0B9C, 0x0B9C, 1}, - 
{0x0B9E, 0x0B9F, 1}, - {0x0BA3, 0x0BA4, 1}, - {0x0BA8, 0x0BAA, 1}, - {0x0BAE, 0x0BB5, 1}, - {0x0BB7, 0x0BB9, 1}, - {0x0C05, 0x0C0C, 1}, - {0x0C0E, 0x0C10, 1}, - {0x0C12, 0x0C28, 1}, - {0x0C2A, 0x0C33, 1}, - {0x0C35, 0x0C39, 1}, - {0x0C60, 0x0C61, 1}, - {0x0C85, 0x0C8C, 1}, - {0x0C8E, 0x0C90, 1}, - {0x0C92, 0x0CA8, 1}, - {0x0CAA, 0x0CB3, 1}, - {0x0CB5, 0x0CB9, 1}, - {0x0CDE, 0x0CDE, 1}, - {0x0CE0, 0x0CE1, 1}, - {0x0D05, 0x0D0C, 1}, - {0x0D0E, 0x0D10, 1}, - {0x0D12, 0x0D28, 1}, - {0x0D2A, 0x0D39, 1}, - {0x0D60, 0x0D61, 1}, - {0x0E01, 0x0E2E, 1}, - {0x0E30, 0x0E30, 1}, - {0x0E32, 0x0E33, 1}, - {0x0E40, 0x0E45, 1}, - {0x0E81, 0x0E82, 1}, - {0x0E84, 0x0E84, 1}, - {0x0E87, 0x0E88, 1}, - {0x0E8A, 0x0E8D, 3}, - {0x0E94, 0x0E97, 1}, - {0x0E99, 0x0E9F, 1}, - {0x0EA1, 0x0EA3, 1}, - {0x0EA5, 0x0EA7, 2}, - {0x0EAA, 0x0EAB, 1}, - {0x0EAD, 0x0EAE, 1}, - {0x0EB0, 0x0EB0, 1}, - {0x0EB2, 0x0EB3, 1}, - {0x0EBD, 0x0EBD, 1}, - {0x0EC0, 0x0EC4, 1}, - {0x0F40, 0x0F47, 1}, - {0x0F49, 0x0F69, 1}, - {0x10A0, 0x10C5, 1}, - {0x10D0, 0x10F6, 1}, - {0x1100, 0x1100, 1}, - {0x1102, 0x1103, 1}, - {0x1105, 0x1107, 1}, - {0x1109, 0x1109, 1}, - {0x110B, 0x110C, 1}, - {0x110E, 0x1112, 1}, - {0x113C, 0x1140, 2}, - {0x114C, 0x1150, 2}, - {0x1154, 0x1155, 1}, - {0x1159, 0x1159, 1}, - {0x115F, 0x1161, 1}, - {0x1163, 0x1169, 2}, - {0x116D, 0x116E, 1}, - {0x1172, 0x1173, 1}, - {0x1175, 0x119E, 0x119E - 0x1175}, - {0x11A8, 0x11AB, 0x11AB - 0x11A8}, - {0x11AE, 0x11AF, 1}, - {0x11B7, 0x11B8, 1}, - {0x11BA, 0x11BA, 1}, - {0x11BC, 0x11C2, 1}, - {0x11EB, 0x11F0, 0x11F0 - 0x11EB}, - {0x11F9, 0x11F9, 1}, - {0x1E00, 0x1E9B, 1}, - {0x1EA0, 0x1EF9, 1}, - {0x1F00, 0x1F15, 1}, - {0x1F18, 0x1F1D, 1}, - {0x1F20, 0x1F45, 1}, - {0x1F48, 0x1F4D, 1}, - {0x1F50, 0x1F57, 1}, - {0x1F59, 0x1F5B, 0x1F5B - 0x1F59}, - {0x1F5D, 0x1F5D, 1}, - {0x1F5F, 0x1F7D, 1}, - {0x1F80, 0x1FB4, 1}, - {0x1FB6, 0x1FBC, 1}, - {0x1FBE, 0x1FBE, 1}, - {0x1FC2, 0x1FC4, 1}, - {0x1FC6, 0x1FCC, 1}, - {0x1FD0, 0x1FD3, 1}, - {0x1FD6, 0x1FDB, 1}, - {0x1FE0, 0x1FEC, 1}, - {0x1FF2, 0x1FF4, 1}, - {0x1FF6, 0x1FFC, 1}, - {0x2126, 0x2126, 1}, - {0x212A, 0x212B, 1}, - {0x212E, 0x212E, 1}, - {0x2180, 0x2182, 1}, - {0x3007, 0x3007, 1}, - {0x3021, 0x3029, 1}, - {0x3041, 0x3094, 1}, - {0x30A1, 0x30FA, 1}, - {0x3105, 0x312C, 1}, - {0x4E00, 0x9FA5, 1}, - {0xAC00, 0xD7A3, 1}, - }, -} - -var second = &unicode.RangeTable{ - R16: []unicode.Range16{ - {0x002D, 0x002E, 1}, - {0x0030, 0x0039, 1}, - {0x00B7, 0x00B7, 1}, - {0x02D0, 0x02D1, 1}, - {0x0300, 0x0345, 1}, - {0x0360, 0x0361, 1}, - {0x0387, 0x0387, 1}, - {0x0483, 0x0486, 1}, - {0x0591, 0x05A1, 1}, - {0x05A3, 0x05B9, 1}, - {0x05BB, 0x05BD, 1}, - {0x05BF, 0x05BF, 1}, - {0x05C1, 0x05C2, 1}, - {0x05C4, 0x0640, 0x0640 - 0x05C4}, - {0x064B, 0x0652, 1}, - {0x0660, 0x0669, 1}, - {0x0670, 0x0670, 1}, - {0x06D6, 0x06DC, 1}, - {0x06DD, 0x06DF, 1}, - {0x06E0, 0x06E4, 1}, - {0x06E7, 0x06E8, 1}, - {0x06EA, 0x06ED, 1}, - {0x06F0, 0x06F9, 1}, - {0x0901, 0x0903, 1}, - {0x093C, 0x093C, 1}, - {0x093E, 0x094C, 1}, - {0x094D, 0x094D, 1}, - {0x0951, 0x0954, 1}, - {0x0962, 0x0963, 1}, - {0x0966, 0x096F, 1}, - {0x0981, 0x0983, 1}, - {0x09BC, 0x09BC, 1}, - {0x09BE, 0x09BF, 1}, - {0x09C0, 0x09C4, 1}, - {0x09C7, 0x09C8, 1}, - {0x09CB, 0x09CD, 1}, - {0x09D7, 0x09D7, 1}, - {0x09E2, 0x09E3, 1}, - {0x09E6, 0x09EF, 1}, - {0x0A02, 0x0A3C, 0x3A}, - {0x0A3E, 0x0A3F, 1}, - {0x0A40, 0x0A42, 1}, - {0x0A47, 0x0A48, 1}, - {0x0A4B, 0x0A4D, 1}, - {0x0A66, 0x0A6F, 1}, - {0x0A70, 0x0A71, 1}, - {0x0A81, 0x0A83, 1}, - {0x0ABC, 0x0ABC, 1}, - {0x0ABE, 0x0AC5, 1}, - {0x0AC7, 0x0AC9, 1}, - 
{0x0ACB, 0x0ACD, 1}, - {0x0AE6, 0x0AEF, 1}, - {0x0B01, 0x0B03, 1}, - {0x0B3C, 0x0B3C, 1}, - {0x0B3E, 0x0B43, 1}, - {0x0B47, 0x0B48, 1}, - {0x0B4B, 0x0B4D, 1}, - {0x0B56, 0x0B57, 1}, - {0x0B66, 0x0B6F, 1}, - {0x0B82, 0x0B83, 1}, - {0x0BBE, 0x0BC2, 1}, - {0x0BC6, 0x0BC8, 1}, - {0x0BCA, 0x0BCD, 1}, - {0x0BD7, 0x0BD7, 1}, - {0x0BE7, 0x0BEF, 1}, - {0x0C01, 0x0C03, 1}, - {0x0C3E, 0x0C44, 1}, - {0x0C46, 0x0C48, 1}, - {0x0C4A, 0x0C4D, 1}, - {0x0C55, 0x0C56, 1}, - {0x0C66, 0x0C6F, 1}, - {0x0C82, 0x0C83, 1}, - {0x0CBE, 0x0CC4, 1}, - {0x0CC6, 0x0CC8, 1}, - {0x0CCA, 0x0CCD, 1}, - {0x0CD5, 0x0CD6, 1}, - {0x0CE6, 0x0CEF, 1}, - {0x0D02, 0x0D03, 1}, - {0x0D3E, 0x0D43, 1}, - {0x0D46, 0x0D48, 1}, - {0x0D4A, 0x0D4D, 1}, - {0x0D57, 0x0D57, 1}, - {0x0D66, 0x0D6F, 1}, - {0x0E31, 0x0E31, 1}, - {0x0E34, 0x0E3A, 1}, - {0x0E46, 0x0E46, 1}, - {0x0E47, 0x0E4E, 1}, - {0x0E50, 0x0E59, 1}, - {0x0EB1, 0x0EB1, 1}, - {0x0EB4, 0x0EB9, 1}, - {0x0EBB, 0x0EBC, 1}, - {0x0EC6, 0x0EC6, 1}, - {0x0EC8, 0x0ECD, 1}, - {0x0ED0, 0x0ED9, 1}, - {0x0F18, 0x0F19, 1}, - {0x0F20, 0x0F29, 1}, - {0x0F35, 0x0F39, 2}, - {0x0F3E, 0x0F3F, 1}, - {0x0F71, 0x0F84, 1}, - {0x0F86, 0x0F8B, 1}, - {0x0F90, 0x0F95, 1}, - {0x0F97, 0x0F97, 1}, - {0x0F99, 0x0FAD, 1}, - {0x0FB1, 0x0FB7, 1}, - {0x0FB9, 0x0FB9, 1}, - {0x20D0, 0x20DC, 1}, - {0x20E1, 0x3005, 0x3005 - 0x20E1}, - {0x302A, 0x302F, 1}, - {0x3031, 0x3035, 1}, - {0x3099, 0x309A, 1}, - {0x309D, 0x309E, 1}, - {0x30FC, 0x30FE, 1}, - }, -} diff --git a/vendor/github.com/antchfx/xpath/query.go b/vendor/github.com/antchfx/xpath/query.go deleted file mode 100644 index b076973c8..000000000 --- a/vendor/github.com/antchfx/xpath/query.go +++ /dev/null @@ -1,728 +0,0 @@ -package xpath - -import ( - "reflect" -) - -type iterator interface { - Current() NodeNavigator -} - -// An XPath query interface. -type query interface { - // Select traversing iterator returns a query matched node NodeNavigator. - Select(iterator) NodeNavigator - - // Evaluate evaluates query and returns values of the current query. - Evaluate(iterator) interface{} - - Clone() query -} - -// contextQuery is returns current node on the iterator object query. -type contextQuery struct { - count int - Root bool // Moving to root-level node in the current context iterator. 
-} - -func (c *contextQuery) Select(t iterator) (n NodeNavigator) { - if c.count == 0 { - c.count++ - n = t.Current().Copy() - if c.Root { - n.MoveToRoot() - } - } - return n -} - -func (c *contextQuery) Evaluate(iterator) interface{} { - c.count = 0 - return c -} - -func (c *contextQuery) Clone() query { - return &contextQuery{count: 0, Root: c.Root} -} - -// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*) -type ancestorQuery struct { - iterator func() NodeNavigator - - Self bool - Input query - Predicate func(NodeNavigator) bool -} - -func (a *ancestorQuery) Select(t iterator) NodeNavigator { - for { - if a.iterator == nil { - node := a.Input.Select(t) - if node == nil { - return nil - } - first := true - a.iterator = func() NodeNavigator { - if first && a.Self { - first = false - if a.Predicate(node) { - return node - } - } - for node.MoveToParent() { - if !a.Predicate(node) { - break - } - return node - } - return nil - } - } - - if node := a.iterator(); node != nil { - return node - } - a.iterator = nil - } -} - -func (a *ancestorQuery) Evaluate(t iterator) interface{} { - a.Input.Evaluate(t) - a.iterator = nil - return a -} - -func (a *ancestorQuery) Test(n NodeNavigator) bool { - return a.Predicate(n) -} - -func (a *ancestorQuery) Clone() query { - return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate} -} - -// attributeQuery is an XPath attribute node query.(@*) -type attributeQuery struct { - iterator func() NodeNavigator - - Input query - Predicate func(NodeNavigator) bool -} - -func (a *attributeQuery) Select(t iterator) NodeNavigator { - for { - if a.iterator == nil { - node := a.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - a.iterator = func() NodeNavigator { - for { - onAttr := node.MoveToNextAttribute() - if !onAttr { - return nil - } - if a.Predicate(node) { - return node - } - } - } - } - - if node := a.iterator(); node != nil { - return node - } - a.iterator = nil - } -} - -func (a *attributeQuery) Evaluate(t iterator) interface{} { - a.Input.Evaluate(t) - a.iterator = nil - return a -} - -func (a *attributeQuery) Test(n NodeNavigator) bool { - return a.Predicate(n) -} - -func (a *attributeQuery) Clone() query { - return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate} -} - -// childQuery is an XPath child node query.(child::*) -type childQuery struct { - posit int - iterator func() NodeNavigator - - Input query - Predicate func(NodeNavigator) bool -} - -func (c *childQuery) Select(t iterator) NodeNavigator { - for { - if c.iterator == nil { - c.posit = 0 - node := c.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - first := true - c.iterator = func() NodeNavigator { - for { - if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) { - return nil - } - first = false - if c.Predicate(node) { - return node - } - } - } - } - - if node := c.iterator(); node != nil { - c.posit++ - return node - } - c.iterator = nil - } -} - -func (c *childQuery) Evaluate(t iterator) interface{} { - c.Input.Evaluate(t) - c.iterator = nil - return c -} - -func (c *childQuery) Test(n NodeNavigator) bool { - return c.Predicate(n) -} - -func (c *childQuery) Clone() query { - return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate} -} - -// position returns a position of current NodeNavigator. 
-func (c *childQuery) position() int { - return c.posit -} - -// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*) -type descendantQuery struct { - iterator func() NodeNavigator - posit int - - Self bool - Input query - Predicate func(NodeNavigator) bool -} - -func (d *descendantQuery) Select(t iterator) NodeNavigator { - for { - if d.iterator == nil { - d.posit = 0 - node := d.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - level := 0 - first := true - d.iterator = func() NodeNavigator { - if first && d.Self { - first = false - if d.Predicate(node) { - return node - } - } - - for { - if node.MoveToChild() { - level++ - } else { - for { - if level == 0 { - return nil - } - if node.MoveToNext() { - break - } - node.MoveToParent() - level-- - } - } - if d.Predicate(node) { - return node - } - } - } - } - - if node := d.iterator(); node != nil { - d.posit++ - return node - } - d.iterator = nil - } -} - -func (d *descendantQuery) Evaluate(t iterator) interface{} { - d.Input.Evaluate(t) - d.iterator = nil - return d -} - -func (d *descendantQuery) Test(n NodeNavigator) bool { - return d.Predicate(n) -} - -// position returns a position of current NodeNavigator. -func (d *descendantQuery) position() int { - return d.posit -} - -func (d *descendantQuery) Clone() query { - return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate} -} - -// followingQuery is an XPath following node query.(following::*|following-sibling::*) -type followingQuery struct { - iterator func() NodeNavigator - - Input query - Sibling bool // The matching sibling node of current node. - Predicate func(NodeNavigator) bool -} - -func (f *followingQuery) Select(t iterator) NodeNavigator { - for { - if f.iterator == nil { - node := f.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - if f.Sibling { - f.iterator = func() NodeNavigator { - for { - if !node.MoveToNext() { - return nil - } - if f.Predicate(node) { - return node - } - } - } - } else { - var q query // descendant query - f.iterator = func() NodeNavigator { - for { - if q == nil { - for !node.MoveToNext() { - if !node.MoveToParent() { - return nil - } - } - q = &descendantQuery{ - Self: true, - Input: &contextQuery{}, - Predicate: f.Predicate, - } - t.Current().MoveTo(node) - } - if node := q.Select(t); node != nil { - return node - } - q = nil - } - } - } - } - - if node := f.iterator(); node != nil { - return node - } - f.iterator = nil - } -} - -func (f *followingQuery) Evaluate(t iterator) interface{} { - f.Input.Evaluate(t) - return f -} - -func (f *followingQuery) Test(n NodeNavigator) bool { - return f.Predicate(n) -} - -func (f *followingQuery) Clone() query { - return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate} -} - -// precedingQuery is an XPath preceding node query.(preceding::*) -type precedingQuery struct { - iterator func() NodeNavigator - Input query - Sibling bool // The matching sibling node of current node. 
- Predicate func(NodeNavigator) bool -} - -func (p *precedingQuery) Select(t iterator) NodeNavigator { - for { - if p.iterator == nil { - node := p.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - if p.Sibling { - p.iterator = func() NodeNavigator { - for { - for !node.MoveToPrevious() { - return nil - } - if p.Predicate(node) { - return node - } - } - } - } else { - var q query - p.iterator = func() NodeNavigator { - for { - if q == nil { - for !node.MoveToPrevious() { - if !node.MoveToParent() { - return nil - } - } - q = &descendantQuery{ - Self: true, - Input: &contextQuery{}, - Predicate: p.Predicate, - } - t.Current().MoveTo(node) - } - if node := q.Select(t); node != nil { - return node - } - q = nil - } - } - } - } - if node := p.iterator(); node != nil { - return node - } - p.iterator = nil - } -} - -func (p *precedingQuery) Evaluate(t iterator) interface{} { - p.Input.Evaluate(t) - return p -} - -func (p *precedingQuery) Test(n NodeNavigator) bool { - return p.Predicate(n) -} - -func (p *precedingQuery) Clone() query { - return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate} -} - -// parentQuery is an XPath parent node query.(parent::*) -type parentQuery struct { - Input query - Predicate func(NodeNavigator) bool -} - -func (p *parentQuery) Select(t iterator) NodeNavigator { - for { - node := p.Input.Select(t) - if node == nil { - return nil - } - node = node.Copy() - if node.MoveToParent() && p.Predicate(node) { - return node - } - } -} - -func (p *parentQuery) Evaluate(t iterator) interface{} { - p.Input.Evaluate(t) - return p -} - -func (p *parentQuery) Clone() query { - return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate} -} - -func (p *parentQuery) Test(n NodeNavigator) bool { - return p.Predicate(n) -} - -// selfQuery is an Self node query.(self::*) -type selfQuery struct { - Input query - Predicate func(NodeNavigator) bool -} - -func (s *selfQuery) Select(t iterator) NodeNavigator { - for { - node := s.Input.Select(t) - if node == nil { - return nil - } - - if s.Predicate(node) { - return node - } - } -} - -func (s *selfQuery) Evaluate(t iterator) interface{} { - s.Input.Evaluate(t) - return s -} - -func (s *selfQuery) Test(n NodeNavigator) bool { - return s.Predicate(n) -} - -func (s *selfQuery) Clone() query { - return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate} -} - -// filterQuery is an XPath query for predicate filter. -type filterQuery struct { - Input query - Predicate query -} - -func (f *filterQuery) do(t iterator) bool { - val := reflect.ValueOf(f.Predicate.Evaluate(t)) - switch val.Kind() { - case reflect.Bool: - return val.Bool() - case reflect.String: - return len(val.String()) > 0 - case reflect.Float64: - pt := float64(getNodePosition(f.Input)) - return int(val.Float()) == int(pt) - default: - if q, ok := f.Predicate.(query); ok { - return q.Select(t) != nil - } - } - return false -} - -func (f *filterQuery) Select(t iterator) NodeNavigator { - for { - node := f.Input.Select(t) - if node == nil { - return node - } - node = node.Copy() - //fmt.Println(node.LocalName()) - - t.Current().MoveTo(node) - if f.do(t) { - return node - } - } -} - -func (f *filterQuery) Evaluate(t iterator) interface{} { - f.Input.Evaluate(t) - return f -} - -func (f *filterQuery) Clone() query { - return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()} -} - -// functionQuery is an XPath function that call a function to returns -// value of current NodeNavigator node. 
-type functionQuery struct {
-    Input query                             // Node Set
-    Func  func(query, iterator) interface{} // The xpath function.
-}
-
-func (f *functionQuery) Select(t iterator) NodeNavigator {
-    return nil
-}
-
-// Evaluate calls the specified function and returns one of the following
-// value types: number, string, boolean.
-func (f *functionQuery) Evaluate(t iterator) interface{} {
-    return f.Func(f.Input, t)
-}
-
-func (f *functionQuery) Clone() query {
-    return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
-}
-
-// constantQuery is an XPath constant operand.
-type constantQuery struct {
-    Val interface{}
-}
-
-func (c *constantQuery) Select(t iterator) NodeNavigator {
-    return nil
-}
-
-func (c *constantQuery) Evaluate(t iterator) interface{} {
-    return c.Val
-}
-
-func (c *constantQuery) Clone() query {
-    return c
-}
-
-// logicalQuery is an XPath logical expression.
-type logicalQuery struct {
-    Left, Right query
-
-    Do func(iterator, interface{}, interface{}) interface{}
-}
-
-func (l *logicalQuery) Select(t iterator) NodeNavigator {
-    // When an XPath expr is a logical expression.
-    node := t.Current().Copy()
-    val := l.Evaluate(t)
-    switch val.(type) {
-    case bool:
-        if val.(bool) == true {
-            return node
-        }
-    }
-    return nil
-}
-
-func (l *logicalQuery) Evaluate(t iterator) interface{} {
-    m := l.Left.Evaluate(t)
-    n := l.Right.Evaluate(t)
-    return l.Do(t, m, n)
-}
-
-func (l *logicalQuery) Clone() query {
-    return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do}
-}
-
-// numericQuery is an XPath numeric operator expression.
-type numericQuery struct {
-    Left, Right query
-
-    Do func(interface{}, interface{}) interface{}
-}
-
-func (n *numericQuery) Select(t iterator) NodeNavigator {
-    return nil
-}
-
-func (n *numericQuery) Evaluate(t iterator) interface{} {
-    m := n.Left.Evaluate(t)
-    k := n.Right.Evaluate(t)
-    return n.Do(m, k)
-}
-
-func (n *numericQuery) Clone() query {
-    return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
-}
-
-type booleanQuery struct {
-    IsOr        bool
-    Left, Right query
-    iterator    func() NodeNavigator
-}
-
-func (b *booleanQuery) Select(t iterator) NodeNavigator {
-    if b.iterator == nil {
-        var list []NodeNavigator
-        i := 0
-        root := t.Current().Copy()
-        if b.IsOr {
-            for {
-                node := b.Left.Select(t)
-                if node == nil {
-                    break
-                }
-                node = node.Copy()
-                list = append(list, node)
-            }
-            t.Current().MoveTo(root)
-            for {
-                node := b.Right.Select(t)
-                if node == nil {
-                    break
-                }
-                node = node.Copy()
-                list = append(list, node)
-            }
-        } else {
-            // Collect the left- and right-hand node sets, then intersect them.
-            var m []NodeNavigator
-            var n []NodeNavigator
-            for {
-                node := b.Left.Select(t)
-                if node == nil {
-                    break
-                }
-                node = node.Copy()
-                m = append(m, node)
-            }
-            t.Current().MoveTo(root)
-            for {
-                node := b.Right.Select(t)
-                if node == nil {
-                    break
-                }
-                node = node.Copy()
-                n = append(n, node)
-            }
-            for _, k := range m {
-                for _, j := range n {
-                    if k == j {
-                        list = append(list, k)
-                    }
-                }
-            }
-        }
-
-        b.iterator = func() NodeNavigator {
-            if i >= len(list) {
-                return nil
-            }
-            node := list[i]
-            i++
-            return node
-        }
-    }
-    return b.iterator()
-}
-
-func (b *booleanQuery) Evaluate(t iterator) interface{} {
-    m := b.Left.Evaluate(t)
-    if m.(bool) == b.IsOr {
-        return m
-    }
-    return b.Right.Evaluate(t)
-}
-
-func (b *booleanQuery) Clone() query {
-    return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
-}
-
-func getNodePosition(q query) int {
-    type Position interface {
-        position() int
-    }
-    if count, ok := q.(Position); ok {
-        return count.position()
-    }
-    return 1
-}
diff --git a/vendor/github.com/antchfx/xpath/xpath.go b/vendor/github.com/antchfx/xpath/xpath.go
deleted file mode 100644
index 4460c3777..000000000
--- a/vendor/github.com/antchfx/xpath/xpath.go
+++ /dev/null
@@ -1,154 +0,0 @@
-package xpath
-
-import (
-    "errors"
-)
-
-// NodeType represents a type of XPath node.
-type NodeType int
-
-const (
-    // RootNode is a root node of the XML document or node tree.
-    RootNode NodeType = iota
-
-    // ElementNode is an element, such as <element>.
-    ElementNode
-
-    // AttributeNode is an attribute, such as id='123'.
-    AttributeNode
-
-    // TextNode is the text content of a node.
-    TextNode
-
-    // CommentNode is a comment node, such as <!-- my comment -->.
-    CommentNode
-)
-
-// NodeNavigator provides a cursor model for navigating XML data.
-type NodeNavigator interface {
-    // NodeType returns the XPathNodeType of the current node.
-    NodeType() NodeType
-
-    // LocalName gets the local name of the current node.
-    LocalName() string
-
-    // Prefix returns the namespace prefix associated with the current node.
-    Prefix() string
-
-    // Value gets the value of the current node.
-    Value() string
-
-    // Copy does a deep copy of the NodeNavigator and all its components.
-    Copy() NodeNavigator
-
-    // MoveToRoot moves the NodeNavigator to the root node of the current node.
-    MoveToRoot()
-
-    // MoveToParent moves the NodeNavigator to the parent node of the current node.
-    MoveToParent() bool
-
-    // MoveToNextAttribute moves the NodeNavigator to the next attribute on the current node.
-    MoveToNextAttribute() bool
-
-    // MoveToChild moves the NodeNavigator to the first child node of the current node.
-    MoveToChild() bool
-
-    // MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
-    MoveToFirst() bool
-
-    // MoveToNext moves the NodeNavigator to the next sibling node of the current node.
-    MoveToNext() bool
-
-    // MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
-    MoveToPrevious() bool
-
-    // MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
-    MoveTo(NodeNavigator) bool
-}
-
-// NodeIterator holds all matched Node objects.
-type NodeIterator struct {
-    node  NodeNavigator
-    query query
-}
-
-// Current returns the current matched node.
-func (t *NodeIterator) Current() NodeNavigator {
-    return t.node
-}
-
-// MoveNext moves the Navigator to the next matched node.
-func (t *NodeIterator) MoveNext() bool {
-    n := t.query.Select(t)
-    if n != nil {
-        if !t.node.MoveTo(n) {
-            t.node = n.Copy()
-        }
-        return true
-    }
-    return false
-}
-
-// Select selects a node set using the specified XPath expression.
-// Deprecated: use the Expr.Select() method instead.
-func Select(root NodeNavigator, expr string) *NodeIterator {
-    exp, err := Compile(expr)
-    if err != nil {
-        panic(err)
-    }
-    return exp.Select(root)
-}
-
-// Expr is an XPath expression for query.
-type Expr struct {
-    s string
-    q query
-}
-
-type iteratorFunc func() NodeNavigator
-
-func (f iteratorFunc) Current() NodeNavigator {
-    return f()
-}
-
-// Evaluate returns the result of the expression.
-// The result type is one of the following: bool, float64, string, or *NodeIterator.
-func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
-    val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
-    switch val.(type) {
-    case query:
-        return &NodeIterator{query: expr.q.Clone(), node: root}
-    }
-    return val
-}
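A hedged sketch of driving the API above from user code: Compile (or MustCompile) produces an Expr, and Evaluate hands back one of the result types listed on it. The navigator here comes from the sibling xmlquery package that is also removed in this diff, and the sketch assumes count() is among this version's built-in XPath functions:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/antchfx/xpath"
	xmlquery "github.com/antchfx/xquery/xml"
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(`<items><item/><item/></items>`))
	if err != nil {
		panic(err)
	}
	nav := xmlquery.CreateXPathNavigator(doc)

	// count(...) evaluates to a float64; a bare location path would instead
	// come back as a *xpath.NodeIterator.
	switch v := xpath.MustCompile("count(//item)").Evaluate(nav).(type) {
	case float64:
		fmt.Println("number:", v) // number: 2
	case bool, string:
		fmt.Println("scalar:", v)
	case *xpath.NodeIterator:
		for v.MoveNext() {
			fmt.Println("node:", v.Current().Value())
		}
	}
}
```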
-// Select selects a node set using the specified XPath expression.
-func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
-    return &NodeIterator{query: expr.q.Clone(), node: root}
-}
-
-// String returns the XPath expression string.
-func (expr *Expr) String() string {
-    return expr.s
-}
-
-// Compile compiles an XPath expression string.
-func Compile(expr string) (*Expr, error) {
-    if expr == "" {
-        return nil, errors.New("expr expression is nil")
-    }
-    qy, err := build(expr)
-    if err != nil {
-        return nil, err
-    }
-    return &Expr{s: expr, q: qy}, nil
-}
-
-// MustCompile compiles an XPath expression string and ignores any error,
-// returning nil if compilation fails.
-func MustCompile(expr string) *Expr {
-    exp, err := Compile(expr)
-    if err != nil {
-        return nil
-    }
-    return exp
-}
diff --git a/vendor/github.com/antchfx/xquery/LICENSE b/vendor/github.com/antchfx/xquery/LICENSE
deleted file mode 100644
index e14c37141..000000000
--- a/vendor/github.com/antchfx/xquery/LICENSE
+++ /dev/null
@@ -1,17 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/antchfx/xquery/xml/node.go b/vendor/github.com/antchfx/xquery/xml/node.go
deleted file mode 100644
index 064d1cdd8..000000000
--- a/vendor/github.com/antchfx/xquery/xml/node.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package xmlquery
-
-import (
-    "bytes"
-    "encoding/xml"
-    "fmt"
-    "io"
-    "net/http"
-    "strings"
-
-    "golang.org/x/net/html/charset"
-)
-
-// A NodeType is the type of a Node.
-type NodeType uint
-
-const (
-    // DocumentNode is a document object that, as the root of the document tree,
-    // provides access to the entire XML document.
-    DocumentNode NodeType = iota
-    // DeclarationNode is the document type declaration, indicated by the following
-    // tag (for example, <!DOCTYPE...>).
-    DeclarationNode
-    // ElementNode is an element (for example, <item>).
-    ElementNode
-    // TextNode is the text content of a node.
-    TextNode
-    // CommentNode is a comment (for example, <!-- my comment -->).
-    CommentNode
-)
-
-// A Node consists of a NodeType and some Data (tag name for
-// element nodes, content for text) and is part of a tree of Nodes.
-type Node struct {
-    Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
-
-    Type         NodeType
-    Data         string
-    Prefix       string
-    NamespaceURI string
-    Attr         []xml.Attr
-
-    level int // node level in the tree
-}
-
-// InnerText returns the text between the start and end tags of the object.
-func (n *Node) InnerText() string {
-    var output func(*bytes.Buffer, *Node)
-    output = func(buf *bytes.Buffer, n *Node) {
-        switch n.Type {
-        case TextNode:
-            buf.WriteString(n.Data)
-            return
-        case CommentNode:
-            return
-        }
-        for child := n.FirstChild; child != nil; child = child.NextSibling {
-            output(buf, child)
-        }
-    }
-
-    var buf bytes.Buffer
-    output(&buf, n)
-    return buf.String()
-}
-
-func outputXML(buf *bytes.Buffer, n *Node) {
-    if n.Type == TextNode || n.Type == CommentNode {
-        buf.WriteString(strings.TrimSpace(n.Data))
-        return
-    }
-    buf.WriteString("<" + n.Data)
-    for _, attr := range n.Attr {
-        if attr.Name.Space != "" {
-            buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
-        } else {
-            buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
-        }
-    }
-    buf.WriteString(">")
-    for child := n.FirstChild; child != nil; child = child.NextSibling {
-        outputXML(buf, child)
-    }
-    buf.WriteString(fmt.Sprintf("</%s>", n.Data))
-}
-
-// OutputXML returns the XML text, including tag names.
-func (n *Node) OutputXML(self bool) string {
-    var buf bytes.Buffer
-    if self {
-        outputXML(&buf, n)
-    } else {
-        for n := n.FirstChild; n != nil; n = n.NextSibling {
-            outputXML(&buf, n)
-        }
-    }
-
-    return buf.String()
-}
-
-func addAttr(n *Node, key, val string) {
-    var attr xml.Attr
-    if i := strings.Index(key, ":"); i > 0 {
-        attr = xml.Attr{
-            Name:  xml.Name{Space: key[:i], Local: key[i+1:]},
-            Value: val,
-        }
-    } else {
-        attr = xml.Attr{
-            Name:  xml.Name{Local: key},
-            Value: val,
-        }
-    }
-
-    n.Attr = append(n.Attr, attr)
-}
-
-func addChild(parent, n *Node) {
-    n.Parent = parent
-    if parent.FirstChild == nil {
-        parent.FirstChild = n
-    } else {
-        parent.LastChild.NextSibling = n
-        n.PrevSibling = parent.LastChild
-    }
-
-    parent.LastChild = n
-}
-
-func addSibling(sibling, n *Node) {
-    n.Parent = sibling.Parent
-    sibling.NextSibling = n
-    n.PrevSibling = sibling
-    if sibling.Parent != nil {
-        sibling.Parent.LastChild = n
-    }
-}
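These Node helpers are easiest to see end to end. A minimal sketch using Parse and FindOne, both defined further down in this diff; the expected output in the comments assumes the closing-tag write in outputXML above:

```go
package main

import (
	"fmt"
	"strings"

	xmlquery "github.com/antchfx/xquery/xml"
)

func main() {
	doc, err := xmlquery.Parse(strings.NewReader(`<root><x id="1">hi</x></root>`))
	if err != nil {
		panic(err)
	}
	root := xmlquery.FindOne(doc, "//root")
	fmt.Println(root.OutputXML(true)) // <root><x id="1">hi</x></root>
	fmt.Println(root.InnerText())     // hi

	// SelectAttr is defined in query.go below.
	fmt.Println(xmlquery.FindOne(doc, "//x").SelectAttr("id")) // 1
}
```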
-// LoadURL loads the XML document from the specified URL.
-func LoadURL(url string) (*Node, error) {
-    resp, err := http.Get(url)
-    if err != nil {
-        return nil, err
-    }
-    defer resp.Body.Close()
-    return parse(resp.Body)
-}
-
-func parse(r io.Reader) (*Node, error) {
-    var (
-        decoder      = xml.NewDecoder(r)
-        doc          = &Node{Type: DocumentNode}
-        space2prefix = make(map[string]string)
-        level        = 0
-    )
-    decoder.CharsetReader = charset.NewReaderLabel
-    prev := doc
-    for {
-        tok, err := decoder.Token()
-        switch {
-        case err == io.EOF:
-            goto quit
-        case err != nil:
-            return nil, err
-        }
-
-        switch tok := tok.(type) {
-        case xml.StartElement:
-            if level == 0 {
-                // missing XML declaration
-                node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
-                addChild(prev, node)
-                level = 1
-                prev = node
-            }
-            node := &Node{
-                Type:         ElementNode,
-                Data:         tok.Name.Local,
-                Prefix:       space2prefix[tok.Name.Space],
-                NamespaceURI: tok.Name.Space,
-                Attr:         tok.Attr,
-                level:        level,
-            }
-            for _, att := range tok.Attr {
-                if att.Name.Space == "xmlns" {
-                    space2prefix[att.Value] = att.Name.Local
-                }
-            }
-            //fmt.Println(fmt.Sprintf("start > %s : %d", node.Data, level))
-            if level == prev.level {
-                addSibling(prev, node)
-            } else if level > prev.level {
-                addChild(prev, node)
-            } else if level < prev.level {
-                for i := prev.level - level; i > 1; i-- {
-                    prev = prev.Parent
-                }
-                addSibling(prev.Parent, node)
-            }
-            prev = node
-            level++
-        case xml.EndElement:
-            level--
-        case xml.CharData:
-            node := &Node{Type: TextNode, Data: string(tok), level: level}
-            if level == prev.level {
-                addSibling(prev, node)
-            } else if level > prev.level {
-                addChild(prev, node)
-            }
-        case xml.Comment:
-            node := &Node{Type: CommentNode, Data: string(tok), level: level}
-            if level == prev.level {
-                addSibling(prev, node)
-            } else if level > prev.level {
-                addChild(prev, node)
-            }
-        case xml.ProcInst: // Processing Instruction
-            if prev.Type != DeclarationNode {
-                level++
-            }
-            node := &Node{Type: DeclarationNode, Data: tok.Target, level: level}
-            pairs := strings.Split(string(tok.Inst), " ")
-            for _, pair := range pairs {
-                pair = strings.TrimSpace(pair)
-                if i := strings.Index(pair, "="); i > 0 {
-                    addAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
-                }
-            }
-            if level == prev.level {
-                addSibling(prev, node)
-            } else if level > prev.level {
-                addChild(prev, node)
-            }
-            prev = node
-        case xml.Directive:
-        }
-
-    }
-quit:
-    return doc, nil
-}
-
-// Parse returns the parse tree for the XML from the given Reader.
-func Parse(r io.Reader) (*Node, error) {
-    return parse(r)
-}
-
-// ParseXML returns the parse tree for the XML from the given Reader.
-// Deprecated: use Parse instead.
-func ParseXML(r io.Reader) (*Node, error) {
-    return parse(r)
-}
diff --git a/vendor/github.com/antchfx/xquery/xml/query.go b/vendor/github.com/antchfx/xquery/xml/query.go
deleted file mode 100644
index 7981c637e..000000000
--- a/vendor/github.com/antchfx/xquery/xml/query.go
+++ /dev/null
@@ -1,230 +0,0 @@
-/*
-Package xmlquery provides the ability to extract data from XML documents
-using XPath expressions.
-*/
-package xmlquery
-
-import (
-    "fmt"
-    "strings"
-
-    "github.com/antchfx/xpath"
-)
-
-// SelectElements finds child elements with the specified name.
-func (n *Node) SelectElements(name string) []*Node {
-    return Find(n, name)
-}
-
-// SelectElement finds the first child element with the specified name.
-func (n *Node) SelectElement(name string) *Node {
-    return FindOne(n, name)
-}
-
-// SelectAttr returns the attribute value with the specified name.
-func (n *Node) SelectAttr(name string) string { - var local, space string - local = name - if i := strings.Index(name, ":"); i > 0 { - space = name[:i] - local = name[i+1:] - } - for _, attr := range n.Attr { - if attr.Name.Local == local && attr.Name.Space == space { - return attr.Value - } - } - return "" -} - -var _ xpath.NodeNavigator = &NodeNavigator{} - -// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node. -func CreateXPathNavigator(top *Node) *NodeNavigator { - return &NodeNavigator{curr: top, root: top, attr: -1} -} - -// Find searches the Node that matches by the specified XPath expr. -func Find(top *Node, expr string) []*Node { - exp, err := xpath.Compile(expr) - if err != nil { - panic(err) - } - t := exp.Select(CreateXPathNavigator(top)) - var elems []*Node - for t.MoveNext() { - elems = append(elems, (t.Current().(*NodeNavigator)).curr) - } - return elems -} - -// FindOne searches the Node that matches by the specified XPath expr, -// and returns first element of matched. -func FindOne(top *Node, expr string) *Node { - exp, err := xpath.Compile(expr) - if err != nil { - panic(err) - } - t := exp.Select(CreateXPathNavigator(top)) - var elem *Node - if t.MoveNext() { - elem = (t.Current().(*NodeNavigator)).curr - } - return elem -} - -// FindEach searches the html.Node and calls functions cb. -func FindEach(top *Node, expr string, cb func(int, *Node)) { - exp, err := xpath.Compile(expr) - if err != nil { - panic(err) - } - t := exp.Select(CreateXPathNavigator(top)) - var i int - for t.MoveNext() { - cb(i, (t.Current().(*NodeNavigator)).curr) - i++ - } -} - -type NodeNavigator struct { - root, curr *Node - attr int -} - -func (x *NodeNavigator) Current() *Node { - return x.curr -} - -func (x *NodeNavigator) NodeType() xpath.NodeType { - switch x.curr.Type { - case CommentNode: - return xpath.CommentNode - case TextNode: - return xpath.TextNode - case DeclarationNode, DocumentNode: - return xpath.RootNode - case ElementNode: - if x.attr != -1 { - return xpath.AttributeNode - } - return xpath.ElementNode - } - panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type)) -} - -func (x *NodeNavigator) LocalName() string { - if x.attr != -1 { - return x.curr.Attr[x.attr].Name.Local - } - return x.curr.Data - -} - -func (x *NodeNavigator) Prefix() string { - return x.curr.Prefix -} - -func (x *NodeNavigator) Value() string { - switch x.curr.Type { - case CommentNode: - return x.curr.Data - case ElementNode: - if x.attr != -1 { - return x.curr.Attr[x.attr].Value - } - return x.curr.InnerText() - case TextNode: - return x.curr.Data - } - return "" -} - -func (x *NodeNavigator) Copy() xpath.NodeNavigator { - n := *x - return &n -} - -func (x *NodeNavigator) MoveToRoot() { - x.curr = x.root -} - -func (x *NodeNavigator) MoveToParent() bool { - if x.attr != -1 { - x.attr = -1 - return true - } else if node := x.curr.Parent; node != nil { - x.curr = node - return true - } - return false -} - -func (x *NodeNavigator) MoveToNextAttribute() bool { - if x.attr >= len(x.curr.Attr)-1 { - return false - } - x.attr++ - return true -} - -func (x *NodeNavigator) MoveToChild() bool { - if x.attr != -1 { - return false - } - if node := x.curr.FirstChild; node != nil { - x.curr = node - return true - } - return false -} - -func (x *NodeNavigator) MoveToFirst() bool { - if x.attr != -1 || x.curr.PrevSibling == nil { - return false - } - for { - node := x.curr.PrevSibling - if node == nil { - break - } - x.curr = node - } - return true -} - -func (x *NodeNavigator) 
String() string {
-    return x.Value()
-}
-
-func (x *NodeNavigator) MoveToNext() bool {
-    if x.attr != -1 {
-        return false
-    }
-    if node := x.curr.NextSibling; node != nil {
-        x.curr = node
-        return true
-    }
-    return false
-}
-
-func (x *NodeNavigator) MoveToPrevious() bool {
-    if x.attr != -1 {
-        return false
-    }
-    if node := x.curr.PrevSibling; node != nil {
-        x.curr = node
-        return true
-    }
-    return false
-}
-
-func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
-    node, ok := other.(*NodeNavigator)
-    if !ok || node.root != x.root {
-        return false
-    }
-
-    x.curr = node.curr
-    x.attr = node.attr
-    return true
-}
diff --git a/vendor/github.com/dylanmei/winrmtest/.gitignore b/vendor/github.com/dylanmei/winrmtest/.gitignore
deleted file mode 100644
index 2d4daa405..000000000
--- a/vendor/github.com/dylanmei/winrmtest/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.idea*
diff --git a/vendor/github.com/dylanmei/winrmtest/README.md b/vendor/github.com/dylanmei/winrmtest/README.md
deleted file mode 100644
index 19c19609d..000000000
--- a/vendor/github.com/dylanmei/winrmtest/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-# winrmtest
-
-An in-progress testing package to complement the [masterzen/winrm](https://github.com/masterzen/winrm) Go-based winrm library.
-
-My primary use-case for this is for [dylanmei/packer-communicator-winrm](https://github.com/dylanmei/packer-communicator-winrm), a [Packer](http://packer.io) communicator plugin for interacting with machines using Windows Remote Management.
-
-## Example Use
-
-A fictitious "Windows tools" package.
-
-```
-
-package wintools
-
-import (
-    "io"
-    "testing"
-    "github.com/dylanmei/winrmtest"
-)
-
-func Test_empty_temp_directory(t *testing.T) {
-    r := winrmtest.NewRemote()
-    defer r.Close()
-
-    r.CommandFunc(winrmtest.MatchText("dir C:\\Temp"), func(out, err io.Writer) int {
-        out.Write([]byte(` Volume in drive C is Windows 2012 R2
- Volume Serial Number is XXXX-XXXX
-
- Directory of C:\
-
-File Not Found`))
-        return 0
-    })
-
-    lister := NewDirectoryLister(r.Host, r.Port)
-    list, _ := lister.TempDirectory()
-
-    if count := len(list.Dirs()); count != 0 {
-        t.Errorf("Expected 0 directories but found %d.\n", count)
-    }
-
-    if count := len(list.Files()); count != 0 {
-        t.Errorf("Expected 0 files but found %d.\n", count)
-    }
-}
-```
-
diff --git a/vendor/github.com/dylanmei/winrmtest/remote.go b/vendor/github.com/dylanmei/winrmtest/remote.go
deleted file mode 100644
index ecc083f79..000000000
--- a/vendor/github.com/dylanmei/winrmtest/remote.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package winrmtest
-
-import (
-    "io"
-    "net/http"
-    "net/http/httptest"
-    "net/url"
-    "regexp"
-    "strconv"
-    "strings"
-)
-
-// Remote represents a WinRM server
-type Remote struct {
-    Host    string
-    Port    int
-    server  *httptest.Server
-    service *wsman
-}
-
-// NewRemote returns a new initialized Remote
-func NewRemote() *Remote {
-    mux := http.NewServeMux()
-    srv := httptest.NewServer(mux)
-
-    host, port, _ := splitAddr(srv.URL)
-    remote := Remote{
-        Host:    host,
-        Port:    port,
-        server:  srv,
-        service: &wsman{},
-    }
-
-    mux.Handle("/wsman", remote.service)
-    return &remote
-}
-
-// Close closes the WinRM server
-func (r *Remote) Close() {
-    r.server.Close()
-}
-
-// MatcherFunc represents a function used to match WinRM commands
-type MatcherFunc func(candidate string) bool
-
-// MatchText returns a new MatcherFunc based on text matching
-func MatchText(text string) MatcherFunc {
-    return func(candidate string) bool {
-        return text == candidate
-    }
-}
-
-// MatchPattern returns a new MatcherFunc based on pattern matching
-func MatchPattern(pattern string) MatcherFunc {
-    r := regexp.MustCompile(pattern)
-    return func(candidate string) bool {
-        return r.MatchString(candidate)
-    }
-}
-
-// CommandFunc represents a function used to mock WinRM commands
-type CommandFunc func(out, err io.Writer) (exitCode int)
-
-// CommandFunc adds a WinRM command mock function to the WinRM server
-func (r *Remote) CommandFunc(m MatcherFunc, f CommandFunc) {
-    r.service.HandleCommand(m, f)
-}
-
-func splitAddr(addr string) (host string, port int, err error) {
-    u, err := url.Parse(addr)
-    if err != nil {
-        return
-    }
-
-    split := strings.Split(u.Host, ":")
-    host = split[0]
-    port, err = strconv.Atoi(split[1])
-    return
-}
diff --git a/vendor/github.com/dylanmei/winrmtest/wsman.go b/vendor/github.com/dylanmei/winrmtest/wsman.go
deleted file mode 100644
index 35234da47..000000000
--- a/vendor/github.com/dylanmei/winrmtest/wsman.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package winrmtest
-
-import (
-    "bytes"
-    "encoding/base64"
-    "fmt"
-    "net/http"
-    "strconv"
-    "strings"
-
-    "github.com/antchfx/xquery/xml"
-    "github.com/satori/go.uuid"
-)
-
-type wsman struct {
-    commands     []*command
-    identitySeed int
-}
-
-type command struct {
-    id      string
-    matcher MatcherFunc
-    handler CommandFunc
-}
-
-func (w *wsman) HandleCommand(m MatcherFunc, f CommandFunc) string {
-    id := uuid.NewV4().String()
-    w.commands = append(w.commands, &command{
-        id:      id,
-        matcher: m,
-        handler: f,
-    })
-
-    return id
-}
-
-func (w *wsman) CommandByText(cmd string) *command {
-    for _, c := range w.commands {
-        if c.matcher(cmd) {
-            return c
-        }
-    }
-    return nil
-}
-
-func (w *wsman) CommandByID(id string) *command {
-    for _, c := range w.commands {
-        if c.id == id {
-            return c
-        }
-    }
-    return nil
-}
-
-func (w *wsman) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
-    rw.Header().Add("Content-Type", "application/soap+xml")
-
-    defer r.Body.Close()
-    env, err := xmlquery.Parse(r.Body)
-
-    if err != nil {
-        return
-    }
-
-    action := readAction(env)
-    switch {
-    case strings.HasSuffix(action, "transfer/Create"):
-        // create a new shell
-
-        rw.Write([]byte(`
-
-            123
-        `))
-
-    case strings.HasSuffix(action, "shell/Command"):
-        // execute on behalf of the client
-        text := readCommand(env)
-        cmd := w.CommandByText(text)
-
-        if cmd == nil {
-            fmt.Printf("I don't know this command: Command=%s\n", text)
-            rw.WriteHeader(http.StatusInternalServerError)
-            return
-        }
-
-        rw.Write([]byte(fmt.Sprintf(`
-
-            %s
-        `, cmd.id)))
-
-    case strings.HasSuffix(action, "shell/Receive"):
-        // client ready to receive the results
-
-        id := readCommandIDFromDesiredStream(env)
-        cmd := w.CommandByID(id)
-
-        if cmd == nil {
-            fmt.Printf("I don't know this command: CommandId=%s\n", id)
-            rw.WriteHeader(http.StatusInternalServerError)
-            return
-        }
-
-        stdout := new(bytes.Buffer)
-        stderr := new(bytes.Buffer)
-        result := cmd.handler(stdout, stderr)
-        content := base64.StdEncoding.EncodeToString(stdout.Bytes())
-
-        rw.Write([]byte(fmt.Sprintf(`
-
-
-            %s
-
-
-
-            %d
-
-
-        `, id, content, id, id, result)))
-
-    case strings.HasSuffix(action, "shell/Signal"):
-        // end of the shell command
-        rw.WriteHeader(http.StatusOK)
-    case strings.HasSuffix(action, "transfer/Delete"):
-        // end of the session
-        rw.WriteHeader(http.StatusOK)
-    default:
-        fmt.Printf("I don't know this action: %s\n", action)
-        rw.WriteHeader(http.StatusInternalServerError)
-    }
-}
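The README above demonstrates MatchText; here is a corresponding sketch for the pattern-based matcher from remote.go. The command, pattern, and canned output are made up for illustration:

```go
package main

import (
	"io"

	"github.com/dylanmei/winrmtest"
)

func main() {
	r := winrmtest.NewRemote()
	defer r.Close()

	// Match any "dir" invocation, regardless of the path argument.
	r.CommandFunc(winrmtest.MatchPattern(`^dir .*`), func(out, err io.Writer) int {
		out.Write([]byte("File Not Found"))
		return 0
	})

	// r.Host and r.Port now point at the stub server; hand them to a
	// winrm-speaking client under test.
	_ = r.Host
	_ = r.Port
}
```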
-func readAction(env *xmlquery.Node) string {
-    xpath := xmlquery.FindOne(env, "//a:Action")
-    if xpath == nil {
-        return ""
-    }
-
-    return xpath.InnerText()
-}
-
-func readCommand(env *xmlquery.Node) string {
-    xpath := xmlquery.FindOne(env, "//rsp:Command")
-    if xpath == nil {
-        return ""
-    }
-
-    if unquoted, err := strconv.Unquote(xpath.InnerText()); err == nil {
-        return unquoted
-    }
-    return xpath.InnerText()
-}
-
-func readCommandIDFromDesiredStream(env *xmlquery.Node) string {
-    xpath := xmlquery.FindOne(env, "//rsp:DesiredStream")
-    if xpath == nil {
-        return ""
-    }
-
-    return xpath.SelectAttr("CommandId")
-}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/doc.go
new file mode 100644
index 000000000..56f4edddd
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/doc.go
@@ -0,0 +1,243 @@
+/*
+Package acctest provides an acceptance testing framework for testing builders
+and provisioners.
+
+Writing Provisioner Acceptance Tests
+
+Packer has implemented a `ProvisionerTestCase` structure to help write
+provisioner acceptance tests.
+
+```go
+type ProvisionerTestCase struct {
+    // Check is called after this step is executed in order to test that
+    // the step executed successfully. If this is not set, then the next
+    // step will be called.
+    Check func(*exec.Cmd, string) error
+    // IsCompatible checks whether a provisioner is able to run against a
+    // given builder type and guest operating system, and returns a boolean.
+    // If it returns true, the test combination is okay to run. If false, the
+    // test combination is not okay to run.
+    IsCompatible func(builderType string, BuilderGuestOS string) bool
+    // Name is the name of the test case. Be simple but unique and descriptive.
+    Name string
+    // Setup, if non-nil, will be called once before the test case
+    // runs. This can be used for some setup like setting environment
+    // variables, or for validation prior to the
+    // test running. For example, you can use this to make sure certain
+    // binaries are installed, or test fixtures are in place.
+    Setup func() error
+    // Teardown will be called before the test case is over regardless
+    // of if the test succeeded or failed. This should return an error
+    // in the case that the test can't guarantee all resources were
+    // properly cleaned up.
+    Teardown builderT.TestTeardownFunc
+    // Template is the provisioner template to use.
+    // The provisioner template fragment must be a json-formatted string
+    // containing the provisioner definition but no other portions of a packer
+    // template. For
+    // example:
+    //
+    // ```json
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world"]
+    // }
+    //```
+    //
+    // is a valid entry for "template" here, but the complete Packer template:
+    //
+    // ```json
+    // {
+    //  "provisioners": [
+    //   {
+    //    "type": "shell-local",
+    //    "inline": ["echo hello world"]
+    //   }
+    //  ]
+    // }
+    // ```
+    //
+    // is invalid as input.
+    //
+    // You may provide multiple provisioners in the same template. For example:
+    // ```json
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world"]
+    // },
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world 2"]
+    // }
+    // ```
+    Template string
+    // Type is the type of provisioner.
+    Type string
+}
+
+```
+
+To start writing a new provisioner acceptance test, you should add a test file
+named `provisioner_acc_test.go` in the same folder as your provisioner is
+defined.
+Create a test case by implementing the above struct, and run it by calling
+`provisioneracc.TestProvisionersAgainstBuilders(testCase, t)`.
+
+The following example has been adapted from a shell-local provisioner test:
+
+```
+import (
+    "github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc"
+    "github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
+)
+
+// ...
+
+func TestAccShellProvisioner_basic(t *testing.T) {
+    // Create a json template fragment containing just the provisioners you want
+    // to run.
+    templateString := `{
+    "type": "shell-local",
+    "script": "test-fixtures/script.sh",
+    "max_retries" : 5
+}`
+
+    // instantiate a test case.
+    testCase := &provisioneracc.ProvisionerTestCase{
+        IsCompatible: func(builderType string, builderGuestOS string) bool { return true },
+        Name:         "shell-local-provisioner-basic",
+        Teardown: func() error {
+            testutils.CleanupFiles("test-fixtures/file.txt")
+            return nil
+        },
+        Template: templateString,
+        Type:     "shell-local",
+        Check: func(buildcommand *exec.Cmd, logfile string) error {
+            if buildcommand.ProcessState != nil {
+                if buildcommand.ProcessState.ExitCode() != 0 {
+                    return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
+                }
+            }
+            filecontents, err := loadFile("file.txt")
+            if err != nil {
+                return err
+            }
+            if !strings.Contains(filecontents, "hello") {
+                return fmt.Errorf("file contents were wrong: %s", filecontents)
+            }
+            return nil
+        },
+    }
+
+    provisioneracc.TestProvisionersAgainstBuilders(testCase, t)
+}
+
+```
+
+After writing the struct and implementing the interface, it is now time to
+write the test that will run all of this code you wrote. Your test should
+look like:
+
+```go
+func TestShellProvisioner(t *testing.T) {
+    acc.TestProvisionersPreCheck("shell", t)
+    acc.TestProvisionersAgainstBuilders(new(ShellProvisionerAccTest), t)
+}
+```
+
+The method `TestProvisionersAgainstBuilders` will run the provisioner against
+all available and compatible builders. If there are no builders compatible with
+the test you want to run, you can add a builder using the following steps:
+
+Create a subdirectory in provisioneracc/test-fixtures for the type of builder
+you are adding. In this subdirectory, add one json file containing a single
+builder fragment. For example, one of our amazon-ebs builders is defined in
+provisioneracc/test-fixtures/amazon-ebs/amazon-ebs.txt and contains:
+
+```json
+{
+  "type": "amazon-ebs",
+  "ami_name": "packer-acc-test",
+  "instance_type": "t2.micro",
+  "region": "us-east-1",
+  "ssh_username": "ubuntu",
+  "source_ami_filter": {
+    "filters": {
+      "virtualization-type": "hvm",
+      "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
+      "root-device-type": "ebs"
+    },
+    "owners": ["099720109477"],
+    "most_recent": true
+  },
+  "force_deregister" : true,
+  "tags": {
+    "packer-test": "true"
+  }
+}
+```
+
+Note that this fragment does not contain anything other than a single builder
+definition. The testing framework will combine this with the provisioner
+fragment to create a working json template.
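For illustration, the framework merges the builder fragment and the provisioner fragment into a single document of the shape `{"builders": [...], "provisioners": [...]}`. A sketch of the merged result, with most amazon-ebs fields elided for brevity:

```go
package acctestexample

// mergedTemplate approximates what TestProvisionersAgainstBuilders assembles
// from the amazon-ebs builder fragment above and a shell-local provisioner
// fragment; the real output contains every field of both fragments.
const mergedTemplate = `{
  "builders": [
    {"type": "amazon-ebs", "ami_name": "packer-acc-test", "instance_type": "t2.micro"}
  ],
  "provisioners": [
    {"type": "shell-local", "script": "test-fixtures/script.sh", "max_retries": 5}
  ]
}`
```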
+In order to tell the testing framework how to use this builder fragment, you
+need to implement a `BuilderFixture` struct:
+
+```go
+type BuilderFixture struct {
+    // Name is the name of the builder fixture.
+    // Be simple and descriptive.
+    Name string
+
+    // Setup creates necessary extra test fixtures, and renders their values
+    // into the BuilderFixture.Template.
+    Setup func()
+
+    // Template is the path to a builder template fragment.
+    // The builder template fragment must be a json-formatted file containing
+    // the builder definition but no other portions of a packer template. For
+    // example:
+    //
+    // ```json
+    // {
+    //  "type": "null",
+    //  "communicator": "none"
+    // }
+    //```
+    //
+    // is a valid entry for "template" here, but the complete Packer template:
+    //
+    // ```json
+    // {
+    //  "builders": [
+    //   "type": "null",
+    //   "communicator": "none"
+    //  ]
+    // }
+    // ```
+    //
+    // is invalid as input.
+    //
+    // Only provide one builder template fragment per file.
+    TemplatePath string
+
+    // GuestOS says what guest os type the builder template fragment creates.
+    // Valid values are "windows", "linux" or "darwin" guests.
+    GuestOS string
+
+    // HostOS says what host os type the builder is capable of running on.
+    // Valid values are "any", "windows", or "posix". If you set "posix", then
+    // this builder can run on a "linux" or "darwin" platform. If you set
+    // "any", then this builder can be used on any platform.
+    HostOS string
+
+    Teardown builderT.TestTeardownFunc
+}
+```
+Implement this struct in the file "provisioneracc/builders.go", then add
+the new implementation to the `BuildersAccTest` map in
+`provisioneracc/provisioners.go`.
+
+Once you finish these steps, you should be ready to run your new provisioner
+acceptance test by setting the name used in the BuildersAccTest map as your
+`ACC_TEST_BUILDERS` environment variable.
+*/
+package acctest
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/builders.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/builders.go
new file mode 100644
index 000000000..84010c07e
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/builders.go
@@ -0,0 +1,63 @@
+/*
+Package provisioneracc creates a framework for provisioner acceptance
+testing. For builder acceptance testing, use the top level tooling in the
+acctest package.
+*/
+package provisioneracc
+
+import (
+    "github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
+)
+
+// Variables stored in this file represent implementations of the BuilderFixture
+// struct inside of provisioners.go.
+
+// AmasonEBSBuilderFixtureLinux points to a build stub of a simple amazon-ebs
+// build running on a linux operating system.
+var AmasonEBSBuilderFixtureLinux = &BuilderFixture{
+    Name:         "Amazon-ebs Linux builder",
+    TemplatePath: "amazon-ebs/amazon-ebs.txt",
+    GuestOS:      "linux",
+    HostOS:       "any",
+    Teardown: func() error {
+        // TODO
+        // helper := AWSHelper{
+        //  Region:  "us-east-1",
+        //  AMIName: "packer-acc-test",
+        // }
+        // return helper.CleanUpAmi()
+        return nil
+    },
+}
+
+// AmasonEBSBuilderFixtureWindows points to a build stub of a simple amazon-ebs
+// build running on a Windows operating system.
+var AmasonEBSBuilderFixtureWindows = &BuilderFixture{
+    Name:         "Amazon-ebs Windows builder",
+    TemplatePath: "amazon-ebs/amazon-ebs_windows.txt",
+    GuestOS:      "windows",
+    HostOS:       "any",
+    Teardown: func() error {
+        // TODO
+        // helper := AWSHelper{
+        //  Region:  "us-east-1",
+        //  AMIName: "packer-acc-test",
+        // }
+        // return helper.CleanUpAmi()
+        return nil
+    },
+}
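Defining a fixture for another builder follows the same shape. A hypothetical example; the null builder template, path, and name below are illustrative and not part of the SDK:

```go
// NullBuilderFixtureLinux is a hypothetical fixture. The template path would
// need to exist under provisioneracc/test-fixtures for it to load.
var NullBuilderFixtureLinux = &BuilderFixture{
	Name:         "Null Linux builder",
	TemplatePath: "null/null.txt",
	GuestOS:      "linux",
	HostOS:       "any",
	Teardown: func() error {
		// The null builder leaves no artifacts behind to clean up.
		return nil
	},
}
```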
+// VirtualboxBuilderFixtureLinux points to a build stub of a simple
+// virtualbox-iso build running on a linux operating system.
+var VirtualboxBuilderFixtureLinux = &BuilderFixture{
+    Name:         "Virtualbox Linux builder",
+    TemplatePath: "virtualbox/virtualbox-iso.txt",
+    GuestOS:      "linux",
+    HostOS:       "any",
+    Teardown: func() error {
+        testutils.CleanupFiles("virtualbox-iso-packer-acc-test")
+        testutils.CleanupFiles("packer_cache")
+        return nil
+    },
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/provisioners.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/provisioners.go
new file mode 100644
index 000000000..8473ced8b
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/provisioners.go
@@ -0,0 +1,338 @@
+package provisioneracc
+
+import (
+    "bytes"
+    "fmt"
+    "io/ioutil"
+    "log"
+    "os"
+    "os/exec"
+    "path/filepath"
+    "runtime"
+    "strings"
+    "testing"
+
+    builderT "github.com/hashicorp/packer-plugin-sdk/acctest"
+    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// ProvisionerTestCase is a single set of tests to run for a provisioner.
+// A ProvisionerTestCase should generally map 1:1 to each test method for your
+// acceptance tests.
+type ProvisionerTestCase struct {
+    // Check is called after this step is executed in order to test that
+    // the step executed successfully. If this is not set, then the next
+    // step will be called.
+    Check func(*exec.Cmd, string) error
+    // IsCompatible checks whether a provisioner is able to run against a
+    // given builder type and guest operating system, and returns a boolean.
+    // If it returns true, the test combination is okay to run. If false, the
+    // test combination is not okay to run.
+    IsCompatible func(builderType string, BuilderGuestOS string) bool
+    // Name is the name of the test case. Be simple but unique and descriptive.
+    Name string
+    // Setup, if non-nil, will be called once before the test case
+    // runs. This can be used for some setup like setting environment
+    // variables, or for validation prior to the
+    // test running. For example, you can use this to make sure certain
+    // binaries are installed, or test fixtures are in place.
+    Setup func() error
+    // Teardown will be called before the test case is over regardless
+    // of if the test succeeded or failed. This should return an error
+    // in the case that the test can't guarantee all resources were
+    // properly cleaned up.
+    Teardown builderT.TestTeardownFunc
+    // Template is the provisioner template to use.
+    // The provisioner template fragment must be a json-formatted string
+    // containing the provisioner definition but no other portions of a packer
+    // template. For
+    // example:
+    //
+    // ```json
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world"]
+    // }
+    //```
+    //
+    // is a valid entry for "template" here, but the complete Packer template:
+    //
+    // ```json
+    // {
+    //  "provisioners": [
+    //   {
+    //    "type": "shell-local",
+    //    "inline": ["echo hello world"]
+    //   }
+    //  ]
+    // }
+    // ```
+    //
+    // is invalid as input.
+    //
+    // You may provide multiple provisioners in the same template. For example:
+    // ```json
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world"]
+    // },
+    // {
+    //  "type": "shell-local",
+    //  "inline": ["echo hello world 2"]
+    // }
+    // ```
+    Template string
+    // Type is the type of provisioner.
+    Type string
+}
+
+// BuilderFixtures are basic builder test configurations and metadata used
+// in provisioner acceptance testing. These are frameworks to be used by
+// provisioner tests, not tests in and of themselves.
+// BuilderFixtures should generally be simple and not contain excessive or
+// complex configurations. Instantiations of this struct are stored in the
+// builders.go file in this module.
+type BuilderFixture struct {
+    // Name is the name of the builder fixture.
+    // Be simple and descriptive.
+    Name string
+
+    // Setup creates necessary extra test fixtures, and renders their values
+    // into the BuilderFixture.Template.
+    Setup func()
+
+    // Template is the path to a builder template fragment.
+    // The builder template fragment must be a json-formatted file containing
+    // the builder definition but no other portions of a packer template. For
+    // example:
+    //
+    // ```json
+    // {
+    //  "type": "null",
+    //  "communicator": "none"
+    // }
+    //```
+    //
+    // is a valid entry for "template" here, but the complete Packer template:
+    //
+    // ```json
+    // {
+    //  "builders": [
+    //   "type": "null",
+    //   "communicator": "none"
+    //  ]
+    // }
+    // ```
+    //
+    // is invalid as input.
+    //
+    // Only provide one builder template fragment per file.
+    TemplatePath string
+
+    // GuestOS says what guest os type the builder template fragment creates.
+    // Valid values are "windows", "linux" or "darwin" guests.
+    GuestOS string
+
+    // HostOS says what host os type the builder is capable of running on.
+    // Valid values are "any", "windows", or "posix". If you set "posix", then
+    // this builder can run on a "linux" or "darwin" platform. If you set
+    // "any", then this builder can be used on any platform.
+    HostOS string
+
+    Teardown builderT.TestTeardownFunc
+}
+
+func fixtureDir() string {
+    _, file, _, _ := runtime.Caller(0)
+    return filepath.Join(filepath.Dir(file), "test-fixtures")
+}
+
+func LoadBuilderFragment(templateFragmentPath string) (string, error) {
+    dir := fixtureDir()
+    fragmentAbsPath := filepath.Join(dir, templateFragmentPath)
+    fragmentFile, err := os.Open(fragmentAbsPath)
+    if err != nil {
+        return "", fmt.Errorf("Unable to find %s", fragmentAbsPath)
+    }
+    defer fragmentFile.Close()
+
+    fragmentString, err := ioutil.ReadAll(fragmentFile)
+    if err != nil {
+        return "", fmt.Errorf("Unable to read %s", fragmentAbsPath)
+    }
+
+    return string(fragmentString), nil
+}
+
+func RunProvisionerAccTest(testCase *ProvisionerTestCase, t *testing.T) {
+    TestProvisionersAgainstBuilders(testCase, t)
+}
+
+//nolint:errcheck
+func TestProvisionersAgainstBuilders(testCase *ProvisionerTestCase, t *testing.T) {
+    // retrieve user-desired builders.
+    builderTypes := checkBuilders(t)
+
+    // Run this provisioner test case against each builder type requested.
+    for _, builderType := range builderTypes {
+        buildFixtures := BuildersAccTest[builderType]
+        // loop over individual build templates, merge with provisioner
+        // templates, and shell out to run test.
+        for _, buildFixture := range buildFixtures {
+            if !testCase.IsCompatible(builderType, buildFixture.GuestOS) {
+                continue
+            }
+
+            testName := fmt.Sprintf("%s on %s", testCase.Name, buildFixture.Name)
+
+            if testCase.Setup != nil {
+                err := testCase.Setup()
+                if err != nil {
+                    t.Fatalf("test %s setup failed: %s", testName, err)
+                }
+            }
+
+            t.Run(testName, func(t *testing.T) {
+                builderFragment, err := LoadBuilderFragment(buildFixture.TemplatePath)
+                if err != nil {
+                    t.Fatalf("failed to load builder fragment: %s", err)
+                }
+
+                // Combine provisioner and builder template fragments; write to
+                // file.
+ out := bytes.NewBuffer(nil) + fmt.Fprintf(out, `{"builders": [%s],"provisioners": [%s]}`, + builderFragment, testCase.Template) + templateName := fmt.Sprintf("%s_%s.json", builderType, testCase.Type) + templatePath := filepath.Join("./", templateName) + writeJsonTemplate(out, templatePath, t) + logfile := fmt.Sprintf("packer_log_%s_%s.txt", builderType, testCase.Type) + + // Make sure packer is installed: + packerbin, err := exec.LookPath("packer") + if err != nil { + t.Fatalf("Couldn't find packer binary installed on system: %s", err.Error()) + } + // Run build + buildCommand := exec.Command(packerbin, "build", "--machine-readable", templatePath) + buildCommand.Env = append(buildCommand.Env, os.Environ()...) + buildCommand.Env = append(buildCommand.Env, "PACKER_LOG=1", + fmt.Sprintf("PACKER_LOG_PATH=%s", logfile)) + buildCommand.Run() + + // Check for test custom pass/fail before we clean up + var checkErr error + if testCase.Check != nil { + checkErr = testCase.Check(buildCommand, logfile) + } + + // Cleanup stuff created by builder. + cleanErr := buildFixture.Teardown() + if cleanErr != nil { + log.Printf("bad: failed to clean up builder-created resources: %s", cleanErr.Error()) + } + // Clean up anything created in provisioner run + if testCase.Teardown != nil { + cleanErr = testCase.Teardown() + if cleanErr != nil { + log.Printf("bad: failed to clean up test-created resources: %s", cleanErr.Error()) + } + } + + // Fail test if check failed. + if checkErr != nil { + cwd, _ := os.Getwd() + t.Fatalf(fmt.Sprintf("Error running provisioner acceptance"+ + " tests: %s\nLogs can be found at %s\nand the "+ + "acceptance test template can be found at %s", + checkErr.Error(), filepath.Join(cwd, logfile), + filepath.Join(cwd, templatePath))) + } else { + os.Remove(templatePath) + os.Remove(logfile) + } + }) + } + } +} + +// checkBuilders retrieves all of the builders that the user has requested to +// run acceptance tests against. +func checkBuilders(t *testing.T) []string { + b := os.Getenv("ACC_TEST_BUILDERS") + // validate if we want to run provisioners acc tests + if b == "" { + t.Skip("Provisioners Acceptance tests skipped unless env 'ACC_TEST_BUILDERS' is set") + } + + // Get builders type to test provisioners against + var builders []string + for k := range BuildersAccTest { + // This will validate that only defined builders are executed against + if b != "all" && !strings.Contains(b, k) { + continue + } + builders = append(builders, k) + } + return builders +} + +func writeJsonTemplate(out *bytes.Buffer, filePath string, t *testing.T) { + outputFile, err := os.Create(filePath) + if err != nil { + t.Fatalf("bad: failed to create template file: %s", err.Error()) + } + _, err = outputFile.Write(out.Bytes()) + if err != nil { + t.Fatalf("bad: failed to write template file: %s", err.Error()) + } + outputFile.Sync() +} + +// BuilderAcceptance is specialized tooling implemented by individual builders. +// To add your builder to the provisioner testing framework, create a struct +// that implements this interface, add it to the BuildersAccTest map below. +// TODO add this interface to the plugin server so that Packer can request it +// From the plugin rather than importing it here. +type BuilderAcceptance interface { + // GetConfigs provides a mapping of guest OS architecture to builder + // template fragment. + // The builder template fragment must be a json-formatted string containing + // the builder definition but no other portions of a packer template. 
+	// For example:
+	//
+	// ```json
+	// {
+	//   "type": "null",
+	//   "communicator": "none"
+	// }
+	// ```
+	//
+	// is a valid entry for "template" here, but the complete Packer template:
+	//
+	// ```json
+	// {
+	//   "builders": [
+	//     {
+	//       "type": "null",
+	//       "communicator": "none"
+	//     }
+	//   ]
+	// }
+	// ```
+	//
+	// is invalid as input.
+	//
+	// Valid keys for the map are "linux" and "windows". These keys will be used
+	// to determine whether a given builder template is compatible with a given
+	// provisioner template.
+	GetConfigs() (map[string]string, error)
+	// GetBuilderStore returns a MapOfBuilder that contains the actual builder
+	// struct definition being used for this test.
+	GetBuilderStore() packersdk.MapOfBuilder
+	// CleanUp cleans up any side-effects of the builder not already cleaned up
+	// by the builderT framework.
+	CleanUp() error
+}
+
+// BuildersAccTest maps a builder type to the builder fixtures defined for it.
+var BuildersAccTest = map[string][]*BuilderFixture{
+	"virtualbox-iso": {VirtualboxBuilderFixtureLinux},
+	"amazon-ebs":     {AmasonEBSBuilderFixtureLinux, AmasonEBSBuilderFixtureWindows},
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testing.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testing.go
new file mode 100644
index 000000000..7af3dc48f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testing.go
@@ -0,0 +1,222 @@
+package acctest
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"testing"
+
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	"github.com/hashicorp/packer-plugin-sdk/template"
+	"github.com/hashicorp/packer/packer"
+	"github.com/hashicorp/packer/provisioner/file"
+	shellprovisioner "github.com/hashicorp/packer/provisioner/shell"
+)
+
+// TestEnvVar must be set to a non-empty value for acceptance tests to run.
+const TestEnvVar = "PACKER_ACC"
+
+// TestCase is a single set of tests to run for a builder. A TestCase
+// should generally map 1:1 to each test method for your acceptance
+// tests.
+type TestCase struct {
+	// PreCheck, if non-nil, will be called once before the test case
+	// runs at all. This can be used for some validation prior to the
+	// test running.
+	PreCheck func()
+
+	// Builder is the Builder that will be tested. It will be available
+	// as the "test" builder in the template.
+	Builder packersdk.Builder
+
+	// Template is the template contents to use.
+	Template string
+
+	// Check is called after the build is executed in order to test that
+	// it ran successfully. If this is not set, the test proceeds
+	// straight to teardown.
+	Check TestCheckFunc
+
+	// Teardown will be called before the test case is over regardless
+	// of whether the test succeeded or failed. This should return an error
+	// in the case that the test can't guarantee all resources were
+	// properly cleaned up.
+	Teardown TestTeardownFunc
+
+	// If SkipArtifactTeardown is true, we will not attempt to destroy the
+	// artifact created in this test run.
+	SkipArtifactTeardown bool
+	// If set, overrides the default provisioner store with custom provisioners.
+	// This can be useful for running acceptance tests for a particular
+	// provisioner using a specific builder.
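+	// For example, a test exercising only a hypothetical custom provisioner
+	// might set (names are illustrative):
+	//
+	// ProvisionerStore: packersdk.MapOfProvisioner{
+	// 	"my-prov": func() (packersdk.Provisioner, error) { return &myProvisioner{}, nil },
+	// },
+	//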
+	// Default provisioner store:
+	//
+	// ProvisionerStore: packersdk.MapOfProvisioner{
+	// 	"shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
+	// 	"file":  func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
+	// },
+	ProvisionerStore packersdk.MapOfProvisioner
+}
+
+// TestCheckFunc is the callback used for Check in TestCase.
+type TestCheckFunc func([]packersdk.Artifact) error
+
+// TestTeardownFunc is the callback used for Teardown in TestCase.
+type TestTeardownFunc func() error
+
+// TestT is the interface used to handle the lifecycle of a test.
+//
+// Users should just use a *testing.T object, which implements this.
+type TestT interface {
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Skip(args ...interface{})
+}
+
+type TestBuilderStore struct {
+	packer.BuilderStore
+	StartFn func(name string) (packersdk.Builder, error)
+}
+
+func (tbs TestBuilderStore) Start(name string) (packersdk.Builder, error) { return tbs.StartFn(name) }
+
+// Test performs an acceptance test on a builder with the given test case.
+//
+// Tests are not run unless an environment variable "PACKER_ACC" is
+// set to some non-empty value. This is to avoid test cases surprising
+// a user by creating real resources.
+//
+// Tests will fail unless the verbose flag (`go test -v`, or explicitly
+// the "-test.v" flag) is set. Because some acceptance tests take quite
+// a long time, we require the verbose flag so users are able to see
+// progress output.
+func Test(t TestT, c TestCase) {
+	// We only run acceptance tests if an env var is set because they're
+	// slow and generally require some outside configuration.
+	if os.Getenv(TestEnvVar) == "" {
+		t.Skip(fmt.Sprintf(
+			"Acceptance tests skipped unless env '%s' set",
+			TestEnvVar))
+		return
+	}
+
+	// We require verbose mode so that the user knows what is going on.
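+	// For example, a typical invocation is:
+	//
+	//	PACKER_ACC=1 go test -v -run TestBuilderAcc ./...
+	//
+	// (The test name is illustrative; any non-empty PACKER_ACC value works.)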
+ if !testTesting && !testing.Verbose() { + t.Fatal("Acceptance tests must be run with the -v flag on tests") + return + } + + // Run the PreCheck if we have it + if c.PreCheck != nil { + c.PreCheck() + } + + // Parse the template + log.Printf("[DEBUG] Parsing template...") + tpl, err := template.Parse(strings.NewReader(c.Template)) + if err != nil { + t.Fatal(fmt.Sprintf("Failed to parse template: %s", err)) + return + } + + if c.ProvisionerStore == nil { + c.ProvisionerStore = packersdk.MapOfProvisioner{ + "shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil }, + "file": func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil }, + } + } + // Build the core + log.Printf("[DEBUG] Initializing core...") + core := packer.NewCore(&packer.CoreConfig{ + Components: packer.ComponentFinder{ + BuilderStore: TestBuilderStore{ + StartFn: func(n string) (packersdk.Builder, error) { + if n == "test" { + return c.Builder, nil + } + + return nil, nil + }, + }, + ProvisionerStore: c.ProvisionerStore, + }, + Template: tpl, + }) + err = core.Initialize() + if err != nil { + t.Fatal(fmt.Sprintf("Failed to init core: %s", err)) + return + } + + // Get the build + log.Printf("[DEBUG] Retrieving 'test' build") + build, err := core.Build("test") + if err != nil { + t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err)) + return + } + + // Prepare it + log.Printf("[DEBUG] Preparing 'test' build") + warnings, err := build.Prepare() + if err != nil { + t.Fatal(fmt.Sprintf("Prepare error: %s", err)) + return + } + if len(warnings) > 0 { + t.Fatal(fmt.Sprintf( + "Prepare warnings:\n\n%s", + strings.Join(warnings, "\n"))) + return + } + + // Run it! We use a temporary directory for caching and discard + // any UI output. We discard since it shows up in logs anyways. + log.Printf("[DEBUG] Running 'test' build") + ui := &packersdk.BasicUi{ + Reader: os.Stdin, + Writer: ioutil.Discard, + ErrorWriter: ioutil.Discard, + PB: &packersdk.NoopProgressTracker{}, + } + artifacts, err := build.Run(context.Background(), ui) + if err != nil { + t.Fatal(fmt.Sprintf("Run error:\n\n%s", err)) + goto TEARDOWN + } + + // Check function + if c.Check != nil { + log.Printf("[DEBUG] Running check function") + if err := c.Check(artifacts); err != nil { + t.Fatal(fmt.Sprintf("Check error:\n\n%s", err)) + goto TEARDOWN + } + } + +TEARDOWN: + if !c.SkipArtifactTeardown { + // Delete all artifacts + for _, a := range artifacts { + if err := a.Destroy(); err != nil { + t.Error(fmt.Sprintf( + "!!! ERROR REMOVING ARTIFACT '%s': %s !!!", + a.String(), err)) + } + } + } + + // Teardown + if c.Teardown != nil { + log.Printf("[DEBUG] Running teardown function") + if err := c.Teardown(); err != nil { + t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err)) + return + } + } +} + +// This is for unit tests of this package. +var testTesting = false diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testutils/utils.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testutils/utils.go new file mode 100644 index 000000000..e8ab95031 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testutils/utils.go @@ -0,0 +1,20 @@ +// Package testutils provides some simple ease-of-use tools for implementing +// acceptance testing. +package testutils + +import "os" + +// CleanupFiles removes all the provided filenames. 
+func CleanupFiles(moreFiles ...string) {
+	for _, file := range moreFiles {
+		os.RemoveAll(file)
+	}
+}
+
+// FileExists returns true if the filename is found.
+func FileExists(filename string) bool {
+	if _, err := os.Stat(filename); err == nil {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/adapter.go b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/adapter.go
new file mode 100644
index 000000000..5d9adf455
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/adapter.go
@@ -0,0 +1,338 @@
+package adapter
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net"
+	"strings"
+
+	"github.com/google/shlex"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	"golang.org/x/crypto/ssh"
+)
+
+// An Adapter satisfies SSH requests (from an Ansible client) by delegating SSH
+// exec and subsystem commands to a packersdk.Communicator.
+type Adapter struct {
+	done    <-chan struct{}
+	l       net.Listener
+	config  *ssh.ServerConfig
+	sftpCmd string
+	ui      packersdk.Ui
+	comm    packersdk.Communicator
+}
+
+func NewAdapter(done <-chan struct{}, l net.Listener, config *ssh.ServerConfig, sftpCmd string, ui packersdk.Ui, comm packersdk.Communicator) *Adapter {
+	return &Adapter{
+		done:    done,
+		l:       l,
+		config:  config,
+		sftpCmd: sftpCmd,
+		ui:      ui,
+		comm:    comm,
+	}
+}
+
+func (c *Adapter) Serve() {
+	log.Printf("SSH proxy: serving on %s", c.l.Addr())
+
+	for {
+		// Accept will return either when the underlying connection is closed
+		// or when a connection is made. After it returns, check whether c.done
+		// can be received. If so, Accept() returned because the connection has
+		// been closed.
+		conn, err := c.l.Accept()
+		select {
+		case <-c.done:
+			return
+		default:
+			if err != nil {
+				c.ui.Error(fmt.Sprintf("listen.Accept failed: %v", err))
+				continue
+			}
+			go func(conn net.Conn) {
+				if err := c.Handle(conn, c.ui); err != nil {
+					c.ui.Error(err.Error())
+				}
+			}(conn)
+		}
+	}
+}
+
+func (c *Adapter) Handle(conn net.Conn, ui packersdk.Ui) error {
+	log.Print("SSH proxy: accepted connection")
+	_, chans, reqs, err := ssh.NewServerConn(conn, c.config)
+	if err != nil {
+		return errors.New("failed to handshake")
+	}
+
+	// Discard all global requests.
+	go ssh.DiscardRequests(reqs)
+
+	// Service the incoming NewChannels.
+	for newChannel := range chans {
+		if newChannel.ChannelType() != "session" {
+			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
+			continue
+		}
+
+		go func(ch ssh.NewChannel) {
+			if err := c.handleSession(ch); err != nil {
+				c.ui.Error(err.Error())
+			}
+		}(newChannel)
+	}
+
+	return nil
+}
+
+func (c *Adapter) handleSession(newChannel ssh.NewChannel) error {
+	channel, requests, err := newChannel.Accept()
+	if err != nil {
+		return err
+	}
+	defer channel.Close()
+
+	done := make(chan struct{})
+
+	// Sessions have requests such as "pty-req", "shell", "env", and "exec".
+	// See RFC 4254, section 6.
+	go func(in <-chan *ssh.Request) {
+		env := make([]envRequestPayload, 0, 4)
+		for req := range in {
+			switch req.Type {
+			case "pty-req":
+				log.Println("ansible provisioner pty-req request")
+				// Accept pty-req requests, but don't actually do anything.
+				// Necessary for OpenSSH and sudo.
+ req.Reply(true, nil) + + case "env": + req, err := newEnvRequest(req) + if err != nil { + c.ui.Error(err.Error()) + req.Reply(false, nil) + continue + } + env = append(env, req.Payload) + log.Printf("new env request: %s", req.Payload) + req.Reply(true, nil) + case "exec": + req, err := newExecRequest(req) + if err != nil { + c.ui.Error(err.Error()) + req.Reply(false, nil) + close(done) + continue + } + + log.Printf("new exec request: %s", req.Payload) + + if len(req.Payload) == 0 { + req.Reply(false, nil) + close(done) + return + } + + go func(channel ssh.Channel) { + exit := c.exec(string(req.Payload), channel, channel, channel.Stderr()) + + exitStatus := make([]byte, 4) + binary.BigEndian.PutUint32(exitStatus, uint32(exit)) + channel.SendRequest("exit-status", false, exitStatus) + close(done) + }(channel) + req.Reply(true, nil) + case "subsystem": + req, err := newSubsystemRequest(req) + if err != nil { + c.ui.Error(err.Error()) + req.Reply(false, nil) + continue + } + + log.Printf("new subsystem request: %s", req.Payload) + switch req.Payload { + case "sftp": + sftpCmd := c.sftpCmd + if len(sftpCmd) == 0 { + sftpCmd = "/usr/lib/sftp-server -e" + } + + log.Print("starting sftp subsystem") + go func() { + _ = c.remoteExec(sftpCmd, channel, channel, channel.Stderr()) + close(done) + }() + req.Reply(true, nil) + default: + c.ui.Error(fmt.Sprintf("unsupported subsystem requested: %s", req.Payload)) + req.Reply(false, nil) + } + default: + log.Printf("rejecting %s request", req.Type) + req.Reply(false, nil) + } + } + }(requests) + + <-done + return nil +} + +func (c *Adapter) Shutdown() { + c.l.Close() +} + +func (c *Adapter) exec(command string, in io.Reader, out io.Writer, err io.Writer) int { + var exitStatus int + switch { + case strings.HasPrefix(command, "scp ") && serveSCP(command[4:]): + err := c.scpExec(command[4:], in, out) + if err != nil { + log.Println(err) + exitStatus = 1 + } + default: + exitStatus = c.remoteExec(command, in, out, err) + } + return exitStatus +} + +func serveSCP(args string) bool { + opts, _ := scpOptions(args) + return bytes.IndexAny(opts, "tf") >= 0 +} + +func (c *Adapter) scpExec(args string, in io.Reader, out io.Writer) error { + opts, rest := scpOptions(args) + + // remove the quoting that ansible added to rest for shell safety. 
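+	// For example, shlex.Split turns the quoted token '/tmp/my file.txt'
+	// into the single unquoted string /tmp/my file.txt.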
+ shargs, err := shlex.Split(rest) + if err != nil { + return err + } + rest = strings.Join(shargs, "") + + if i := bytes.IndexByte(opts, 't'); i >= 0 { + return scpUploadSession(opts, rest, in, out, c.comm) + } + + if i := bytes.IndexByte(opts, 'f'); i >= 0 { + return scpDownloadSession(opts, rest, in, out, c.comm) + } + return errors.New("no scp mode specified") +} + +func (c *Adapter) remoteExec(command string, in io.Reader, out io.Writer, err io.Writer) int { + cmd := &packersdk.RemoteCmd{ + Stdin: in, + Stdout: out, + Stderr: err, + Command: command, + } + ctx := context.TODO() + + if err := c.comm.Start(ctx, cmd); err != nil { + c.ui.Error(err.Error()) + } + + cmd.Wait() + + return cmd.ExitStatus() +} + +type envRequest struct { + *ssh.Request + Payload envRequestPayload +} + +type envRequestPayload struct { + Name string + Value string +} + +func (p envRequestPayload) String() string { + return fmt.Sprintf("%s=%s", p.Name, p.Value) +} + +func newEnvRequest(raw *ssh.Request) (*envRequest, error) { + r := new(envRequest) + r.Request = raw + + if err := ssh.Unmarshal(raw.Payload, &r.Payload); err != nil { + return nil, err + } + + return r, nil +} + +func sshString(buf io.Reader) (string, error) { + var size uint32 + err := binary.Read(buf, binary.BigEndian, &size) + if err != nil { + return "", err + } + + b := make([]byte, size) + err = binary.Read(buf, binary.BigEndian, b) + if err != nil { + return "", err + } + return string(b), nil +} + +type execRequest struct { + *ssh.Request + Payload execRequestPayload +} + +type execRequestPayload string + +func (p execRequestPayload) String() string { + return string(p) +} + +func newExecRequest(raw *ssh.Request) (*execRequest, error) { + r := new(execRequest) + r.Request = raw + buf := bytes.NewReader(r.Request.Payload) + + var err error + var payload string + if payload, err = sshString(buf); err != nil { + return nil, err + } + + r.Payload = execRequestPayload(payload) + return r, nil +} + +type subsystemRequest struct { + *ssh.Request + Payload subsystemRequestPayload +} + +type subsystemRequestPayload string + +func (p subsystemRequestPayload) String() string { + return string(p) +} + +func newSubsystemRequest(raw *ssh.Request) (*subsystemRequest, error) { + r := new(subsystemRequest) + r.Request = raw + buf := bytes.NewReader(r.Request.Payload) + + var err error + var payload string + if payload, err = sshString(buf); err != nil { + return nil, err + } + + r.Payload = subsystemRequestPayload(payload) + return r, nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/doc.go new file mode 100644 index 000000000..bf6254089 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/doc.go @@ -0,0 +1,13 @@ +/* +Package adapter helps command line tools connect to the guest via a Packer +communicator. + +A typical use is for custom provisioners that wrap command line +tools. For example, the Ansible provisioner and the Inspec provisioner both +use this package to proxy communicator calls. + +You may want to use this adapter if you are writing a provisioner that wraps a +tool which under normal usage would be run locally and form a connection to the +remote instance itself. 
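+
+A minimal sketch of wiring up an adapter (assuming an ssh.ServerConfig,
+net.Listener, packersdk.Ui, and packersdk.Communicator are already in hand;
+variable names are illustrative):
+
+	done := make(chan struct{})
+	proxy := adapter.NewAdapter(done, listener, sshConfig, "", ui, comm)
+	go proxy.Serve()
+	// ... run the wrapped tool against the listener's address ...
+	close(done)
+	proxy.Shutdown()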
+*/
+package adapter
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/scp.go b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/scp.go
new file mode 100644
index 000000000..ccf2c1fb0
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/adapter/scp.go
@@ -0,0 +1,349 @@
+package adapter
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	"github.com/hashicorp/packer-plugin-sdk/tmp"
+)
+
+const (
+	scpOK         = "\x00"
+	scpEmptyError = "\x02\n"
+)
+
+/*
+scp is a simple, but poorly documented, protocol. Thankfully, its source is
+freely available, and there is at least one page that describes it reasonably
+well.
+
+* https://raw.githubusercontent.com/openssh/openssh-portable/master/scp.c
+* https://opensource.apple.com/source/OpenSSH/OpenSSH-7.1/openssh/scp.c
+* https://blogs.oracle.com/janp/entry/how_the_scp_protocol_works is a great
+  resource, but has some bad information. Its first problem is that it doesn't
+  correctly describe why the producer has to read more responses than messages
+  it sends (because it has to read the 0 sent by the sink to start the
+  transfer). The second problem is that it omits that the producer needs to
+  send a 0 byte after file contents.
+*/
+
+func scpUploadSession(opts []byte, rest string, in io.Reader, out io.Writer, comm packersdk.Communicator) error {
+	rest = strings.TrimSpace(rest)
+	if len(rest) == 0 {
+		fmt.Fprintf(out, scpEmptyError)
+		return errors.New("no scp target specified")
+	}
+
+	d, err := tmp.Dir("ansible-upload")
+	if err != nil {
+		fmt.Fprintf(out, scpEmptyError)
+		return err
+	}
+	defer os.RemoveAll(d)
+
+	// To properly implement scp, rest should be checked to see if it is a
+	// directory on the remote side, but ansible only sends files, so there's no
+	// need to set targetIsDir, because it can be safely assumed that rest is
+	// intended to be a file, and whatever names are used in 'C' commands are
+	// irrelevant.
+	state := &scpUploadState{target: rest, srcRoot: d, comm: comm}
+
+	fmt.Fprintf(out, scpOK) // signal the client to start the transfer.
+	return state.Protocol(bufio.NewReader(in), out)
+}
+
+func scpDownloadSession(opts []byte, rest string, in io.Reader, out io.Writer, comm packersdk.Communicator) error {
+	rest = strings.TrimSpace(rest)
+	if len(rest) == 0 {
+		fmt.Fprintf(out, scpEmptyError)
+		return errors.New("no scp source specified")
+	}
+
+	d, err := tmp.Dir("ansible-download")
+	if err != nil {
+		fmt.Fprintf(out, scpEmptyError)
+		return err
+	}
+	defer os.RemoveAll(d)
+
+	if bytes.Contains(opts, []byte{'d'}) {
+		// The only ansible module that supports downloading via scp is fetch,
+		// and fetch only supports file downloads as of Ansible 2.1.
+ fmt.Fprintf(out, scpEmptyError) + return errors.New("directory downloads not supported") + } + + f, err := os.Create(filepath.Join(d, filepath.Base(rest))) + if err != nil { + fmt.Fprintf(out, scpEmptyError) + return err + } + defer f.Close() + + err = comm.Download(rest, f) + if err != nil { + fmt.Fprintf(out, scpEmptyError) + return err + } + + state := &scpDownloadState{srcRoot: d} + + return state.Protocol(bufio.NewReader(in), out) +} + +func (state *scpDownloadState) FileProtocol(path string, info os.FileInfo, in *bufio.Reader, out io.Writer) error { + size := info.Size() + perms := fmt.Sprintf("C%04o", info.Mode().Perm()) + fmt.Fprintln(out, perms, size, info.Name()) + if err := scpResponse(in); err != nil { + return err + } + + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + io.CopyN(out, f, size) + fmt.Fprintf(out, scpOK) + + return scpResponse(in) +} + +type scpUploadState struct { + comm packersdk.Communicator + target string // target is the directory on the target + srcRoot string // srcRoot is the directory on the host + mtime time.Time + atime time.Time + dir string // dir is a path relative to the roots + targetIsDir bool +} + +func (scp scpUploadState) DestPath() string { + return filepath.Join(scp.target, scp.dir) +} + +func (scp scpUploadState) SrcPath() string { + return filepath.Join(scp.srcRoot, scp.dir) +} + +func (state *scpUploadState) Protocol(in *bufio.Reader, out io.Writer) error { + for { + b, err := in.ReadByte() + if err != nil { + return err + } + switch b { + case 'T': + err := state.TimeProtocol(in, out) + if err != nil { + return err + } + case 'C': + return state.FileProtocol(in, out) + case 'E': + state.dir = filepath.Dir(state.dir) + fmt.Fprintf(out, scpOK) + return nil + case 'D': + return state.DirProtocol(in, out) + default: + fmt.Fprintf(out, scpEmptyError) + return fmt.Errorf("unexpected message: %c", b) + } + } +} + +func (state *scpUploadState) FileProtocol(in *bufio.Reader, out io.Writer) error { + defer func() { + state.mtime = time.Time{} + }() + + var mode os.FileMode + var size int64 + var name string + _, err := fmt.Fscanf(in, "%04o %d %s\n", &mode, &size, &name) + if err != nil { + fmt.Fprintf(out, scpEmptyError) + return fmt.Errorf("invalid file message: %v", err) + } + fmt.Fprintf(out, scpOK) + + var fi os.FileInfo = fileInfo{name: name, size: size, mode: mode, mtime: state.mtime} + + dest := state.DestPath() + if state.targetIsDir { + dest = filepath.Join(dest, fi.Name()) + } + + err = state.comm.Upload(dest, io.LimitReader(in, fi.Size()), &fi) + if err != nil { + fmt.Fprintf(out, scpEmptyError) + return err + } + + if err := scpResponse(in); err != nil { + return err + } + + fmt.Fprintf(out, scpOK) + return nil +} + +func (state *scpUploadState) TimeProtocol(in *bufio.Reader, out io.Writer) error { + var m, a int64 + if _, err := fmt.Fscanf(in, "%d 0 %d 0\n", &m, &a); err != nil { + fmt.Fprintf(out, scpEmptyError) + return err + } + fmt.Fprintf(out, scpOK) + + state.atime = time.Unix(a, 0) + state.mtime = time.Unix(m, 0) + return nil +} + +func (state *scpUploadState) DirProtocol(in *bufio.Reader, out io.Writer) error { + var mode os.FileMode + var length uint + var name string + + if _, err := fmt.Fscanf(in, "%04o %d %s\n", &mode, &length, &name); err != nil { + fmt.Fprintf(out, scpEmptyError) + return fmt.Errorf("invalid directory message: %v", err) + } + fmt.Fprintf(out, scpOK) + + path := filepath.Join(state.dir, name) + if err := os.Mkdir(path, mode); err != nil { + return err + } + state.dir = 
path + + if state.atime.IsZero() { + state.atime = time.Now() + } + if state.mtime.IsZero() { + state.mtime = time.Now() + } + + if err := os.Chtimes(path, state.atime, state.mtime); err != nil { + return err + } + + if err := state.comm.UploadDir(filepath.Dir(state.DestPath()), state.SrcPath(), nil); err != nil { + return err + } + + state.mtime = time.Time{} + state.atime = time.Time{} + return state.Protocol(in, out) +} + +type scpDownloadState struct { + srcRoot string // srcRoot is the directory on the host +} + +func (state *scpDownloadState) Protocol(in *bufio.Reader, out io.Writer) error { + r := bufio.NewReader(in) + // read the byte sent by the other side to start the transfer + if err := scpResponse(r); err != nil { + return err + } + + return filepath.Walk(state.srcRoot, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if path == state.srcRoot { + return nil + } + + if info.IsDir() { + // no need to get fancy; srcRoot should only contain one file, because + // Ansible only allows fetching a single file. + return errors.New("unexpected directory") + } + + return state.FileProtocol(path, info, r, out) + }) +} + +func scpOptions(s string) (opts []byte, rest string) { + end := 0 + opt := false + +Loop: + for i := 0; i < len(s); i++ { + b := s[i] + switch { + case b == ' ': + opt = false + end++ + case b == '-': + opt = true + end++ + case opt: + opts = append(opts, b) + end++ + default: + break Loop + } + } + + rest = s[end:] + return +} + +func scpResponse(r *bufio.Reader) error { + code, err := r.ReadByte() + if err != nil { + return err + } + + if code != 0 { + message, err := r.ReadString('\n') + if err != nil { + return fmt.Errorf("Error reading error message: %s", err) + } + + // 1 is a warning. Anything higher (really just 2) is an error. + if code > 1 { + return errors.New(message) + } + + log.Println("WARNING:", err) + } + return nil +} + +type fileInfo struct { + name string + size int64 + mode os.FileMode + mtime time.Time +} + +func (fi fileInfo) Name() string { return fi.name } +func (fi fileInfo) Size() int64 { return fi.size } +func (fi fileInfo) Mode() os.FileMode { return fi.mode } +func (fi fileInfo) ModTime() time.Time { + if fi.mtime.IsZero() { + return time.Now() + } + return fi.mtime +} +func (fi fileInfo) IsDir() bool { return fi.mode.IsDir() } +func (fi fileInfo) Sys() interface{} { return nil } diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.go new file mode 100644 index 000000000..940b8561e --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.go @@ -0,0 +1,2123 @@ +// Code generated by pigeon; DO NOT EDIT. 
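+
+// The grammar below parses Packer boot command strings, e.g.
+// "<esc><wait5>install<enter>" or "<leftShiftOn>A<leftShiftOff>", into wait,
+// key-toggle, special-key, and literal expressions.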
+ +package bootcommand + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" + "unicode" + "unicode/utf8" +) + +var g = &grammar{ + rules: []*rule{ + { + name: "Input", + pos: position{line: 6, col: 1, offset: 26}, + expr: &actionExpr{ + pos: position{line: 6, col: 10, offset: 35}, + run: (*parser).callonInput1, + expr: &seqExpr{ + pos: position{line: 6, col: 10, offset: 35}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 6, col: 10, offset: 35}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 6, col: 15, offset: 40}, + name: "Expr", + }, + }, + &ruleRefExpr{ + pos: position{line: 6, col: 20, offset: 45}, + name: "EOF", + }, + }, + }, + }, + }, + { + name: "Expr", + pos: position{line: 10, col: 1, offset: 75}, + expr: &actionExpr{ + pos: position{line: 10, col: 9, offset: 83}, + run: (*parser).callonExpr1, + expr: &labeledExpr{ + pos: position{line: 10, col: 9, offset: 83}, + label: "l", + expr: &oneOrMoreExpr{ + pos: position{line: 10, col: 11, offset: 85}, + expr: &choiceExpr{ + pos: position{line: 10, col: 13, offset: 87}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 10, col: 13, offset: 87}, + name: "Wait", + }, + &ruleRefExpr{ + pos: position{line: 10, col: 20, offset: 94}, + name: "CharToggle", + }, + &ruleRefExpr{ + pos: position{line: 10, col: 33, offset: 107}, + name: "Special", + }, + &ruleRefExpr{ + pos: position{line: 10, col: 43, offset: 117}, + name: "Literal", + }, + }, + }, + }, + }, + }, + }, + { + name: "Wait", + pos: position{line: 14, col: 1, offset: 150}, + expr: &actionExpr{ + pos: position{line: 14, col: 8, offset: 157}, + run: (*parser).callonWait1, + expr: &seqExpr{ + pos: position{line: 14, col: 8, offset: 157}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 14, col: 8, offset: 157}, + name: "ExprStart", + }, + &litMatcher{ + pos: position{line: 14, col: 18, offset: 167}, + val: "wait", + ignoreCase: false, + want: "\"wait\"", + }, + &labeledExpr{ + pos: position{line: 14, col: 25, offset: 174}, + label: "duration", + expr: &zeroOrOneExpr{ + pos: position{line: 14, col: 34, offset: 183}, + expr: &choiceExpr{ + pos: position{line: 14, col: 36, offset: 185}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 14, col: 36, offset: 185}, + name: "Duration", + }, + &ruleRefExpr{ + pos: position{line: 14, col: 47, offset: 196}, + name: "Integer", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 14, col: 58, offset: 207}, + name: "ExprEnd", + }, + }, + }, + }, + }, + { + name: "CharToggle", + pos: position{line: 27, col: 1, offset: 453}, + expr: &actionExpr{ + pos: position{line: 27, col: 14, offset: 466}, + run: (*parser).callonCharToggle1, + expr: &seqExpr{ + pos: position{line: 27, col: 14, offset: 466}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 27, col: 14, offset: 466}, + name: "ExprStart", + }, + &labeledExpr{ + pos: position{line: 27, col: 24, offset: 476}, + label: "lit", + expr: &ruleRefExpr{ + pos: position{line: 27, col: 29, offset: 481}, + name: "Literal", + }, + }, + &labeledExpr{ + pos: position{line: 27, col: 38, offset: 490}, + label: "t", + expr: &choiceExpr{ + pos: position{line: 27, col: 41, offset: 493}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 27, col: 41, offset: 493}, + name: "On", + }, + &ruleRefExpr{ + pos: position{line: 27, col: 46, offset: 498}, + name: "Off", + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 27, col: 
51, offset: 503}, + name: "ExprEnd", + }, + }, + }, + }, + }, + { + name: "Special", + pos: position{line: 31, col: 1, offset: 574}, + expr: &actionExpr{ + pos: position{line: 31, col: 11, offset: 584}, + run: (*parser).callonSpecial1, + expr: &seqExpr{ + pos: position{line: 31, col: 11, offset: 584}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 31, col: 11, offset: 584}, + name: "ExprStart", + }, + &labeledExpr{ + pos: position{line: 31, col: 21, offset: 594}, + label: "s", + expr: &ruleRefExpr{ + pos: position{line: 31, col: 24, offset: 597}, + name: "SpecialKey", + }, + }, + &labeledExpr{ + pos: position{line: 31, col: 36, offset: 609}, + label: "t", + expr: &zeroOrOneExpr{ + pos: position{line: 31, col: 38, offset: 611}, + expr: &choiceExpr{ + pos: position{line: 31, col: 39, offset: 612}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 31, col: 39, offset: 612}, + name: "On", + }, + &ruleRefExpr{ + pos: position{line: 31, col: 44, offset: 617}, + name: "Off", + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 31, col: 50, offset: 623}, + name: "ExprEnd", + }, + }, + }, + }, + }, + { + name: "Number", + pos: position{line: 39, col: 1, offset: 810}, + expr: &actionExpr{ + pos: position{line: 39, col: 10, offset: 819}, + run: (*parser).callonNumber1, + expr: &seqExpr{ + pos: position{line: 39, col: 10, offset: 819}, + exprs: []interface{}{ + &zeroOrOneExpr{ + pos: position{line: 39, col: 10, offset: 819}, + expr: &litMatcher{ + pos: position{line: 39, col: 10, offset: 819}, + val: "-", + ignoreCase: false, + want: "\"-\"", + }, + }, + &ruleRefExpr{ + pos: position{line: 39, col: 15, offset: 824}, + name: "Integer", + }, + &zeroOrOneExpr{ + pos: position{line: 39, col: 23, offset: 832}, + expr: &seqExpr{ + pos: position{line: 39, col: 25, offset: 834}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 39, col: 25, offset: 834}, + val: ".", + ignoreCase: false, + want: "\".\"", + }, + &oneOrMoreExpr{ + pos: position{line: 39, col: 29, offset: 838}, + expr: &ruleRefExpr{ + pos: position{line: 39, col: 29, offset: 838}, + name: "Digit", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Integer", + pos: position{line: 43, col: 1, offset: 884}, + expr: &choiceExpr{ + pos: position{line: 43, col: 11, offset: 894}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 43, col: 11, offset: 894}, + val: "0", + ignoreCase: false, + want: "\"0\"", + }, + &actionExpr{ + pos: position{line: 43, col: 17, offset: 900}, + run: (*parser).callonInteger3, + expr: &seqExpr{ + pos: position{line: 43, col: 17, offset: 900}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 43, col: 17, offset: 900}, + name: "NonZeroDigit", + }, + &zeroOrMoreExpr{ + pos: position{line: 43, col: 30, offset: 913}, + expr: &ruleRefExpr{ + pos: position{line: 43, col: 30, offset: 913}, + name: "Digit", + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Duration", + pos: position{line: 47, col: 1, offset: 977}, + expr: &actionExpr{ + pos: position{line: 47, col: 12, offset: 988}, + run: (*parser).callonDuration1, + expr: &oneOrMoreExpr{ + pos: position{line: 47, col: 12, offset: 988}, + expr: &seqExpr{ + pos: position{line: 47, col: 14, offset: 990}, + exprs: []interface{}{ + &ruleRefExpr{ + pos: position{line: 47, col: 14, offset: 990}, + name: "Number", + }, + &ruleRefExpr{ + pos: position{line: 47, col: 21, offset: 997}, + name: "TimeUnit", + }, + }, + }, + }, + }, + }, + { + name: "On", + pos: position{line: 51, col: 1, offset: 
1060}, + expr: &actionExpr{ + pos: position{line: 51, col: 6, offset: 1065}, + run: (*parser).callonOn1, + expr: &litMatcher{ + pos: position{line: 51, col: 6, offset: 1065}, + val: "on", + ignoreCase: true, + want: "\"on\"i", + }, + }, + }, + { + name: "Off", + pos: position{line: 55, col: 1, offset: 1098}, + expr: &actionExpr{ + pos: position{line: 55, col: 7, offset: 1104}, + run: (*parser).callonOff1, + expr: &litMatcher{ + pos: position{line: 55, col: 7, offset: 1104}, + val: "off", + ignoreCase: true, + want: "\"off\"i", + }, + }, + }, + { + name: "Literal", + pos: position{line: 59, col: 1, offset: 1139}, + expr: &actionExpr{ + pos: position{line: 59, col: 11, offset: 1149}, + run: (*parser).callonLiteral1, + expr: &anyMatcher{ + line: 59, col: 11, offset: 1149, + }, + }, + }, + { + name: "ExprEnd", + pos: position{line: 64, col: 1, offset: 1230}, + expr: &litMatcher{ + pos: position{line: 64, col: 11, offset: 1240}, + val: ">", + ignoreCase: false, + want: "\">\"", + }, + }, + { + name: "ExprStart", + pos: position{line: 65, col: 1, offset: 1244}, + expr: &litMatcher{ + pos: position{line: 65, col: 13, offset: 1256}, + val: "<", + ignoreCase: false, + want: "\"<\"", + }, + }, + { + name: "SpecialKey", + pos: position{line: 66, col: 1, offset: 1260}, + expr: &choiceExpr{ + pos: position{line: 66, col: 14, offset: 1273}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 66, col: 14, offset: 1273}, + val: "bs", + ignoreCase: true, + want: "\"bs\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 22, offset: 1281}, + val: "del", + ignoreCase: true, + want: "\"del\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 31, offset: 1290}, + val: "enter", + ignoreCase: true, + want: "\"enter\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 42, offset: 1301}, + val: "esc", + ignoreCase: true, + want: "\"esc\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 51, offset: 1310}, + val: "f10", + ignoreCase: true, + want: "\"f10\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 60, offset: 1319}, + val: "f11", + ignoreCase: true, + want: "\"f11\"i", + }, + &litMatcher{ + pos: position{line: 66, col: 69, offset: 1328}, + val: "f12", + ignoreCase: true, + want: "\"f12\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 11, offset: 1345}, + val: "f1", + ignoreCase: true, + want: "\"f1\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 19, offset: 1353}, + val: "f2", + ignoreCase: true, + want: "\"f2\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 27, offset: 1361}, + val: "f3", + ignoreCase: true, + want: "\"f3\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 35, offset: 1369}, + val: "f4", + ignoreCase: true, + want: "\"f4\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 43, offset: 1377}, + val: "f5", + ignoreCase: true, + want: "\"f5\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 51, offset: 1385}, + val: "f6", + ignoreCase: true, + want: "\"f6\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 59, offset: 1393}, + val: "f7", + ignoreCase: true, + want: "\"f7\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 67, offset: 1401}, + val: "f8", + ignoreCase: true, + want: "\"f8\"i", + }, + &litMatcher{ + pos: position{line: 67, col: 75, offset: 1409}, + val: "f9", + ignoreCase: true, + want: "\"f9\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 12, offset: 1426}, + val: "return", + ignoreCase: true, + want: "\"return\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 24, offset: 1438}, + val: 
"tab", + ignoreCase: true, + want: "\"tab\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 33, offset: 1447}, + val: "up", + ignoreCase: true, + want: "\"up\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 41, offset: 1455}, + val: "down", + ignoreCase: true, + want: "\"down\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 51, offset: 1465}, + val: "spacebar", + ignoreCase: true, + want: "\"spacebar\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 65, offset: 1479}, + val: "insert", + ignoreCase: true, + want: "\"insert\"i", + }, + &litMatcher{ + pos: position{line: 68, col: 77, offset: 1491}, + val: "home", + ignoreCase: true, + want: "\"home\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 11, offset: 1509}, + val: "end", + ignoreCase: true, + want: "\"end\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 20, offset: 1518}, + val: "pageup", + ignoreCase: true, + want: "\"pageUp\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 32, offset: 1530}, + val: "pagedown", + ignoreCase: true, + want: "\"pageDown\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 46, offset: 1544}, + val: "leftalt", + ignoreCase: true, + want: "\"leftAlt\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 59, offset: 1557}, + val: "leftctrl", + ignoreCase: true, + want: "\"leftCtrl\"i", + }, + &litMatcher{ + pos: position{line: 69, col: 73, offset: 1571}, + val: "leftshift", + ignoreCase: true, + want: "\"leftShift\"i", + }, + &litMatcher{ + pos: position{line: 70, col: 11, offset: 1594}, + val: "rightalt", + ignoreCase: true, + want: "\"rightAlt\"i", + }, + &litMatcher{ + pos: position{line: 70, col: 25, offset: 1608}, + val: "rightctrl", + ignoreCase: true, + want: "\"rightCtrl\"i", + }, + &litMatcher{ + pos: position{line: 70, col: 40, offset: 1623}, + val: "rightshift", + ignoreCase: true, + want: "\"rightShift\"i", + }, + &litMatcher{ + pos: position{line: 70, col: 56, offset: 1639}, + val: "leftsuper", + ignoreCase: true, + want: "\"leftSuper\"i", + }, + &litMatcher{ + pos: position{line: 70, col: 71, offset: 1654}, + val: "rightsuper", + ignoreCase: true, + want: "\"rightSuper\"i", + }, + &litMatcher{ + pos: position{line: 71, col: 11, offset: 1678}, + val: "left", + ignoreCase: true, + want: "\"left\"i", + }, + &litMatcher{ + pos: position{line: 71, col: 21, offset: 1688}, + val: "right", + ignoreCase: true, + want: "\"right\"i", + }, + }, + }, + }, + { + name: "NonZeroDigit", + pos: position{line: 73, col: 1, offset: 1698}, + expr: &charClassMatcher{ + pos: position{line: 73, col: 16, offset: 1713}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "Digit", + pos: position{line: 74, col: 1, offset: 1719}, + expr: &charClassMatcher{ + pos: position{line: 74, col: 9, offset: 1727}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + { + name: "TimeUnit", + pos: position{line: 75, col: 1, offset: 1733}, + expr: &choiceExpr{ + pos: position{line: 75, col: 13, offset: 1745}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 75, col: 13, offset: 1745}, + val: "ns", + ignoreCase: false, + want: "\"ns\"", + }, + &litMatcher{ + pos: position{line: 75, col: 20, offset: 1752}, + val: "us", + ignoreCase: false, + want: "\"us\"", + }, + &litMatcher{ + pos: position{line: 75, col: 27, offset: 1759}, + val: "µs", + ignoreCase: false, + want: "\"µs\"", + }, + &litMatcher{ + pos: position{line: 75, col: 34, offset: 1767}, + val: "ms", + ignoreCase: 
false, + want: "\"ms\"", + }, + &litMatcher{ + pos: position{line: 75, col: 41, offset: 1774}, + val: "s", + ignoreCase: false, + want: "\"s\"", + }, + &litMatcher{ + pos: position{line: 75, col: 47, offset: 1780}, + val: "m", + ignoreCase: false, + want: "\"m\"", + }, + &litMatcher{ + pos: position{line: 75, col: 53, offset: 1786}, + val: "h", + ignoreCase: false, + want: "\"h\"", + }, + }, + }, + }, + { + name: "_", + displayName: "\"whitespace\"", + pos: position{line: 77, col: 1, offset: 1792}, + expr: &zeroOrMoreExpr{ + pos: position{line: 77, col: 19, offset: 1810}, + expr: &charClassMatcher{ + pos: position{line: 77, col: 19, offset: 1810}, + val: "[ \\n\\t\\r]", + chars: []rune{' ', '\n', '\t', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + { + name: "EOF", + pos: position{line: 79, col: 1, offset: 1822}, + expr: ¬Expr{ + pos: position{line: 79, col: 8, offset: 1829}, + expr: &anyMatcher{ + line: 79, col: 9, offset: 1830, + }, + }, + }, + }, +} + +func (c *current) onInput1(expr interface{}) (interface{}, error) { + return expr, nil +} + +func (p *parser) callonInput1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onInput1(stack["expr"]) +} + +func (c *current) onExpr1(l interface{}) (interface{}, error) { + return l, nil +} + +func (p *parser) callonExpr1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onExpr1(stack["l"]) +} + +func (c *current) onWait1(duration interface{}) (interface{}, error) { + var d time.Duration + switch t := duration.(type) { + case time.Duration: + d = t + case int64: + d = time.Duration(t) * time.Second + default: + d = time.Second + } + return &waitExpression{d}, nil +} + +func (p *parser) callonWait1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onWait1(stack["duration"]) +} + +func (c *current) onCharToggle1(lit, t interface{}) (interface{}, error) { + return &literal{lit.(*literal).s, t.(KeyAction)}, nil +} + +func (p *parser) callonCharToggle1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCharToggle1(stack["lit"], stack["t"]) +} + +func (c *current) onSpecial1(s, t interface{}) (interface{}, error) { + l := strings.ToLower(string(s.([]byte))) + if t == nil { + return &specialExpression{l, KeyPress}, nil + } + return &specialExpression{l, t.(KeyAction)}, nil +} + +func (p *parser) callonSpecial1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSpecial1(stack["s"], stack["t"]) +} + +func (c *current) onNumber1() (interface{}, error) { + return string(c.text), nil +} + +func (p *parser) callonNumber1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onNumber1() +} + +func (c *current) onInteger3() (interface{}, error) { + return strconv.ParseInt(string(c.text), 10, 64) +} + +func (p *parser) callonInteger3() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onInteger3() +} + +func (c *current) onDuration1() (interface{}, error) { + return time.ParseDuration(string(c.text)) +} + +func (p *parser) callonDuration1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onDuration1() +} + +func (c *current) onOn1() (interface{}, error) { + return KeyOn, nil +} + +func (p *parser) callonOn1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onOn1() +} + +func (c *current) onOff1() 
(interface{}, error) { + return KeyOff, nil +} + +func (p *parser) callonOff1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onOff1() +} + +func (c *current) onLiteral1() (interface{}, error) { + r, _ := utf8.DecodeRune(c.text) + return &literal{r, KeyPress}, nil +} + +func (p *parser) callonLiteral1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLiteral1() +} + +var ( + // errNoRule is returned when the grammar to parse has no rule. + errNoRule = errors.New("grammar has no rule") + + // errInvalidEntrypoint is returned when the specified entrypoint rule + // does not exit. + errInvalidEntrypoint = errors.New("invalid entrypoint") + + // errInvalidEncoding is returned when the source is not properly + // utf8-encoded. + errInvalidEncoding = errors.New("invalid encoding") + + // errMaxExprCnt is used to signal that the maximum number of + // expressions have been parsed. + errMaxExprCnt = errors.New("max number of expresssions parsed") +) + +// Option is a function that can set an option on the parser. It returns +// the previous setting as an Option. +type Option func(*parser) Option + +// MaxExpressions creates an Option to stop parsing after the provided +// number of expressions have been parsed, if the value is 0 then the parser will +// parse for as many steps as needed (possibly an infinite number). +// +// The default for maxExprCnt is 0. +func MaxExpressions(maxExprCnt uint64) Option { + return func(p *parser) Option { + oldMaxExprCnt := p.maxExprCnt + p.maxExprCnt = maxExprCnt + return MaxExpressions(oldMaxExprCnt) + } +} + +// Entrypoint creates an Option to set the rule name to use as entrypoint. +// The rule name must have been specified in the -alternate-entrypoints +// if generating the parser with the -optimize-grammar flag, otherwise +// it may have been optimized out. Passing an empty string sets the +// entrypoint to the first rule in the grammar. +// +// The default is to start parsing at the first rule in the grammar. +func Entrypoint(ruleName string) Option { + return func(p *parser) Option { + oldEntrypoint := p.entrypoint + p.entrypoint = ruleName + if ruleName == "" { + p.entrypoint = g.rules[0].name + } + return Entrypoint(oldEntrypoint) + } +} + +// Statistics adds a user provided Stats struct to the parser to allow +// the user to process the results after the parsing has finished. +// Also the key for the "no match" counter is set. +// +// Example usage: +// +// input := "input" +// stats := Stats{} +// _, err := Parse("input-file", []byte(input), Statistics(&stats, "no match")) +// if err != nil { +// log.Panicln(err) +// } +// b, err := json.MarshalIndent(stats.ChoiceAltCnt, "", " ") +// if err != nil { +// log.Panicln(err) +// } +// fmt.Println(string(b)) +// +func Statistics(stats *Stats, choiceNoMatch string) Option { + return func(p *parser) Option { + oldStats := p.Stats + p.Stats = stats + oldChoiceNoMatch := p.choiceNoMatch + p.choiceNoMatch = choiceNoMatch + if p.Stats.ChoiceAltCnt == nil { + p.Stats.ChoiceAltCnt = make(map[string]map[string]int) + } + return Statistics(oldStats, oldChoiceNoMatch) + } +} + +// Debug creates an Option to set the debug flag to b. When set to true, +// debugging information is printed to stdout while parsing. +// +// The default is false. +func Debug(b bool) Option { + return func(p *parser) Option { + old := p.debug + p.debug = b + return Debug(old) + } +} + +// Memoize creates an Option to set the memoize flag to b. 
When set to true, +// the parser will cache all results so each expression is evaluated only +// once. This guarantees linear parsing time even for pathological cases, +// at the expense of more memory and slower times for typical cases. +// +// The default is false. +func Memoize(b bool) Option { + return func(p *parser) Option { + old := p.memoize + p.memoize = b + return Memoize(old) + } +} + +// AllowInvalidUTF8 creates an Option to allow invalid UTF-8 bytes. +// Every invalid UTF-8 byte is treated as a utf8.RuneError (U+FFFD) +// by character class matchers and is matched by the any matcher. +// The returned matched value, c.text and c.offset are NOT affected. +// +// The default is false. +func AllowInvalidUTF8(b bool) Option { + return func(p *parser) Option { + old := p.allowInvalidUTF8 + p.allowInvalidUTF8 = b + return AllowInvalidUTF8(old) + } +} + +// Recover creates an Option to set the recover flag to b. When set to +// true, this causes the parser to recover from panics and convert it +// to an error. Setting it to false can be useful while debugging to +// access the full stack trace. +// +// The default is true. +func Recover(b bool) Option { + return func(p *parser) Option { + old := p.recover + p.recover = b + return Recover(old) + } +} + +// GlobalStore creates an Option to set a key to a certain value in +// the globalStore. +func GlobalStore(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.globalStore[key] + p.cur.globalStore[key] = value + return GlobalStore(key, old) + } +} + +// InitState creates an Option to set a key to a certain value in +// the global "state" store. +func InitState(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.state[key] + p.cur.state[key] = value + return InitState(key, old) + } +} + +// ParseFile parses the file identified by filename. +func ParseFile(filename string, opts ...Option) (i interface{}, err error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); closeErr != nil { + err = closeErr + } + }() + return ParseReader(filename, f, opts...) +} + +// ParseReader parses the data from r using filename as information in the +// error messages. +func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return Parse(filename, b, opts...) +} + +// Parse parses the data from b using filename as information in the +// error messages. +func Parse(filename string, b []byte, opts ...Option) (interface{}, error) { + return newParser(filename, b, opts...).parse(g) +} + +// position records a position in the text. +type position struct { + line, col, offset int +} + +func (p position) String() string { + return strconv.Itoa(p.line) + ":" + strconv.Itoa(p.col) + " [" + strconv.Itoa(p.offset) + "]" +} + +// savepoint stores all state required to go back to this point in the +// parser. +type savepoint struct { + position + rn rune + w int +} + +type current struct { + pos position // start position of the match + text []byte // raw text of the match + + // state is a store for arbitrary key,value pairs that the user wants to be + // tied to the backtracking of the parser. + // This is always rolled back if a parsing rule fails. 
+ state storeDict + + // globalStore is a general store for the user to store arbitrary key-value + // pairs that they need to manage and that they do not want tied to the + // backtracking of the parser. This is only modified by the user and never + // rolled back by the parser. It is always up to the user to keep this in a + // consistent state. + globalStore storeDict +} + +type storeDict map[string]interface{} + +// the AST types... + +type grammar struct { + pos position + rules []*rule +} + +type rule struct { + pos position + name string + displayName string + expr interface{} +} + +type choiceExpr struct { + pos position + alternatives []interface{} +} + +type actionExpr struct { + pos position + expr interface{} + run func(*parser) (interface{}, error) +} + +type recoveryExpr struct { + pos position + expr interface{} + recoverExpr interface{} + failureLabel []string +} + +type seqExpr struct { + pos position + exprs []interface{} +} + +type throwExpr struct { + pos position + label string +} + +type labeledExpr struct { + pos position + label string + expr interface{} +} + +type expr struct { + pos position + expr interface{} +} + +type andExpr expr +type notExpr expr +type zeroOrOneExpr expr +type zeroOrMoreExpr expr +type oneOrMoreExpr expr + +type ruleRefExpr struct { + pos position + name string +} + +type stateCodeExpr struct { + pos position + run func(*parser) error +} + +type andCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +type notCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +type litMatcher struct { + pos position + val string + ignoreCase bool + want string +} + +type charClassMatcher struct { + pos position + val string + basicLatinChars [128]bool + chars []rune + ranges []rune + classes []*unicode.RangeTable + ignoreCase bool + inverted bool +} + +type anyMatcher position + +// errList cumulates the errors found by the parser. +type errList []error + +func (e *errList) add(err error) { + *e = append(*e, err) +} + +func (e errList) err() error { + if len(e) == 0 { + return nil + } + e.dedupe() + return e +} + +func (e *errList) dedupe() { + var cleaned []error + set := make(map[string]bool) + for _, err := range *e { + if msg := err.Error(); !set[msg] { + set[msg] = true + cleaned = append(cleaned, err) + } + } + *e = cleaned +} + +func (e errList) Error() string { + switch len(e) { + case 0: + return "" + case 1: + return e[0].Error() + default: + var buf bytes.Buffer + + for i, err := range e { + if i > 0 { + buf.WriteRune('\n') + } + buf.WriteString(err.Error()) + } + return buf.String() + } +} + +// parserError wraps an error with a prefix indicating the rule in which +// the error occurred. The original error is stored in the Inner field. +type parserError struct { + Inner error + pos position + prefix string + expected []string +} + +// Error returns the error message. +func (p *parserError) Error() string { + return p.prefix + ": " + p.Inner.Error() +} + +// newParser creates a parser with the specified input source and options. 
+func newParser(filename string, b []byte, opts ...Option) *parser { + stats := Stats{ + ChoiceAltCnt: make(map[string]map[string]int), + } + + p := &parser{ + filename: filename, + errs: new(errList), + data: b, + pt: savepoint{position: position{line: 1}}, + recover: true, + cur: current{ + state: make(storeDict), + globalStore: make(storeDict), + }, + maxFailPos: position{col: 1, line: 1}, + maxFailExpected: make([]string, 0, 20), + Stats: &stats, + // start rule is rule [0] unless an alternate entrypoint is specified + entrypoint: g.rules[0].name, + } + p.setOptions(opts) + + if p.maxExprCnt == 0 { + p.maxExprCnt = math.MaxUint64 + } + + return p +} + +// setOptions applies the options to the parser. +func (p *parser) setOptions(opts []Option) { + for _, opt := range opts { + opt(p) + } +} + +type resultTuple struct { + v interface{} + b bool + end savepoint +} + +const choiceNoMatch = -1 + +// Stats stores some statistics, gathered during parsing +type Stats struct { + // ExprCnt counts the number of expressions processed during parsing + // This value is compared to the maximum number of expressions allowed + // (set by the MaxExpressions option). + ExprCnt uint64 + + // ChoiceAltCnt is used to count for each ordered choice expression, + // which alternative is used how may times. + // These numbers allow to optimize the order of the ordered choice expression + // to increase the performance of the parser + // + // The outer key of ChoiceAltCnt is composed of the name of the rule as well + // as the line and the column of the ordered choice. + // The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative. + // For each alternative the number of matches are counted. If an ordered choice does not + // match, a special counter is incremented. The name of this counter is set with + // the parser option Statistics. + // For an alternative to be included in ChoiceAltCnt, it has to match at least once. + ChoiceAltCnt map[string]map[string]int +} + +type parser struct { + filename string + pt savepoint + cur current + + data []byte + errs *errList + + depth int + recover bool + debug bool + + memoize bool + // memoization table for the packrat algorithm: + // map[offset in source] map[expression or rule] {value, match} + memo map[int]map[interface{}]resultTuple + + // rules table, maps the rule identifier to the rule node + rules map[string]*rule + // variables stack, map of label to value + vstack []map[string]interface{} + // rule stack, allows identification of the current rule in errors + rstack []*rule + + // parse fail + maxFailPos position + maxFailExpected []string + maxFailInvertExpected bool + + // max number of expressions to be parsed + maxExprCnt uint64 + // entrypoint for the parser + entrypoint string + + allowInvalidUTF8 bool + + *Stats + + choiceNoMatch string + // recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse + recoveryStack []map[string]interface{} +} + +// push a variable set on the vstack. +func (p *parser) pushV() { + if cap(p.vstack) == len(p.vstack) { + // create new empty slot in the stack + p.vstack = append(p.vstack, nil) + } else { + // slice to 1 more + p.vstack = p.vstack[:len(p.vstack)+1] + } + + // get the last args set + m := p.vstack[len(p.vstack)-1] + if m != nil && len(m) == 0 { + // empty map, all good + return + } + + m = make(map[string]interface{}) + p.vstack[len(p.vstack)-1] = m +} + +// pop a variable set from the vstack. 
+func (p *parser) popV() { + // if the map is not empty, clear it + m := p.vstack[len(p.vstack)-1] + if len(m) > 0 { + // GC that map + p.vstack[len(p.vstack)-1] = nil + } + p.vstack = p.vstack[:len(p.vstack)-1] +} + +// push a recovery expression with its labels to the recoveryStack +func (p *parser) pushRecovery(labels []string, expr interface{}) { + if cap(p.recoveryStack) == len(p.recoveryStack) { + // create new empty slot in the stack + p.recoveryStack = append(p.recoveryStack, nil) + } else { + // slice to 1 more + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1] + } + + m := make(map[string]interface{}, len(labels)) + for _, fl := range labels { + m[fl] = expr + } + p.recoveryStack[len(p.recoveryStack)-1] = m +} + +// pop a recovery expression from the recoveryStack +func (p *parser) popRecovery() { + // GC that map + p.recoveryStack[len(p.recoveryStack)-1] = nil + + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] +} + +func (p *parser) print(prefix, s string) string { + if !p.debug { + return s + } + + fmt.Printf("%s %d:%d:%d: %s [%#U]\n", + prefix, p.pt.line, p.pt.col, p.pt.offset, s, p.pt.rn) + return s +} + +func (p *parser) in(s string) string { + p.depth++ + return p.print(strings.Repeat(" ", p.depth)+">", s) +} + +func (p *parser) out(s string) string { + p.depth-- + return p.print(strings.Repeat(" ", p.depth)+"<", s) +} + +func (p *parser) addErr(err error) { + p.addErrAt(err, p.pt.position, []string{}) +} + +func (p *parser) addErrAt(err error, pos position, expected []string) { + var buf bytes.Buffer + if p.filename != "" { + buf.WriteString(p.filename) + } + if buf.Len() > 0 { + buf.WriteString(":") + } + buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset)) + if len(p.rstack) > 0 { + if buf.Len() > 0 { + buf.WriteString(": ") + } + rule := p.rstack[len(p.rstack)-1] + if rule.displayName != "" { + buf.WriteString("rule " + rule.displayName) + } else { + buf.WriteString("rule " + rule.name) + } + } + pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected} + p.errs.add(pe) +} + +func (p *parser) failAt(fail bool, pos position, want string) { + // process fail if parsing fails and not inverted or parsing succeeds and invert is set + if fail == p.maxFailInvertExpected { + if pos.offset < p.maxFailPos.offset { + return + } + + if pos.offset > p.maxFailPos.offset { + p.maxFailPos = pos + p.maxFailExpected = p.maxFailExpected[:0] + } + + if p.maxFailInvertExpected { + want = "!" + want + } + p.maxFailExpected = append(p.maxFailExpected, want) + } +} + +// read advances the parser to the next rune. +func (p *parser) read() { + p.pt.offset += p.pt.w + rn, n := utf8.DecodeRune(p.data[p.pt.offset:]) + p.pt.rn = rn + p.pt.w = n + p.pt.col++ + if rn == '\n' { + p.pt.line++ + p.pt.col = 0 + } + + if rn == utf8.RuneError && n == 1 { // see utf8.DecodeRune + if !p.allowInvalidUTF8 { + p.addErr(errInvalidEncoding) + } + } +} + +// restore parser position to the savepoint pt. +func (p *parser) restore(pt savepoint) { + if p.debug { + defer p.out(p.in("restore")) + } + if pt.offset == p.pt.offset { + return + } + p.pt = pt +} + +// Cloner is implemented by any value that has a Clone method, which returns a +// copy of the value. This is mainly used for types which are not passed by +// value (e.g map, slice, chan) or structs that contain such types. 
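+//
+// A minimal sketch of a conforming type (hypothetical; not part of this
+// file):
+//
+//	type runeStack struct{ rs []rune }
+//
+//	func (s runeStack) Clone() interface{} {
+//		cp := make([]rune, len(s.rs))
+//		copy(cp, s.rs)
+//		return runeStack{rs: cp}
+//	}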
+//
+// This is used in conjunction with the global state feature to create proper
+// copies of the state to allow the parser to properly restore the state in
+// the case of backtracking.
+type Cloner interface {
+	Clone() interface{}
+}
+
+var statePool = &sync.Pool{
+	New: func() interface{} { return make(storeDict) },
+}
+
+func (sd storeDict) Discard() {
+	for k := range sd {
+		delete(sd, k)
+	}
+	statePool.Put(sd)
+}
+
+// clone and return the parser's current state.
+func (p *parser) cloneState() storeDict {
+	if p.debug {
+		defer p.out(p.in("cloneState"))
+	}
+
+	state := statePool.Get().(storeDict)
+	for k, v := range p.cur.state {
+		if c, ok := v.(Cloner); ok {
+			state[k] = c.Clone()
+		} else {
+			state[k] = v
+		}
+	}
+	return state
+}
+
+// restore the parser's current state to the state storeDict.
+// every restoreState should be applied only once per cloned state
+func (p *parser) restoreState(state storeDict) {
+	if p.debug {
+		defer p.out(p.in("restoreState"))
+	}
+	p.cur.state.Discard()
+	p.cur.state = state
+}
+
+// get the slice of bytes from the savepoint start to the current position.
+func (p *parser) sliceFrom(start savepoint) []byte {
+	return p.data[start.position.offset:p.pt.position.offset]
+}
+
+func (p *parser) getMemoized(node interface{}) (resultTuple, bool) {
+	if len(p.memo) == 0 {
+		return resultTuple{}, false
+	}
+	m := p.memo[p.pt.offset]
+	if len(m) == 0 {
+		return resultTuple{}, false
+	}
+	res, ok := m[node]
+	return res, ok
+}
+
+func (p *parser) setMemoized(pt savepoint, node interface{}, tuple resultTuple) {
+	if p.memo == nil {
+		p.memo = make(map[int]map[interface{}]resultTuple)
+	}
+	m := p.memo[pt.offset]
+	if m == nil {
+		m = make(map[interface{}]resultTuple)
+		p.memo[pt.offset] = m
+	}
+	m[node] = tuple
+}
+
+func (p *parser) buildRulesTable(g *grammar) {
+	p.rules = make(map[string]*rule, len(g.rules))
+	for _, r := range g.rules {
+		p.rules[r.name] = r
+	}
+}
+
+func (p *parser) parse(g *grammar) (val interface{}, err error) {
+	if len(g.rules) == 0 {
+		p.addErr(errNoRule)
+		return nil, p.errs.err()
+	}
+
+	// TODO : not super critical but this could be generated
+	p.buildRulesTable(g)
+
+	if p.recover {
+		// panic can be used in action code to stop parsing immediately
+		// and return the panic as an error.
+		defer func() {
+			if e := recover(); e != nil {
+				if p.debug {
+					defer p.out(p.in("panic handler"))
+				}
+				val = nil
+				switch e := e.(type) {
+				case error:
+					p.addErr(e)
+				default:
+					p.addErr(fmt.Errorf("%v", e))
+				}
+				err = p.errs.err()
+			}
+		}()
+	}
+
+	startRule, ok := p.rules[p.entrypoint]
+	if !ok {
+		p.addErr(errInvalidEntrypoint)
+		return nil, p.errs.err()
+	}
+
+	p.read() // advance to first rune
+	val, ok = p.parseRule(startRule)
+	if !ok {
+		if len(*p.errs) == 0 {
+			// If parsing fails, but no errors have been recorded, the expected values
+			// for the farthest parser position are returned as error.
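+			// The resulting message looks like, e.g. (illustrative):
+			//   1:1 (0): no match found, expected: "<" or EOF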
+ maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected)) + for _, v := range p.maxFailExpected { + maxFailExpectedMap[v] = struct{}{} + } + expected := make([]string, 0, len(maxFailExpectedMap)) + eof := false + if _, ok := maxFailExpectedMap["!."]; ok { + delete(maxFailExpectedMap, "!.") + eof = true + } + for k := range maxFailExpectedMap { + expected = append(expected, k) + } + sort.Strings(expected) + if eof { + expected = append(expected, "EOF") + } + p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected) + } + + return nil, p.errs.err() + } + return val, p.errs.err() +} + +func listJoin(list []string, sep string, lastSep string) string { + switch len(list) { + case 0: + return "" + case 1: + return list[0] + default: + return strings.Join(list[:len(list)-1], sep) + " " + lastSep + " " + list[len(list)-1] + } +} + +func (p *parser) parseRule(rule *rule) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRule " + rule.name)) + } + + if p.memoize { + res, ok := p.getMemoized(rule) + if ok { + p.restore(res.end) + return res.v, res.b + } + } + + start := p.pt + p.rstack = append(p.rstack, rule) + p.pushV() + val, ok := p.parseExpr(rule.expr) + p.popV() + p.rstack = p.rstack[:len(p.rstack)-1] + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", string(p.sliceFrom(start))) + } + + if p.memoize { + p.setMemoized(start, rule, resultTuple{val, ok, p.pt}) + } + return val, ok +} + +func (p *parser) parseExpr(expr interface{}) (interface{}, bool) { + var pt savepoint + + if p.memoize { + res, ok := p.getMemoized(expr) + if ok { + p.restore(res.end) + return res.v, res.b + } + pt = p.pt + } + + p.ExprCnt++ + if p.ExprCnt > p.maxExprCnt { + panic(errMaxExprCnt) + } + + var val interface{} + var ok bool + switch expr := expr.(type) { + case *actionExpr: + val, ok = p.parseActionExpr(expr) + case *andCodeExpr: + val, ok = p.parseAndCodeExpr(expr) + case *andExpr: + val, ok = p.parseAndExpr(expr) + case *anyMatcher: + val, ok = p.parseAnyMatcher(expr) + case *charClassMatcher: + val, ok = p.parseCharClassMatcher(expr) + case *choiceExpr: + val, ok = p.parseChoiceExpr(expr) + case *labeledExpr: + val, ok = p.parseLabeledExpr(expr) + case *litMatcher: + val, ok = p.parseLitMatcher(expr) + case *notCodeExpr: + val, ok = p.parseNotCodeExpr(expr) + case *notExpr: + val, ok = p.parseNotExpr(expr) + case *oneOrMoreExpr: + val, ok = p.parseOneOrMoreExpr(expr) + case *recoveryExpr: + val, ok = p.parseRecoveryExpr(expr) + case *ruleRefExpr: + val, ok = p.parseRuleRefExpr(expr) + case *seqExpr: + val, ok = p.parseSeqExpr(expr) + case *stateCodeExpr: + val, ok = p.parseStateCodeExpr(expr) + case *throwExpr: + val, ok = p.parseThrowExpr(expr) + case *zeroOrMoreExpr: + val, ok = p.parseZeroOrMoreExpr(expr) + case *zeroOrOneExpr: + val, ok = p.parseZeroOrOneExpr(expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } + if p.memoize { + p.setMemoized(pt, expr, resultTuple{val, ok, p.pt}) + } + return val, ok +} + +func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseActionExpr")) + } + + start := p.pt + val, ok := p.parseExpr(act.expr) + if ok { + p.cur.pos = start.position + p.cur.text = p.sliceFrom(start) + state := p.cloneState() + actVal, err := act.run(p) + if err != nil { + p.addErrAt(err, start.position, []string{}) + } + p.restoreState(state) + + val = actVal + } + if ok && p.debug { + p.print(strings.Repeat(" ", p.depth)+"MATCH", 
string(p.sliceFrom(start))) + } + return val, ok +} + +func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAndCodeExpr")) + } + + state := p.cloneState() + + ok, err := and.run(p) + if err != nil { + p.addErr(err) + } + p.restoreState(state) + + return nil, ok +} + +func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAndExpr")) + } + + pt := p.pt + state := p.cloneState() + p.pushV() + _, ok := p.parseExpr(and.expr) + p.popV() + p.restoreState(state) + p.restore(pt) + + return nil, ok +} + +func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseAnyMatcher")) + } + + if p.pt.rn == utf8.RuneError && p.pt.w == 0 { + // EOF - see utf8.DecodeRune + p.failAt(false, p.pt.position, ".") + return nil, false + } + start := p.pt + p.read() + p.failAt(true, start.position, ".") + return p.sliceFrom(start), true +} + +func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseCharClassMatcher")) + } + + cur := p.pt.rn + start := p.pt + + // can't match EOF + if cur == utf8.RuneError && p.pt.w == 0 { // see utf8.DecodeRune + p.failAt(false, start.position, chr.val) + return nil, false + } + + if chr.ignoreCase { + cur = unicode.ToLower(cur) + } + + // try to match in the list of available chars + for _, rn := range chr.chars { + if rn == cur { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of ranges + for i := 0; i < len(chr.ranges); i += 2 { + if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of Unicode classes + for _, cl := range chr.classes { + if unicode.Is(cl, cur) { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + if chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false +} + +func (p *parser) incChoiceAltCnt(ch *choiceExpr, altI int) { + choiceIdent := fmt.Sprintf("%s %d:%d", p.rstack[len(p.rstack)-1].name, ch.pos.line, ch.pos.col) + m := p.ChoiceAltCnt[choiceIdent] + if m == nil { + m = make(map[string]int) + p.ChoiceAltCnt[choiceIdent] = m + } + // We increment altI by 1, so the keys do not start at 0 + alt := strconv.Itoa(altI + 1) + if altI == choiceNoMatch { + alt = p.choiceNoMatch + } + m[alt]++ +} + +func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseChoiceExpr")) + } + + for altI, alt := range ch.alternatives { + // dummy assignment to prevent compile error if optimized + _ = altI + + state := p.cloneState() + + p.pushV() + val, ok := p.parseExpr(alt) + p.popV() + if ok { + p.incChoiceAltCnt(ch, altI) + return val, ok + } + p.restoreState(state) + } + p.incChoiceAltCnt(ch, choiceNoMatch) + return nil, false +} + +func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseLabeledExpr")) + } + + p.pushV() + val, ok := 
p.parseExpr(lab.expr) + p.popV() + if ok && lab.label != "" { + m := p.vstack[len(p.vstack)-1] + m[lab.label] = val + } + return val, ok +} + +func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseLitMatcher")) + } + + start := p.pt + for _, want := range lit.val { + cur := p.pt.rn + if lit.ignoreCase { + cur = unicode.ToLower(cur) + } + if cur != want { + p.failAt(false, start.position, lit.want) + p.restore(start) + return nil, false + } + p.read() + } + p.failAt(true, start.position, lit.want) + return p.sliceFrom(start), true +} + +func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseNotCodeExpr")) + } + + state := p.cloneState() + + ok, err := not.run(p) + if err != nil { + p.addErr(err) + } + p.restoreState(state) + + return nil, !ok +} + +func (p *parser) parseNotExpr(not *notExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseNotExpr")) + } + + pt := p.pt + state := p.cloneState() + p.pushV() + p.maxFailInvertExpected = !p.maxFailInvertExpected + _, ok := p.parseExpr(not.expr) + p.maxFailInvertExpected = !p.maxFailInvertExpected + p.popV() + p.restoreState(state) + p.restore(pt) + + return nil, !ok +} + +func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseOneOrMoreExpr")) + } + + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + if len(vals) == 0 { + // did not match once, no match + return nil, false + } + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRecoveryExpr (" + strings.Join(recover.failureLabel, ",") + ")")) + } + + p.pushRecovery(recover.failureLabel, recover.recoverExpr) + val, ok := p.parseExpr(recover.expr) + p.popRecovery() + + return val, ok +} + +func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseRuleRefExpr " + ref.name)) + } + + if ref.name == "" { + panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos)) + } + + rule := p.rules[ref.name] + if rule == nil { + p.addErr(fmt.Errorf("undefined rule: %s", ref.name)) + return nil, false + } + return p.parseRule(rule) +} + +func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseSeqExpr")) + } + + vals := make([]interface{}, 0, len(seq.exprs)) + + pt := p.pt + state := p.cloneState() + for _, expr := range seq.exprs { + val, ok := p.parseExpr(expr) + if !ok { + p.restoreState(state) + p.restore(pt) + return nil, false + } + vals = append(vals, val) + } + return vals, true +} + +func (p *parser) parseStateCodeExpr(state *stateCodeExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseStateCodeExpr")) + } + + err := state.run(p) + if err != nil { + p.addErr(err) + } + return nil, true +} + +func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseThrowExpr")) + } + + for i := len(p.recoveryStack) - 1; i >= 0; i-- { + if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { + if val, ok := p.parseExpr(recoverExpr); ok { + return val, ok + } + } + } + + return nil, false +} + +func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseZeroOrMoreExpr")) + } + + var vals []interface{} + + for { + 
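+		// Keep consuming matches until the subexpression fails; unlike
+		// parseOneOrMoreExpr above, zero matches still counts as success.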
p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseZeroOrOneExpr(expr *zeroOrOneExpr) (interface{}, bool) { + if p.debug { + defer p.out(p.in("parseZeroOrOneExpr")) + } + + p.pushV() + val, _ := p.parseExpr(expr.expr) + p.popV() + // whether it matched or not, consider it a match + return val, true +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.pigeon b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.pigeon new file mode 100644 index 000000000..fdfbba0cf --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.pigeon @@ -0,0 +1,79 @@ +{ +package bootcommand + +} + +Input <- expr:Expr EOF { + return expr, nil +} + +Expr <- l:( Wait / CharToggle / Special / Literal)+ { + return l, nil +} + +Wait = ExprStart "wait" duration:( Duration / Integer )? ExprEnd { + var d time.Duration + switch t := duration.(type) { + case time.Duration: + d = t + case int64: + d = time.Duration(t) * time.Second + default: + d = time.Second + } + return &waitExpression{d}, nil +} + +CharToggle = ExprStart lit:(Literal) t:(On / Off) ExprEnd { + return &literal{lit.(*literal).s, t.(KeyAction)}, nil +} + +Special = ExprStart s:(SpecialKey) t:(On / Off)? ExprEnd { + l := strings.ToLower(string(s.([]byte))) + if t == nil { + return &specialExpression{l, KeyPress}, nil + } + return &specialExpression{l, t.(KeyAction)}, nil +} + +Number = '-'? Integer ( '.' Digit+ )? { + return string(c.text), nil +} + +Integer = '0' / NonZeroDigit Digit* { + return strconv.ParseInt(string(c.text), 10, 64) +} + +Duration = ( Number TimeUnit )+ { + return time.ParseDuration(string(c.text)) +} + +On = "on"i { + return KeyOn, nil +} + +Off = "off"i { + return KeyOff, nil +} + +Literal = . { + r, _ := utf8.DecodeRune(c.text) + return &literal{r, KeyPress}, nil +} + +ExprEnd = ">" +ExprStart = "<" +SpecialKey = "bs"i / "del"i / "enter"i / "esc"i / "f10"i / "f11"i / "f12"i + / "f1"i / "f2"i / "f3"i / "f4"i / "f5"i / "f6"i / "f7"i / "f8"i / "f9"i + / "return"i / "tab"i / "up"i / "down"i / "spacebar"i / "insert"i / "home"i + / "end"i / "pageUp"i / "pageDown"i / "leftAlt"i / "leftCtrl"i / "leftShift"i + / "rightAlt"i / "rightCtrl"i / "rightShift"i / "leftSuper"i / "rightSuper"i + / "left"i / "right"i + +NonZeroDigit = [1-9] +Digit = [0-9] +TimeUnit = ("ns" / "us" / "µs" / "ms" / "s" / "m" / "h") + +_ "whitespace" <- [ \n\t\r]* + +EOF <- !. diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command_ast.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command_ast.go new file mode 100644 index 000000000..f680345c4 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command_ast.go @@ -0,0 +1,157 @@ +package bootcommand + +import ( + "context" + "fmt" + "log" + "strings" + "time" +) + +// KeysAction represents what we want to do with a key press. +// It can take 3 states. 
We either want to: +// * press the key once +// * press and hold +// * press and release +type KeyAction int + +const ( + KeyOn KeyAction = 1 << iota + KeyOff + KeyPress +) + +func (k KeyAction) String() string { + switch k { + case KeyOn: + return "On" + case KeyOff: + return "Off" + case KeyPress: + return "Press" + } + panic(fmt.Sprintf("Unknwon KeyAction %d", k)) +} + +type expression interface { + // Do executes the expression + Do(context.Context, BCDriver) error + // Validate validates the expression without executing it + Validate() error +} + +type expressionSequence []expression + +// Do executes every expression in the sequence and then flushes remaining +// scancodes. +func (s expressionSequence) Do(ctx context.Context, b BCDriver) error { + // validate should never fail here, since it should be called before + // expressionSequence.Do. Only reason we don't panic is so we can clean up. + if errs := s.Validate(); errs != nil { + return fmt.Errorf("Found an invalid boot command. This is likely an error in Packer, so please open a ticket.") + } + + for _, exp := range s { + if err := ctx.Err(); err != nil { + return err + } + if err := exp.Do(ctx, b); err != nil { + return err + } + } + return b.Flush() +} + +// Validate tells us if every expression in the sequence is valid. +func (s expressionSequence) Validate() (errs []error) { + for _, exp := range s { + if err := exp.Validate(); err != nil { + errs = append(errs, err) + } + } + return +} + +// GenerateExpressionSequence generates a sequence of expressions from the +// given command. This is the primary entry point to the boot command parser. +func GenerateExpressionSequence(command string) (expressionSequence, error) { + seq := expressionSequence{} + if command == "" { + return seq, nil + } + got, err := ParseReader("", strings.NewReader(command)) + if err != nil { + return nil, err + } + for _, exp := range got.([]interface{}) { + seq = append(seq, exp.(expression)) + } + return seq, nil +} + +type waitExpression struct { + d time.Duration +} + +// Do waits the amount of time described by the expression. It is cancellable +// through the context. +func (w *waitExpression) Do(ctx context.Context, driver BCDriver) error { + driver.Flush() + log.Printf("[INFO] Waiting %s", w.d) + select { + case <-time.After(w.d): + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// Validate returns an error if the time is <= 0 +func (w *waitExpression) Validate() error { + if w.d <= 0 { + return fmt.Errorf("Expecting a positive wait value. Got %s", w.d) + } + return nil +} + +func (w *waitExpression) String() string { + return fmt.Sprintf("Wait<%s>", w.d) +} + +type specialExpression struct { + s string + action KeyAction +} + +// Do sends the special command to the driver, along with the key action. +func (s *specialExpression) Do(ctx context.Context, driver BCDriver) error { + return driver.SendSpecial(s.s, s.action) +} + +// Validate always passes +func (s *specialExpression) Validate() error { + return nil +} + +func (s *specialExpression) String() string { + return fmt.Sprintf("Spec-%s(%s)", s.action, s.s) +} + +type literal struct { + s rune + action KeyAction +} + +// Do sends the key to the driver, along with the key action. 
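+// For example, a bare "c" in a boot command is parsed into a literal that
+// calls SendKey('c', KeyPress), while "<cOn>" yields SendKey('c', KeyOn).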
+func (l *literal) Do(ctx context.Context, driver BCDriver) error {
+	return driver.SendKey(l.s, l.action)
+}
+
+// Validate always passes
+func (l *literal) Validate() error {
+	return nil
+}
+
+func (l *literal) String() string {
+	return fmt.Sprintf("LIT-%s(%s)", l.action, string(l.s))
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/config.go
new file mode 100644
index 000000000..e8f650822
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/config.go
@@ -0,0 +1,215 @@
+//go:generate struct-markdown
+
+package bootcommand
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+)
+
+// PackerKeyEnv is used to specify the key interval (delay) between keystrokes
+// sent to the VM, typically in boot commands. This is to prevent host CPU
+// utilization from causing key presses to be skipped or repeated incorrectly.
+const PackerKeyEnv = "PACKER_KEY_INTERVAL"
+
+// PackerKeyDefault 100ms is appropriate for shared build infrastructure while a
+// shorter delay (e.g. 10ms) can be used on a workstation. See PackerKeyEnv.
+const PackerKeyDefault = 100 * time.Millisecond
+
+// The boot configuration is very important: `boot_command` specifies the keys
+// to type when the virtual machine is first booted in order to start the OS
+// installer. This command is typed after boot_wait, which gives the virtual
+// machine some time to actually load.
+//
+// The boot_command is an array of strings. The strings are all typed in
+// sequence. It is an array only to improve readability within the template.
+//
+// There are a set of special keys available. If these are in your boot
+// command, they will be replaced by the proper key:
+//
+// - `<bs>` - Backspace
+//
+// - `<del>` - Delete
+//
+// - `<enter> <return>` - Simulates an actual "enter" or "return" keypress.
+//
+// - `<esc>` - Simulates pressing the escape key.
+//
+// - `<tab>` - Simulates pressing the tab key.
+//
+// - `<f1> - <f12>` - Simulates pressing a function key.
+//
+// - `<up> <down> <left> <right>` - Simulates pressing an arrow key.
+//
+// - `<spacebar>` - Simulates pressing the spacebar.
+//
+// - `<insert>` - Simulates pressing the insert key.
+//
+// - `<home> <end>` - Simulates pressing the home and end keys.
+//
+// - `<pageUp> <pageDown>` - Simulates pressing the page up and page down
+// keys.
+//
+// - `<menu>` - Simulates pressing the Menu key.
+//
+// - `<leftAlt> <rightAlt>` - Simulates pressing the alt key.
+//
+// - `<leftCtrl> <rightCtrl>` - Simulates pressing the ctrl key.
+//
+// - `<leftShift> <rightShift>` - Simulates pressing the shift key.
+//
+// - `<leftSuper> <rightSuper>` - Simulates pressing the ⌘ or Windows key.
+//
+// - `<wait> <wait5> <wait10>` - Adds a 1, 5 or 10 second pause before
+// sending any additional keys. This is useful if you have to generally
+// wait for the UI to update before typing more.
+//
+// - `<waitXX>` - Add an arbitrary pause before sending any additional keys.
+// The format of `XX` is a sequence of positive decimal numbers, each with
+// optional fraction and a unit suffix, such as `300ms`, `1.5h` or `2h45m`.
+// Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For
+// example `<wait10m>` or `<wait1m20s>`.
+//
+// - `<XXXOn> <XXXOff>` - Any printable keyboard character, and of these
+// "special" expressions, with the exception of the `<wait>` types, can
+// also be toggled on or off. For example, to simulate ctrl+c, use
+// `<leftCtrlOn>c<leftCtrlOff>`. Be sure to release them, otherwise they
+// will be held down until the machine reboots. To hold the `c` key down,
+// you would use `<cOn>`. Likewise, `<cOff>` to release.
+//
+// - `{{ .HTTPIP }} {{ .HTTPPort }}` - The IP and port, respectively of an
+// HTTP server that is started serving the directory specified by the
+// `http_directory` configuration parameter. If `http_directory` isn't
+// specified, these will be blank!
+//
+// - `{{ .Name }}` - The name of the VM.
+//
+// Example boot command. This is actually a working boot command used to start a
+// CentOS 6.4 installer:
+//
+// In JSON:
+//
+// ```json
+// "boot_command": [
+//    "<tab><wait>",
+//    " ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/centos6-ks.cfg<enter>"
+// ]
+// ```
+//
+// In HCL2:
+//
+// ```hcl
+// boot_command = [
+//    "<tab><wait>",
+//    " ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/centos6-ks.cfg<enter>"
+// ]
+// ```
+//
+// The example shown below is a working boot command used to start an Ubuntu
+// 12.04 installer:
+//
+// In JSON:
+//
+// ```json
+// "boot_command": [
+//    "<esc><esc><enter><wait>",
+//    "/install/vmlinuz noapic ",
+//    "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
+//    "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
+//    "hostname={{ .Name }} ",
+//    "fb=false debconf/frontend=noninteractive ",
+//    "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
+//    "keyboard-configuration/variant=USA console-setup/ask_detect=false ",
+//    "initrd=/install/initrd.gz -- <enter>"
+// ]
+// ```
+//
+// In HCL2:
+//
+// ```hcl
+// boot_command = [
+//    "<esc><esc><enter><wait>",
+//    "/install/vmlinuz noapic ",
+//    "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
+//    "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
+//    "hostname={{ .Name }} ",
+//    "fb=false debconf/frontend=noninteractive ",
+//    "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
+//    "keyboard-configuration/variant=USA console-setup/ask_detect=false ",
+//    "initrd=/install/initrd.gz -- <enter>"
+// ]
+// ```
+//
+// For more examples of various boot commands, see the sample projects from our
+// [community templates page](/community-tools#templates).
+type BootConfig struct {
+	// Time to wait after sending a group of key presses. The value of this
+	// should be a duration. Examples are `5s` and `1m30s` which will cause
+	// Packer to wait five seconds and one minute 30 seconds, respectively. If
+	// this isn't specified, a sensible default value is picked depending on
+	// the builder type.
+	BootGroupInterval time.Duration `mapstructure:"boot_keygroup_interval"`
+	// The time to wait after booting the initial virtual machine before typing
+	// the `boot_command`. The value of this should be a duration. Examples are
+	// `5s` and `1m30s` which will cause Packer to wait five seconds and one
+	// minute 30 seconds, respectively. If this isn't specified, the default is
+	// `10s` or 10 seconds. To set boot_wait to 0s, use a negative number, such
+	// as "-1s"
+	BootWait time.Duration `mapstructure:"boot_wait"`
+	// This is an array of commands to type when the virtual machine is first
+	// booted. The goal of these commands should be to type just enough to
+	// initialize the operating system installer. Special keys can be typed as
+	// well, and are covered in the section below on the boot command. If this
+	// is not specified, it is assumed the installer will start itself.
+	BootCommand []string `mapstructure:"boot_command"`
+}
+
+// The boot command "typed" character for character over a VNC connection to
+// the machine, simulating a human actually typing the keyboard.
+//
+// Keystrokes are typed as separate key up/down events over VNC with a default
+// 100ms delay.
The delay alleviates issues with latency and CPU contention.
+// You can tune this delay on a per-builder basis by specifying
+// "boot_key_interval" in your Packer template.
+type VNCConfig struct {
+	BootConfig `mapstructure:",squash"`
+	// Whether to create a VNC connection or not. A boot_command cannot be used
+	// when this is true. Defaults to false.
+	DisableVNC bool `mapstructure:"disable_vnc"`
+	// Time in ms to wait between each key press
+	BootKeyInterval time.Duration `mapstructure:"boot_key_interval"`
+}
+
+func (c *BootConfig) Prepare(ctx *interpolate.Context) (errs []error) {
+	if c.BootWait == 0 {
+		c.BootWait = 10 * time.Second
+	}
+
+	if c.BootCommand != nil {
+		expSeq, err := GenerateExpressionSequence(c.FlatBootCommand())
+		if err != nil {
+			errs = append(errs, err)
+		} else if vErrs := expSeq.Validate(); vErrs != nil {
+			errs = append(errs, vErrs...)
+		}
+	}
+
+	return
+}
+
+func (c *BootConfig) FlatBootCommand() string {
+	return strings.Join(c.BootCommand, "")
+}
+
+func (c *VNCConfig) Prepare(ctx *interpolate.Context) (errs []error) {
+	if len(c.BootCommand) > 0 && c.DisableVNC {
+		errs = append(errs,
+			fmt.Errorf("A boot command cannot be used when vnc is disabled."))
+	}
+
+	errs = append(errs, c.BootConfig.Prepare(ctx)...)
+	return
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/doc.go
new file mode 100644
index 000000000..2bfa3d8bd
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/doc.go
@@ -0,0 +1,10 @@
+// Package bootcommand generates and sends boot commands to the remote instance.
+//
+// This package is relevant to people who want to create new builders, particularly
+// builders with the capacity to build a VM from an iso.
+//
+// You can choose between three different drivers to send the command: a vnc
+// driver, a usb driver, and a PC-XT keyboard driver. The driver you choose will
+// depend on what kind of keyboard codes your hypervisor expects, and how you want
+// to implement the connection.
+package bootcommand
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/driver.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/driver.go
new file mode 100644
index 000000000..04b0eecd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/driver.go
@@ -0,0 +1,11 @@
+package bootcommand
+
+const shiftedChars = "~!@#$%^&*()_+{}|:\"<>?"
+
+// BCDriver is our access to the VM we want to type boot commands to
+type BCDriver interface {
+	SendKey(key rune, action KeyAction) error
+	SendSpecial(special string, action KeyAction) error
+	// Flush will be called when we want to send scancodes to the VM.
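+	// Implementations are free to batch: the PC-XT driver buffers codes and
+	// only writes on Flush, while the VNC and USB drivers send immediately
+	// and implement Flush as a no-op.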
+ Flush() error +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/gen.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/gen.go new file mode 100644 index 000000000..f4e3fa964 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/gen.go @@ -0,0 +1,3 @@ +//go:generate pigeon -o boot_command.go boot_command.pigeon + +package bootcommand diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/pc_xt_driver.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/pc_xt_driver.go new file mode 100644 index 000000000..93ab9f21f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/pc_xt_driver.go @@ -0,0 +1,213 @@ +package bootcommand + +import ( + "fmt" + "log" + "os" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +// SendCodeFunc will be called to send codes to the VM +type SendCodeFunc func([]string) error +type scMap map[string]*scancode + +type pcXTDriver struct { + interval time.Duration + sendImpl SendCodeFunc + specialMap scMap + scancodeMap map[rune]byte + buffer [][]string + // TODO: set from env + scancodeChunkSize int +} + +type scancode struct { + make []string + break_ []string +} + +func (sc *scancode) makeBreak() []string { + return append(sc.make, sc.break_...) +} + +// NewPCXTDriver creates a new boot command driver for VMs that expect PC-XT +// keyboard codes. `send` should send its argument to the VM. `chunkSize` should +// be the maximum number of keyboard codes to send to `send` at one time. +func NewPCXTDriver(send SendCodeFunc, chunkSize int, interval time.Duration) *pcXTDriver { + // We delay (default 100ms) between each input event to allow for CPU or + // network latency. See PackerKeyEnv for tuning. + keyInterval := PackerKeyDefault + if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil { + keyInterval = delay + } + // Override interval based on builder-specific override + if interval > time.Duration(0) { + keyInterval = interval + } + // Scancodes reference: https://www.win.tue.nl/~aeb/linux/kbd/scancodes-1.html + // https://www.win.tue.nl/~aeb/linux/kbd/scancodes-10.html + // + // Scancodes are recorded here in pairs. The first entry represents + // the key press and the second entry represents the key release and is + // derived from the first by the addition of 0x80. 
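+	// For example, "enter" below is make 0x1c and break 0x9c (0x1c + 0x80).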
+	sMap := make(scMap)
+	sMap["bs"] = &scancode{[]string{"0e"}, []string{"8e"}}
+	sMap["del"] = &scancode{[]string{"e0", "53"}, []string{"e0", "d3"}}
+	sMap["down"] = &scancode{[]string{"e0", "50"}, []string{"e0", "d0"}}
+	sMap["end"] = &scancode{[]string{"e0", "4f"}, []string{"e0", "cf"}}
+	sMap["enter"] = &scancode{[]string{"1c"}, []string{"9c"}}
+	sMap["esc"] = &scancode{[]string{"01"}, []string{"81"}}
+	sMap["f1"] = &scancode{[]string{"3b"}, []string{"bb"}}
+	sMap["f2"] = &scancode{[]string{"3c"}, []string{"bc"}}
+	sMap["f3"] = &scancode{[]string{"3d"}, []string{"bd"}}
+	sMap["f4"] = &scancode{[]string{"3e"}, []string{"be"}}
+	sMap["f5"] = &scancode{[]string{"3f"}, []string{"bf"}}
+	sMap["f6"] = &scancode{[]string{"40"}, []string{"c0"}}
+	sMap["f7"] = &scancode{[]string{"41"}, []string{"c1"}}
+	sMap["f8"] = &scancode{[]string{"42"}, []string{"c2"}}
+	sMap["f9"] = &scancode{[]string{"43"}, []string{"c3"}}
+	sMap["f10"] = &scancode{[]string{"44"}, []string{"c4"}}
+	sMap["f11"] = &scancode{[]string{"57"}, []string{"d7"}}
+	sMap["f12"] = &scancode{[]string{"58"}, []string{"d8"}}
+	sMap["home"] = &scancode{[]string{"e0", "47"}, []string{"e0", "c7"}}
+	sMap["insert"] = &scancode{[]string{"e0", "52"}, []string{"e0", "d2"}}
+	sMap["left"] = &scancode{[]string{"e0", "4b"}, []string{"e0", "cb"}}
+	sMap["leftalt"] = &scancode{[]string{"38"}, []string{"b8"}}
+	sMap["leftctrl"] = &scancode{[]string{"1d"}, []string{"9d"}}
+	sMap["leftshift"] = &scancode{[]string{"2a"}, []string{"aa"}}
+	sMap["leftsuper"] = &scancode{[]string{"e0", "5b"}, []string{"e0", "db"}}
+	sMap["menu"] = &scancode{[]string{"e0", "5d"}, []string{"e0", "dd"}}
+	sMap["pagedown"] = &scancode{[]string{"e0", "51"}, []string{"e0", "d1"}}
+	sMap["pageup"] = &scancode{[]string{"e0", "49"}, []string{"e0", "c9"}}
+	sMap["return"] = &scancode{[]string{"1c"}, []string{"9c"}}
+	sMap["right"] = &scancode{[]string{"e0", "4d"}, []string{"e0", "cd"}}
+	sMap["rightalt"] = &scancode{[]string{"e0", "38"}, []string{"e0", "b8"}}
+	sMap["rightctrl"] = &scancode{[]string{"e0", "1d"}, []string{"e0", "9d"}}
+	sMap["rightshift"] = &scancode{[]string{"36"}, []string{"b6"}}
+	sMap["rightsuper"] = &scancode{[]string{"e0", "5c"}, []string{"e0", "dc"}}
+	sMap["spacebar"] = &scancode{[]string{"39"}, []string{"b9"}}
+	sMap["tab"] = &scancode{[]string{"0f"}, []string{"8f"}}
+	sMap["up"] = &scancode{[]string{"e0", "48"}, []string{"e0", "c8"}}
+
+	scancodeIndex := make(map[string]byte)
+	scancodeIndex["1234567890-="] = 0x02
+	scancodeIndex["!@#$%^&*()_+"] = 0x02
+	scancodeIndex["qwertyuiop[]"] = 0x10
+	scancodeIndex["QWERTYUIOP{}"] = 0x10
+	scancodeIndex["asdfghjkl;'`"] = 0x1e
+	scancodeIndex[`ASDFGHJKL:"~`] = 0x1e
+	scancodeIndex[`\zxcvbnm,./`] = 0x2b
+	scancodeIndex["|ZXCVBNM<>?"] = 0x2b
+	scancodeIndex[" "] = 0x39
+
+	scancodeMap := make(map[rune]byte)
+	for chars, start := range scancodeIndex {
+		var i byte = 0
+		for len(chars) > 0 {
+			r, size := utf8.DecodeRuneInString(chars)
+			chars = chars[size:]
+			scancodeMap[r] = start + i
+			i += 1
+		}
+	}
+
+	return &pcXTDriver{
+		interval:          keyInterval,
+		sendImpl:          send,
+		specialMap:        sMap,
+		scancodeMap:       scancodeMap,
+		scancodeChunkSize: chunkSize,
+	}
+}
+
+// Flush sends all scancodes.
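+// Codes are sent in chunks of at most scancodeChunkSize, sleeping for the
+// configured interval between chunks.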
+func (d *pcXTDriver) Flush() error { + defer func() { + d.buffer = nil + }() + sc, err := chunkScanCodes(d.buffer, d.scancodeChunkSize) + if err != nil { + return err + } + for _, b := range sc { + if err := d.sendImpl(b); err != nil { + return err + } + time.Sleep(d.interval) + } + return nil +} + +func (d *pcXTDriver) SendKey(key rune, action KeyAction) error { + keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key) + + var sc []string + + if action&(KeyOn|KeyPress) != 0 { + scInt := d.scancodeMap[key] + if keyShift { + sc = append(sc, "2a") + } + sc = append(sc, fmt.Sprintf("%02x", scInt)) + } + + if action&(KeyOff|KeyPress) != 0 { + scInt := d.scancodeMap[key] + 0x80 + if keyShift { + sc = append(sc, "aa") + } + sc = append(sc, fmt.Sprintf("%02x", scInt)) + } + + log.Printf("Sending char '%c', code '%s', shift %v", + key, strings.Join(sc, ""), keyShift) + + d.send(sc) + return nil +} + +func (d *pcXTDriver) SendSpecial(special string, action KeyAction) error { + keyCode, ok := d.specialMap[special] + if !ok { + return fmt.Errorf("special %s not found.", special) + } + log.Printf("Special code '%s' '<%s>' found, replacing with: %v", action.String(), special, keyCode) + + switch action { + case KeyOn: + d.send(keyCode.make) + case KeyOff: + d.send(keyCode.break_) + case KeyPress: + d.send(keyCode.makeBreak()) + } + return nil +} + +// send stores the codes in an internal buffer. Use Flush to send them. +func (d *pcXTDriver) send(codes []string) { + d.buffer = append(d.buffer, codes) +} + +func chunkScanCodes(sc [][]string, size int) (out [][]string, err error) { + var running []string + for _, codes := range sc { + if size > 0 { + if len(codes) > size { + return nil, fmt.Errorf("chunkScanCodes: size cannot be smaller than sc width.") + } + if len(running)+len(codes) > size { + out = append(out, running) + running = nil + } + } + running = append(running, codes...) + } + if running != nil { + out = append(out, running) + } + return +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/usb_driver.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/usb_driver.go new file mode 100644 index 000000000..c79c5a312 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/usb_driver.go @@ -0,0 +1,135 @@ +package bootcommand + +import ( + "fmt" + "log" + "os" + "strings" + "time" + "unicode" + + "golang.org/x/mobile/event/key" +) + +// SendUsbScanCodes will be called to send codes to the VM +type SendUsbScanCodes func(k key.Code, down bool) error + +type usbDriver struct { + sendImpl SendUsbScanCodes + interval time.Duration + specialMap map[string]key.Code + scancodeMap map[rune]key.Code +} + +func NewUSBDriver(send SendUsbScanCodes, interval time.Duration) *usbDriver { + // We delay (default 100ms) between each key event to allow for CPU or + // network latency. See PackerKeyEnv for tuning. + keyInterval := PackerKeyDefault + if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil { + keyInterval = delay + } + // override interval based on builder-specific override. 
+ if interval > time.Duration(0) { + keyInterval = interval + } + + special := map[string]key.Code{ + "enter": key.CodeReturnEnter, + "return": key.CodeReturnEnter, + "esc": key.CodeEscape, + "bs": key.CodeDeleteBackspace, + "del": key.CodeDeleteForward, + "tab": key.CodeTab, + "f1": key.CodeF1, + "f2": key.CodeF2, + "f3": key.CodeF3, + "f4": key.CodeF4, + "f5": key.CodeF5, + "f6": key.CodeF6, + "f7": key.CodeF7, + "f8": key.CodeF8, + "f9": key.CodeF9, + "f10": key.CodeF10, + "f11": key.CodeF11, + "f12": key.CodeF12, + "insert": key.CodeInsert, + "home": key.CodeHome, + "end": key.CodeEnd, + "pageUp": key.CodePageUp, + "pageDown": key.CodePageDown, + "left": key.CodeLeftArrow, + "right": key.CodeRightArrow, + "up": key.CodeUpArrow, + "down": key.CodeDownArrow, + "leftalt": key.CodeLeftAlt, + "leftctrl": key.CodeLeftControl, + "leftshift": key.CodeLeftShift, + "rightalt": key.CodeRightAlt, + "rightctrl": key.CodeRightControl, + "rightshift": key.CodeRightShift, + "leftsuper": key.CodeLeftGUI, + "rightsuper": key.CodeRightGUI, + "spacebar": key.CodeSpacebar, + } + + scancodeIndex := make(map[string]key.Code) + scancodeIndex["abcdefghijklmnopqrstuvwxyz"] = key.CodeA + scancodeIndex["ABCDEFGHIJKLMNOPQRSTUVWXYZ"] = key.CodeA + scancodeIndex["1234567890"] = key.Code1 + scancodeIndex["!@#$%^&*()"] = key.Code1 + scancodeIndex[" "] = key.CodeSpacebar + scancodeIndex["-=[]\\"] = key.CodeHyphenMinus + scancodeIndex["_+{}|"] = key.CodeHyphenMinus + scancodeIndex[";'`,./"] = key.CodeSemicolon + scancodeIndex[":\"~<>?"] = key.CodeSemicolon + + var scancodeMap = make(map[rune]key.Code) + for chars, start := range scancodeIndex { + for i, r := range chars { + scancodeMap[r] = start + key.Code(i) + } + } + + return &usbDriver{ + sendImpl: send, + specialMap: special, + interval: keyInterval, + scancodeMap: scancodeMap, + } +} + +func (d *usbDriver) keyEvent(k key.Code, down bool) error { + if err := d.sendImpl(k, down); err != nil { + return err + } + time.Sleep(d.interval) + return nil +} + +func (d *usbDriver) Flush() error { + return nil +} + +func (d *usbDriver) SendKey(k rune, action KeyAction) error { + keyShift := unicode.IsUpper(k) || strings.ContainsRune(shiftedChars, k) + keyCode := d.scancodeMap[k] + log.Printf("Sending char '%c', code %s, shift %v", k, keyCode, keyShift) + return d.keyEvent(keyCode, keyShift) +} + +func (d *usbDriver) SendSpecial(special string, action KeyAction) (err error) { + keyCode, ok := d.specialMap[special] + if !ok { + return fmt.Errorf("special %s not found.", special) + } + log.Printf("Special code '<%s>' found, replacing with: %s", special, keyCode) + + switch action { + case KeyOn: + err = d.keyEvent(keyCode, true) + case KeyOff, KeyPress: + err = d.keyEvent(keyCode, false) + } + + return err +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/vnc_driver.go b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/vnc_driver.go new file mode 100644 index 000000000..f1b98ae8d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/vnc_driver.go @@ -0,0 +1,149 @@ +package bootcommand + +import ( + "fmt" + "log" + "os" + "strings" + "time" + "unicode" +) + +const KeyLeftShift uint32 = 0xFFE1 + +type VNCKeyEvent interface { + KeyEvent(uint32, bool) error +} + +type vncDriver struct { + c VNCKeyEvent + interval time.Duration + specialMap map[string]uint32 + // keyEvent can set this error which will prevent it from continuing + err error +} + +func NewVNCDriver(c VNCKeyEvent, interval time.Duration) *vncDriver { + // We delay 
(default 100ms) between each key event to allow for CPU or + // network latency. See PackerKeyEnv for tuning. + keyInterval := PackerKeyDefault + if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil { + keyInterval = delay + } + // override interval based on builder-specific override. + if interval > time.Duration(0) { + keyInterval = interval + } + + // Scancodes reference: https://github.com/qemu/qemu/blob/master/ui/vnc_keysym.h + sMap := make(map[string]uint32) + sMap["bs"] = 0xFF08 + sMap["del"] = 0xFFFF + sMap["down"] = 0xFF54 + sMap["end"] = 0xFF57 + sMap["enter"] = 0xFF0D + sMap["esc"] = 0xFF1B + sMap["f1"] = 0xFFBE + sMap["f2"] = 0xFFBF + sMap["f3"] = 0xFFC0 + sMap["f4"] = 0xFFC1 + sMap["f5"] = 0xFFC2 + sMap["f6"] = 0xFFC3 + sMap["f7"] = 0xFFC4 + sMap["f8"] = 0xFFC5 + sMap["f9"] = 0xFFC6 + sMap["f10"] = 0xFFC7 + sMap["f11"] = 0xFFC8 + sMap["f12"] = 0xFFC9 + sMap["home"] = 0xFF50 + sMap["insert"] = 0xFF63 + sMap["left"] = 0xFF51 + sMap["leftalt"] = 0xFFE9 + sMap["leftctrl"] = 0xFFE3 + sMap["leftshift"] = 0xFFE1 + sMap["leftsuper"] = 0xFFEB + sMap["menu"] = 0xFF67 + sMap["pagedown"] = 0xFF56 + sMap["pageup"] = 0xFF55 + sMap["return"] = 0xFF0D + sMap["right"] = 0xFF53 + sMap["rightalt"] = 0xFFEA + sMap["rightctrl"] = 0xFFE4 + sMap["rightshift"] = 0xFFE2 + sMap["rightsuper"] = 0xFFEC + sMap["spacebar"] = 0x020 + sMap["tab"] = 0xFF09 + sMap["up"] = 0xFF52 + + return &vncDriver{ + c: c, + interval: keyInterval, + specialMap: sMap, + } +} + +func (d *vncDriver) keyEvent(k uint32, down bool) error { + if d.err != nil { + return nil + } + if err := d.c.KeyEvent(k, down); err != nil { + d.err = err + return err + } + time.Sleep(d.interval) + return nil +} + +// Flush does nothing here +func (d *vncDriver) Flush() error { + return nil +} + +func (d *vncDriver) SendKey(key rune, action KeyAction) error { + keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key) + keyCode := uint32(key) + log.Printf("Sending char '%c', code 0x%X, shift %v", key, keyCode, keyShift) + + switch action { + case KeyOn: + if keyShift { + d.keyEvent(KeyLeftShift, true) + } + d.keyEvent(keyCode, true) + case KeyOff: + if keyShift { + d.keyEvent(KeyLeftShift, false) + } + d.keyEvent(keyCode, false) + case KeyPress: + if keyShift { + d.keyEvent(KeyLeftShift, true) + } + d.keyEvent(keyCode, true) + d.keyEvent(keyCode, false) + if keyShift { + d.keyEvent(KeyLeftShift, false) + } + } + return d.err +} + +func (d *vncDriver) SendSpecial(special string, action KeyAction) error { + keyCode, ok := d.specialMap[special] + if !ok { + return fmt.Errorf("special %s not found.", special) + } + log.Printf("Special code '<%s>' found, replacing with: 0x%X", special, keyCode) + + switch action { + case KeyOn: + d.keyEvent(keyCode, true) + case KeyOff: + d.keyEvent(keyCode, false) + case KeyPress: + d.keyEvent(keyCode, true) + d.keyEvent(keyCode, false) + } + + return d.err +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/cleanup.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/cleanup.go new file mode 100644 index 000000000..d31d25ffd --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/cleanup.go @@ -0,0 +1,10 @@ +package chroot + +import ( + "github.com/hashicorp/packer-plugin-sdk/multistep" +) + +// Cleanup is an interface that some steps implement for early cleanup. 
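+// A step typically registers itself in the state bag under a well-known key
+// (e.g. "copy_files_cleanup") so that StepEarlyCleanup can find and run the
+// cleanup before the volume is snapshotted.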
+type Cleanup interface { + CleanupFunc(multistep.StateBag) error +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/communicator.go new file mode 100644 index 000000000..23a785249 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/communicator.go @@ -0,0 +1,144 @@ +package chroot + +import ( + "bytes" + "context" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "syscall" + + "github.com/hashicorp/packer-plugin-sdk/common" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/tmp" +) + +// Communicator is a special communicator that works by executing +// commands locally but within a chroot. +type Communicator struct { + Chroot string + CmdWrapper common.CommandWrapper +} + +func (c *Communicator) Start(ctx context.Context, cmd *packersdk.RemoteCmd) error { + // need extra escapes for the command since we're wrapping it in quotes + cmd.Command = strconv.Quote(cmd.Command) + command, err := c.CmdWrapper( + fmt.Sprintf("chroot %s /bin/sh -c %s", c.Chroot, cmd.Command)) + if err != nil { + return err + } + + localCmd := common.ShellCommand(command) + localCmd.Stdin = cmd.Stdin + localCmd.Stdout = cmd.Stdout + localCmd.Stderr = cmd.Stderr + log.Printf("Executing: %s %#v", localCmd.Path, localCmd.Args) + if err := localCmd.Start(); err != nil { + return err + } + + go func() { + exitStatus := 0 + if err := localCmd.Wait(); err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitStatus = 1 + + // There is no process-independent way to get the REAL + // exit status so we just try to go deeper. + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + } + } + + log.Printf( + "Chroot execution exited with '%d': '%s'", + exitStatus, cmd.Command) + cmd.SetExited(exitStatus) + }() + + return nil +} + +func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error { + dst = filepath.Join(c.Chroot, dst) + log.Printf("Uploading to chroot dir: %s", dst) + tf, err := tmp.File("packer-amazon-chroot") + if err != nil { + return fmt.Errorf("Error preparing shell script: %s", err) + } + defer os.Remove(tf.Name()) + + if _, err := io.Copy(tf, r); err != nil { + return err + } + + cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp %s %s", tf.Name(), dst)) + if err != nil { + return err + } + + return common.ShellCommand(cpCmd).Run() +} + +func (c *Communicator) UploadDir(dst string, src string, exclude []string) error { + // If src ends with a trailing "/", copy from "src/." so that + // directory contents (including hidden files) are copied, but the + // directory "src" is omitted. BSD does this automatically when + // the source contains a trailing slash, but linux does not. + if src[len(src)-1] == '/' { + src = src + "." + } + + // TODO: remove any file copied if it appears in `exclude` + chrootDest := filepath.Join(c.Chroot, dst) + + log.Printf("Uploading directory '%s' to '%s'", src, chrootDest) + cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp -R '%s' %s", src, chrootDest)) + if err != nil { + return err + } + + var stderr bytes.Buffer + cmd := common.ShellCommand(cpCmd) + cmd.Env = append(cmd.Env, "LANG=C") + cmd.Env = append(cmd.Env, os.Environ()...) + cmd.Stderr = &stderr + err = cmd.Run() + if err == nil { + return err + } + + if strings.Contains(stderr.String(), "No such file") { + // This just means that the directory was empty. Just ignore it. 
+ return nil + } + + return err +} + +func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error { + return fmt.Errorf("DownloadDir is not implemented for amazon-chroot") +} + +func (c *Communicator) Download(src string, w io.Writer) error { + src = filepath.Join(c.Chroot, src) + log.Printf("Downloading from chroot dir: %s", src) + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.Copy(w, f); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/doc.go new file mode 100644 index 000000000..26e8259fb --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/doc.go @@ -0,0 +1,24 @@ +/* +Package chroot provides convenience tooling specific to chroot builders. + +Chroot builders work by creating a new volume from an existing source image and +attaching it into an already-running instance. Once attached, a chroot is used +to provision the system within that volume. After provisioning, the volume is +detached, snapshotted, and a cloud-specific image is made. + +Using this process, minutes can be shaved off image build processes because a +new instance doesn't need to be launched in the cloud before provisioning can +take place. + +There are some restrictions, however. The host instance where the volume is +attached to must be a similar system (generally the same OS version, kernel +versions, etc.) as the image being built. Additionally, this process is much +more expensive because the instance used to perform the build must be kept +running persistently in order to build images, whereas the other non-chroot +cloud image builders start instances on-demand to build images as needed. + +The HashiCorp-maintained Amazon and Azure builder plugins have chroot builders +which use this option and can serve as an example for how the chroot steps and +communicator are used. 
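+
+As a rough sketch (the cloud-specific attach, mount and snapshot steps live in
+each plugin), a chroot builder sequences the steps in this package as:
+StepCopyFiles to bring host files such as /etc/resolv.conf into the chroot,
+StepChrootProvision to run provisioners through the chroot Communicator, and
+StepEarlyCleanup to unwind the mounts and copies before snapshotting.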
+*/ +package chroot diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/interpolate_context_provider.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/interpolate_context_provider.go new file mode 100644 index 000000000..9aab670b5 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/interpolate_context_provider.go @@ -0,0 +1,7 @@ +package chroot + +import "github.com/hashicorp/packer-plugin-sdk/template/interpolate" + +type interpolateContextProvider interface { + GetContext() interpolate.Context +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/run_local_commands.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/run_local_commands.go new file mode 100644 index 000000000..946ecda8e --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/run_local_commands.go @@ -0,0 +1,42 @@ +package chroot + +import ( + "context" + "fmt" + + "github.com/hashicorp/packer-plugin-sdk/common" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + sl "github.com/hashicorp/packer-plugin-sdk/shell-local" + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" +) + +func RunLocalCommands(commands []string, wrappedCommand common.CommandWrapper, ictx interpolate.Context, ui packersdk.Ui) error { + ctx := context.TODO() + for _, rawCmd := range commands { + intCmd, err := interpolate.Render(rawCmd, &ictx) + if err != nil { + return fmt.Errorf("Error interpolating: %s", err) + } + + command, err := wrappedCommand(intCmd) + if err != nil { + return fmt.Errorf("Error wrapping command: %s", err) + } + + ui.Say(fmt.Sprintf("Executing command: %s", command)) + comm := &sl.Communicator{ + ExecuteCommand: []string{"sh", "-c", command}, + } + cmd := &packersdk.RemoteCmd{Command: command} + if err := cmd.RunWithUi(ctx, comm, ui); err != nil { + return fmt.Errorf("Error executing command: %s", err) + } + if cmd.ExitStatus() != 0 { + return fmt.Errorf( + "Received non-zero exit code %d from command: %s", + cmd.ExitStatus(), + command) + } + } + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_chroot_provision.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_chroot_provision.go new file mode 100644 index 000000000..4730619e6 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_chroot_provision.go @@ -0,0 +1,46 @@ +package chroot + +import ( + "context" + "log" + + "github.com/hashicorp/packer-plugin-sdk/common" + "github.com/hashicorp/packer-plugin-sdk/multistep" + "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepChrootProvision provisions the instance within a chroot. +type StepChrootProvision struct { +} + +func (s *StepChrootProvision) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + hook := state.Get("hook").(packersdk.Hook) + mountPath := state.Get("mount_path").(string) + ui := state.Get("ui").(packersdk.Ui) + wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper) + + // Create our communicator + comm := &Communicator{ + Chroot: mountPath, + CmdWrapper: wrappedCommand, + } + + // Loads hook data from builder's state, if it has been set. 
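+	// (Typically connection details and build metadata that provisioners and
+	// post-processors can interpolate.)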
+	hookData := commonsteps.PopulateProvisionHookData(state)
+
+	// Update the generated_data state with the complete hookData
+	// to make it accessible to post-processors.
+	state.Put("generated_data", hookData)
+
+	// Provision
+	log.Println("Running the provision hook")
+	if err := hook.Run(ctx, packersdk.HookProvision, ui, comm, hookData); err != nil {
+		state.Put("error", err)
+		return multistep.ActionHalt
+	}
+
+	return multistep.ActionContinue
+}
+
+func (s *StepChrootProvision) Cleanup(state multistep.StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_copy_files.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_copy_files.go
new file mode 100644
index 000000000..869acb2bf
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_copy_files.go
@@ -0,0 +1,103 @@
+package chroot
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"log"
+	"path/filepath"
+	"runtime"
+
+	"github.com/hashicorp/packer-plugin-sdk/common"
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// StepCopyFiles copies some files from the host into the chroot environment.
+//
+// Produces:
+//   copy_files_cleanup CleanupFunc - A function to clean up the copied files
+//   early.
+type StepCopyFiles struct {
+	Files []string
+	files []string
+}
+
+func (s *StepCopyFiles) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	mountPath := state.Get("mount_path").(string)
+	ui := state.Get("ui").(packersdk.Ui)
+	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
+	stderr := new(bytes.Buffer)
+
+	s.files = make([]string, 0, len(s.Files))
+	if len(s.Files) > 0 {
+		ui.Say("Copying files from host to chroot...")
+		var removeDestinationOption string
+		switch runtime.GOOS {
+		case "freebsd":
+			// The -f option here is closer to GNU --remove-destination than
+			// what POSIX says -f should do.
+			removeDestinationOption = "-f"
+		default:
+			// This is the GNU coreutils version.
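+			// (`cp --remove-destination` unlinks an existing destination
+			// before copying, so e.g. a dangling /etc/resolv.conf symlink
+			// inside the chroot can be replaced with a regular file.)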
+			removeDestinationOption = "--remove-destination"
+		}
+		for _, path := range s.Files {
+			ui.Message(path)
+			chrootPath := filepath.Join(mountPath, path)
+			log.Printf("Copying '%s' to '%s'", path, chrootPath)
+
+			cmdText, err := wrappedCommand(fmt.Sprintf("cp %s %s %s", removeDestinationOption, path, chrootPath))
+			if err != nil {
+				err := fmt.Errorf("Error building copy command: %s", err)
+				state.Put("error", err)
+				ui.Error(err.Error())
+				return multistep.ActionHalt
+			}
+
+			stderr.Reset()
+			cmd := common.ShellCommand(cmdText)
+			cmd.Stderr = stderr
+			if err := cmd.Run(); err != nil {
+				err := fmt.Errorf(
+					"Error copying file: %s\nStderr: %s", err, stderr.String())
+				state.Put("error", err)
+				ui.Error(err.Error())
+				return multistep.ActionHalt
+			}
+
+			s.files = append(s.files, chrootPath)
+		}
+	}
+
+	state.Put("copy_files_cleanup", s)
+	return multistep.ActionContinue
+}
+
+func (s *StepCopyFiles) Cleanup(state multistep.StateBag) {
+	ui := state.Get("ui").(packersdk.Ui)
+	if err := s.CleanupFunc(state); err != nil {
+		ui.Error(err.Error())
+	}
+}
+
+func (s *StepCopyFiles) CleanupFunc(state multistep.StateBag) error {
+	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
+	if s.files != nil {
+		for _, file := range s.files {
+			log.Printf("Removing: %s", file)
+			localCmdText, err := wrappedCommand(fmt.Sprintf("rm -f %s", file))
+			if err != nil {
+				return err
+			}
+
+			localCmd := common.ShellCommand(localCmdText)
+			if err := localCmd.Run(); err != nil {
+				return err
+			}
+		}
+	}
+
+	s.files = nil
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_early_cleanup.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_early_cleanup.go
new file mode 100644
index 000000000..f91681a11
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_early_cleanup.go
@@ -0,0 +1,39 @@
+package chroot
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// StepEarlyCleanup performs some of the cleanup steps early in order to
+// prepare for snapshotting and creating an AMI.
+type StepEarlyCleanup struct{}
+
+func (s *StepEarlyCleanup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	ui := state.Get("ui").(packersdk.Ui)
+	cleanupKeys := []string{
+		"copy_files_cleanup",
+		"mount_extra_cleanup",
+		"mount_device_cleanup",
+		"attach_cleanup",
+	}
+
+	for _, key := range cleanupKeys {
+		c := state.Get(key).(Cleanup)
+		log.Printf("Running cleanup func: %s", key)
+		if err := c.CleanupFunc(state); err != nil {
+			err := fmt.Errorf("Error cleaning up: %s", err)
+			state.Put("error", err)
+			ui.Error(err.Error())
+			return multistep.ActionHalt
+		}
+	}
+
+	return multistep.ActionContinue
+}
+
+func (s *StepEarlyCleanup) Cleanup(state multistep.StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_mount_extra.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_mount_extra.go
new file mode 100644
index 000000000..843a385cc
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_mount_extra.go
@@ -0,0 +1,138 @@
+package chroot
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"syscall"
+
+	"github.com/hashicorp/packer-plugin-sdk/common"
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// StepMountExtra mounts the additional paths specified by ChrootMounts
+// within the chroot environment.
+// +// Produces: +// mount_extra_cleanup CleanupFunc - To perform early cleanup +type StepMountExtra struct { + ChrootMounts [][]string + mounts []string +} + +func (s *StepMountExtra) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + mountPath := state.Get("mount_path").(string) + ui := state.Get("ui").(packersdk.Ui) + wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper) + + s.mounts = make([]string, 0, len(s.ChrootMounts)) + + ui.Say("Mounting additional paths within the chroot...") + for _, mountInfo := range s.ChrootMounts { + innerPath := mountPath + mountInfo[2] + + if err := os.MkdirAll(innerPath, 0755); err != nil { + err := fmt.Errorf("Error creating mount directory: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + flags := "-t " + mountInfo[0] + if mountInfo[0] == "bind" { + flags = "--bind" + } + + ui.Message(fmt.Sprintf("Mounting: %s", mountInfo[2])) + stderr := new(bytes.Buffer) + mountCommand, err := wrappedCommand(fmt.Sprintf( + "mount %s %s %s", + flags, + mountInfo[1], + innerPath)) + if err != nil { + err := fmt.Errorf("Error creating mount command: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + cmd := common.ShellCommand(mountCommand) + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + err := fmt.Errorf( + "Error mounting: %s\nStderr: %s", err, stderr.String()) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + s.mounts = append(s.mounts, innerPath) + } + + state.Put("mount_extra_cleanup", s) + return multistep.ActionContinue +} + +func (s *StepMountExtra) Cleanup(state multistep.StateBag) { + ui := state.Get("ui").(packersdk.Ui) + + if err := s.CleanupFunc(state); err != nil { + ui.Error(err.Error()) + return + } +} + +func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error { + if s.mounts == nil { + return nil + } + + wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper) + for len(s.mounts) > 0 { + var path string + lastIndex := len(s.mounts) - 1 + path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex] + + grepCommand, err := wrappedCommand(fmt.Sprintf("grep %s /proc/mounts", path)) + if err != nil { + return fmt.Errorf("Error creating grep command: %s", err) + } + + // Before attempting to unmount, + // check to see if path is already unmounted + stderr := new(bytes.Buffer) + cmd := common.ShellCommand(grepCommand) + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + if status, ok := exitError.Sys().(syscall.WaitStatus); ok { + exitStatus := status.ExitStatus() + if exitStatus == 1 { + // path has already been unmounted + // just skip this path + continue + } + } + } + } + + unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path)) + if err != nil { + return fmt.Errorf("Error creating unmount command: %s", err) + } + + stderr = new(bytes.Buffer) + cmd = common.ShellCommand(unmountCommand) + cmd.Stderr = stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf( + "Error unmounting device: %s\nStderr: %s", err, stderr.String()) + } + } + + s.mounts = nil + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_post_mount_commands.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_post_mount_commands.go new file mode 100644 index 000000000..803c61f20 --- /dev/null +++ 
b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_post_mount_commands.go
@@ -0,0 +1,48 @@
+package chroot
+
+import (
+	"context"
+
+	"github.com/hashicorp/packer-plugin-sdk/common"
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+type postMountCommandsData struct {
+	Device    string
+	MountPath string
+}
+
+// StepPostMountCommands allows running arbitrary commands after mounting the
+// device, but prior to the bind mount and copy steps. The commands may use
+// the `{{.Device}}` and `{{.MountPath}}` template variables.
+type StepPostMountCommands struct {
+	Commands []string
+}
+
+func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	config := state.Get("config").(interpolateContextProvider)
+	device := state.Get("device").(string)
+	mountPath := state.Get("mount_path").(string)
+	ui := state.Get("ui").(packersdk.Ui)
+	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
+
+	if len(s.Commands) == 0 {
+		return multistep.ActionContinue
+	}
+
+	ictx := config.GetContext()
+	ictx.Data = &postMountCommandsData{
+		Device:    device,
+		MountPath: mountPath,
+	}
+
+	ui.Say("Running post-mount commands...")
+	if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	return multistep.ActionContinue
+}
+
+func (s *StepPostMountCommands) Cleanup(state multistep.StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_pre_mount_commands.go b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_pre_mount_commands.go
new file mode 100644
index 000000000..8434dd27f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_pre_mount_commands.go
@@ -0,0 +1,42 @@
+package chroot
+
+import (
+	"context"
+
+	"github.com/hashicorp/packer-plugin-sdk/common"
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+type preMountCommandsData struct {
+	Device string
+}
+
+// StepPreMountCommands sets up a new block device when building from scratch.
+// The commands may use the `{{.Device}}` template variable.
+type StepPreMountCommands struct {
+	Commands []string
+}
+
+func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	config := state.Get("config").(interpolateContextProvider)
+	device := state.Get("device").(string)
+	ui := state.Get("ui").(packersdk.Ui)
+	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
+
+	if len(s.Commands) == 0 {
+		return multistep.ActionContinue
+	}
+
+	ictx := config.GetContext()
+	ictx.Data = &preMountCommandsData{Device: device}
+
+	ui.Say("Running device setup commands...")
+	if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
+		state.Put("error", err)
+		ui.Error(err.Error())
+		return multistep.ActionHalt
+	}
+	return multistep.ActionContinue
+}
+
+func (s *StepPreMountCommands) Cleanup(state multistep.StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/common/command.go b/vendor/github.com/hashicorp/packer-plugin-sdk/common/command.go
new file mode 100644
index 000000000..aa9d2d308
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/common/command.go
@@ -0,0 +1,27 @@
+// Package common provides the PackerConfig structure that gets passed to every
+// plugin and contains information populated by the Packer core.
+// This config contains data about command line flags that were used, as well
+// as template information and information about the Packer core's version. It
+// also provides string constants to use to access that config.
+package common
+
+import (
+	"os/exec"
+)
+
+// CommandWrapper is a type that, given a command, will modify that
+// command in-flight. This might return an error.
+// For example, your command could be `foo` and your CommandWrapper could be
+//	func(s string) (string, error) {
+//		return fmt.Sprintf("/bin/sh %s", s), nil
+//	}
+// Using the CommandWrapper, you can set environment variables or perform
+// string interpolation once rather than many times, to save some lines of code
+// if similar wrapping needs to be performed many times during a plugin run.
+type CommandWrapper func(string) (string, error)
+
+// ShellCommand takes a command string and returns an *exec.Cmd to execute
+// it within the context of a shell (/bin/sh).
+func ShellCommand(command string) *exec.Cmd {
+	return exec.Command("/bin/sh", "-c", command)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/common/packer_config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/common/packer_config.go
new file mode 100644
index 000000000..34c3091a9
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/common/packer_config.go
@@ -0,0 +1,53 @@
+package common
+
+const (
+	// This is the key in configurations that is set to the name of the
+	// build.
+	BuildNameConfigKey = "packer_build_name"
+
+	// This is the key in the configuration that is set to the type
+	// of the builder that is run. This is useful for provisioners and
+	// such who want to make use of this.
+	BuilderTypeConfigKey = "packer_builder_type"
+
+	// This is the key in the configuration that is set to the version of the
+	// Packer Core. This can be used by plugins to set user agents, etc.,
+	// without having to import the Core to find out the Packer version.
+	CoreVersionConfigKey = "packer_core_version"
+
+	// This is the key in configurations that is set to "true" when Packer
+	// debugging is enabled.
+	DebugConfigKey = "packer_debug"
+
+	// This is the key in configurations that is set to "true" when Packer
+	// force build is enabled.
+	ForceConfigKey = "packer_force"
+
+	// This key determines what to do when a normal multistep step fails
+	// - "cleanup" - run cleanup steps
+	// - "abort" - exit without cleanup
+	// - "ask" - ask the user
+	OnErrorConfigKey = "packer_on_error"
+
+	// TemplatePathKey is the path to the template that configured this build
+	TemplatePathKey = "packer_template_path"
+
+	// This key contains a map[string]string of the user variables for
+	// template processing.
+	UserVariablesConfigKey = "packer_user_variables"
+)
+
+// PackerConfig is a struct that contains the configuration keys that
+// are sent by Packer, properly tagged already so mapstructure can load
+// them. Embed this structure into your configuration struct to get access to
+// this information from the Packer Core.
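+//
+// A minimal sketch of embedding it (the surrounding struct is illustrative):
+//
+//	type Config struct {
+//		common.PackerConfig `mapstructure:",squash"`
+//		// builder-specific options ...
+//	}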
+type PackerConfig struct { + PackerBuildName string `mapstructure:"packer_build_name"` + PackerBuilderType string `mapstructure:"packer_builder_type"` + PackerCoreVersion string `mapstructure:"packer_core_version"` + PackerDebug bool `mapstructure:"packer_debug"` + PackerForce bool `mapstructure:"packer_force"` + PackerOnError string `mapstructure:"packer_on_error"` + PackerUserVars map[string]string `mapstructure:"packer_user_variables"` + PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables"` +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/comm_host.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/comm_host.go new file mode 100644 index 000000000..a07b0b400 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/comm_host.go @@ -0,0 +1,26 @@ +package communicator + +import ( + "fmt" + "log" + + "github.com/hashicorp/packer-plugin-sdk/multistep" +) + +// CommHost determines the IP address of the cloud instance that Packer +// should connect to. A custom CommHost function can be implemented in each +// builder if need be; this is a generic function that should work for most +// cloud builders. +func CommHost(host string, statebagKey string) func(multistep.StateBag) (string, error) { + return func(state multistep.StateBag) (string, error) { + if host != "" { + log.Printf("Using host value: %s", host) + return host, nil + } + ipAddress, hasIP := state.Get(statebagKey).(string) + if !hasIP { + return "", fmt.Errorf("Failed to retrieve IP address.") + } + return ipAddress, nil + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.go new file mode 100644 index 000000000..c0615a4f0 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.go @@ -0,0 +1,632 @@ +//go:generate struct-markdown +//go:generate mapstructure-to-hcl2 -type Config,SSH,WinRM,SSHTemporaryKeyPair + +package communicator + +import ( + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "time" + + "github.com/hashicorp/hcl/v2/hcldec" + helperssh "github.com/hashicorp/packer-plugin-sdk/communicator/ssh" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/pathing" + packerssh "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh" + "github.com/hashicorp/packer-plugin-sdk/template/config" + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" + "github.com/masterzen/winrm" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +// Config is the common configuration a builder uses to define and configure a Packer +// communicator. Embed this struct in your builder config to implement +// communicator support. +type Config struct { + // Packer currently supports three kinds of communicators: + // + // - `none` - No communicator will be used. If this is set, most + // provisioners also can't be used. + // + // - `ssh` - An SSH connection will be established to the machine. This + // is usually the default. + // + // - `winrm` - A WinRM connection will be established. + // + // In addition to the above, some builders have custom communicators they + // can use. For example, the Docker builder has a "docker" communicator + // that uses `docker exec` and `docker cp` to execute scripts and copy + // files. 
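+	//
+	// For example, in an HCL2 template (a sketch; both keys are defined by
+	// this struct):
+	//
+	// ```hcl
+	//   communicator = "ssh"
+	//   ssh_username = "packer"
+	// ```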
+	Type string `mapstructure:"communicator"`
+	// We recommend that you enable SSH or WinRM as the very last step in your
+	// guest's bootstrap script, but sometimes you may have a race condition
+	// where you need Packer to wait before attempting to connect to your
+	// guest.
+	//
+	// If you end up in this situation, you can use the template option
+	// `pause_before_connecting`. By default, there is no pause. For example,
+	// if you set `pause_before_connecting` to `10m`, Packer will check whether
+	// it can connect, as normal. But once a connection attempt is successful,
+	// it will disconnect and then wait 10 minutes before connecting to the
+	// guest and beginning provisioning.
+	PauseBeforeConnect time.Duration `mapstructure:"pause_before_connecting"`
+
+	SSH   `mapstructure:",squash"`
+	WinRM `mapstructure:",squash"`
+}
+
+// The SSH config defines configuration for the SSH communicator.
+type SSH struct {
+	// The address to SSH to. This usually is automatically configured by the
+	// builder.
+	SSHHost string `mapstructure:"ssh_host"`
+	// The port to connect to SSH. This defaults to `22`.
+	SSHPort int `mapstructure:"ssh_port"`
+	// The username to connect to SSH with. Required if using SSH.
+	SSHUsername string `mapstructure:"ssh_username"`
+	// A plaintext password to use to authenticate with SSH.
+	SSHPassword string `mapstructure:"ssh_password"`
+	// If specified, this is the key that will be used for SSH with the
+	// machine. The key must match a key pair name loaded up into the remote.
+	// By default, this is blank, and Packer will generate a temporary keypair
+	// unless [`ssh_password`](#ssh_password) is used.
+	// [`ssh_private_key_file`](#ssh_private_key_file) or
+	// [`ssh_agent_auth`](#ssh_agent_auth) must be specified when
+	// [`ssh_keypair_name`](#ssh_keypair_name) is utilized.
+	SSHKeyPairName string `mapstructure:"ssh_keypair_name" undocumented:"true"`
+	// The name of the temporary key pair to generate. By default, Packer
+	// generates a name that looks like `packer_<UUID>`, where `<UUID>` is
+	// a 36-character unique identifier.
+	SSHTemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" undocumented:"true"`
+	SSHTemporaryKeyPair     `mapstructure:",squash"`
+	// This overrides the value of ciphers supported by default by golang.
+	// The default value is [
+	//   "aes128-gcm@openssh.com",
+	//   "chacha20-poly1305@openssh.com",
+	//   "aes128-ctr", "aes192-ctr", "aes256-ctr",
+	// ]
+	//
+	// Valid options for ciphers include:
+	// "aes128-ctr", "aes192-ctr", "aes256-ctr", "aes128-gcm@openssh.com",
+	// "chacha20-poly1305@openssh.com",
+	// "arcfour256", "arcfour128", "arcfour", "aes128-cbc", "3des-cbc",
+	SSHCiphers []string `mapstructure:"ssh_ciphers"`
+	// If true, Packer will attempt to remove its temporary key from
+	// `~/.ssh/authorized_keys` and `/root/.ssh/authorized_keys`. This is a
+	// mostly cosmetic option, since Packer will delete the temporary private
+	// key from the host system regardless of whether this is set to true
+	// (unless the user has set the `-debug` flag). Defaults to "false";
+	// currently only works on guests with `sed` installed.
+	SSHClearAuthorizedKeys bool `mapstructure:"ssh_clear_authorized_keys"`
+	// If set, Packer will override the value of key exchange (kex) algorithms
+	// supported by default by golang. Acceptable values include:
+	// "curve25519-sha256@libssh.org", "ecdh-sha2-nistp256",
+	// "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
+	// "diffie-hellman-group14-sha1", and "diffie-hellman-group1-sha1".
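+	//
+	// For example (a sketch): `ssh_key_exchange_algorithms =
+	// ["curve25519-sha256@libssh.org"]`.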
+	SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms"`
+	// Path to a PEM encoded private key file to use to authenticate with SSH.
+	// The `~` can be used in the path and will be expanded to the home
+	// directory of the current user.
+	SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file" undocumented:"true"`
+	// Path to user certificate used to authenticate with SSH.
+	// The `~` can be used in the path and will be expanded to the
+	// home directory of the current user.
+	SSHCertificateFile string `mapstructure:"ssh_certificate_file"`
+	// If `true`, a PTY will be requested for the SSH connection. This defaults
+	// to `false`.
+	SSHPty bool `mapstructure:"ssh_pty"`
+	// The time to wait for SSH to become available. Packer uses this to
+	// determine when the machine has booted, so this is usually quite long.
+	// Example value: `10m`.
+	SSHTimeout time.Duration `mapstructure:"ssh_timeout"`
+	// Deprecated in favor of SSHTimeout
+	SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout" undocumented:"true"`
+	// If true, the local SSH agent will be used to authenticate connections to
+	// the source instance. No temporary keypair will be created, and the
+	// values of [`ssh_password`](#ssh_password) and
+	// [`ssh_private_key_file`](#ssh_private_key_file) will be ignored. The
+	// environment variable `SSH_AUTH_SOCK` must be set for this option to work
+	// properly.
+	SSHAgentAuth bool `mapstructure:"ssh_agent_auth" undocumented:"true"`
+	// If true, SSH agent forwarding will be disabled. Defaults to `false`.
+	SSHDisableAgentForwarding bool `mapstructure:"ssh_disable_agent_forwarding"`
+	// The number of handshakes to attempt with SSH once it can connect. This
+	// defaults to `10`.
+	SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"`
+	// A bastion host to use for the actual SSH connection.
+	SSHBastionHost string `mapstructure:"ssh_bastion_host"`
+	// The port of the bastion host. Defaults to `22`.
+	SSHBastionPort int `mapstructure:"ssh_bastion_port"`
+	// If `true`, the local SSH agent will be used to authenticate with the
+	// bastion host. Defaults to `false`.
+	SSHBastionAgentAuth bool `mapstructure:"ssh_bastion_agent_auth"`
+	// The username to connect to the bastion host.
+	SSHBastionUsername string `mapstructure:"ssh_bastion_username"`
+	// The password to use to authenticate with the bastion host.
+	SSHBastionPassword string `mapstructure:"ssh_bastion_password"`
+	// If `true`, keyboard-interactive authentication will be used with the
+	// bastion host.
+	SSHBastionInteractive bool `mapstructure:"ssh_bastion_interactive"`
+	// Path to a PEM encoded private key file to use to authenticate with the
+	// bastion host. The `~` can be used in the path and will be expanded to
+	// the home directory of the current user.
+	SSHBastionPrivateKeyFile string `mapstructure:"ssh_bastion_private_key_file"`
+	// Path to user certificate used to authenticate with the bastion host.
+	// The `~` can be used in the path and will be expanded to the
+	// home directory of the current user.
+	SSHBastionCertificateFile string `mapstructure:"ssh_bastion_certificate_file"`
+	// `scp` or `sftp` - how to transfer files: secure copy (the default) or
+	// SSH File Transfer Protocol.
+	SSHFileTransferMethod string `mapstructure:"ssh_file_transfer_method"`
+	// A SOCKS proxy host to use for the SSH connection.
+	SSHProxyHost string `mapstructure:"ssh_proxy_host"`
+	// The port of the SOCKS proxy. Defaults to `1080`.
+	SSHProxyPort int `mapstructure:"ssh_proxy_port"`
+	// The optional username to authenticate with the proxy server.
+	SSHProxyUsername string `mapstructure:"ssh_proxy_username"`
+	// The optional password to use to authenticate with the proxy server.
+	SSHProxyPassword string `mapstructure:"ssh_proxy_password"`
+	// How often to send "keep alive" messages to the server. Set to a negative
+	// value (`-1s`) to disable. Example value: `10s`. Defaults to `5s`.
+	SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"`
+	// The amount of time to wait for a remote command to end. This might be
+	// useful if, for example, Packer hangs on a connection after a reboot.
+	// Example: `5m`. Disabled by default.
+	SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"`
+
+	// Tunneling
+
+	//
+	SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels"`
+	//
+	SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels"`
+
+	// SSH Internals
+	SSHPublicKey  []byte `mapstructure:"ssh_public_key" undocumented:"true"`
+	SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true"`
+}
+
+// When no SSH credentials are specified, Packer will generate a temporary SSH
+// keypair for the instance. You can change the algorithm type and bits
+// settings.
+type SSHTemporaryKeyPair struct {
+	// `dsa` | `ecdsa` | `ed25519` | `rsa` (the default)
+	//
+	// Specifies the type of key to create. The possible values are 'dsa',
+	// 'ecdsa', 'ed25519', or 'rsa'.
+	SSHTemporaryKeyPairType string `mapstructure:"temporary_key_pair_type"`
+	// Specifies the number of bits in the key to create. For RSA keys, the
+	// minimum size is 1024 bits and the default is 4096 bits. Generally, 3072
+	// bits is considered sufficient. DSA keys must be exactly 1024 bits as
+	// specified by FIPS 186-2. For ECDSA keys, bits determines the key length
+	// by selecting from one of three elliptic curve sizes: 256, 384 or 521
+	// bits. Attempting to use bit lengths other than these three values for
+	// ECDSA keys will fail. Ed25519 keys have a fixed length and bits will be
+	// ignored.
+	SSHTemporaryKeyPairBits int `mapstructure:"temporary_key_pair_bits"`
+}
+
+// The WinRM config defines configuration for the WinRM communicator.
+type WinRM struct {
+	// The username to use to connect to WinRM.
+	WinRMUser string `mapstructure:"winrm_username"`
+	// The password to use to connect to WinRM.
+	WinRMPassword string `mapstructure:"winrm_password"`
+	// The address for WinRM to connect to.
+	//
+	// NOTE: If using an Amazon EBS builder, you can specify the interface
+	// WinRM connects to via
+	// [`ssh_interface`](/docs/builders/amazon-ebs#ssh_interface)
+	WinRMHost string `mapstructure:"winrm_host"`
+	// Setting this to `true` adds the remote
+	// `host:port` to the `NO_PROXY` environment variable. This has the effect of
+	// bypassing any configured proxies when connecting to the remote host.
+	// Defaults to `false`.
+	WinRMNoProxy bool `mapstructure:"winrm_no_proxy"`
+	// The WinRM port to connect to. This defaults to `5985` for a plain
+	// unencrypted connection and `5986` for SSL when `winrm_use_ssl` is set to
+	// true.
+	WinRMPort int `mapstructure:"winrm_port"`
+	// The amount of time to wait for WinRM to become available. This defaults
+	// to `30m` since setting up a Windows machine generally takes a long time.
+	WinRMTimeout time.Duration `mapstructure:"winrm_timeout"`
+	// If `true`, use HTTPS for WinRM.
+	WinRMUseSSL bool `mapstructure:"winrm_use_ssl"`
+	// If `true`, do not check the server certificate chain and host name.
+	WinRMInsecure bool `mapstructure:"winrm_insecure"`
+	// If `true`, NTLMv2 authentication (with session security) will be used
+	// for WinRM, rather than the default (basic authentication), removing the
+	// requirement for basic authentication to be enabled within the target
+	// guest. Further reading for remote connection authentication can be found
+	// [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx).
+	WinRMUseNTLM            bool `mapstructure:"winrm_use_ntlm"`
+	WinRMTransportDecorator func() winrm.Transporter
+}
+
+// The ConfigSpec funcs are used by the Packer core to parse HCL2 templates.
+func (c *SSH) ConfigSpec() hcldec.ObjectSpec { return c.FlatMapstructure().HCL2Spec() }
+
+// The ConfigSpec funcs are used by the Packer core to parse HCL2 templates.
+func (c *WinRM) ConfigSpec() hcldec.ObjectSpec { return c.FlatMapstructure().HCL2Spec() }
+
+// Configure parses the JSON template into the Config structs.
+func (c *SSH) Configure(raws ...interface{}) ([]string, error) {
+	err := config.Decode(c, nil, raws...)
+	return nil, err
+}
+
+// Configure parses the JSON template into the Config structs.
+func (c *WinRM) Configure(raws ...interface{}) ([]string, error) {
+	err := config.Decode(c, nil, raws...)
+	return nil, err
+}
+
+var (
+	_ packersdk.ConfigurableCommunicator = new(SSH)
+	_ packersdk.ConfigurableCommunicator = new(WinRM)
+)
+
+// SSHInterface defines whether to use public or private addresses, and whether
+// to use IPv4 or IPv6.
+type SSHInterface struct {
+	// One of `public_ip`, `private_ip`, `public_dns`, or `private_dns`. If
+	// set, either the public IP address, private IP address, public DNS name
+	// or private DNS name will be used as the host for SSH. The default
+	// behaviour if inside a VPC is to use the public IP address if available,
+	// otherwise the private IP address will be used. If not in a VPC the
+	// public DNS name will be used. Also works for WinRM.
+	//
+	// Where Packer is configured for an outbound proxy but WinRM traffic
+	// should be direct, `ssh_interface` must be set to `private_dns` and
+	// `<region>.compute.internal` included in the `NO_PROXY` environment
+	// variable.
+	SSHInterface string `mapstructure:"ssh_interface"`
+	// The IP version to use for SSH connections; valid values are `4` and
+	// `6`. Useful on dual-stacked instances where the default behavior is to
+	// connect via whichever IP address is returned first from the OpenStack
+	// API.
+	SSHIPVersion string `mapstructure:"ssh_ip_version"`
+}
+
+// ReadSSHPrivateKeyFile returns the SSH private key bytes.
+func (c *Config) ReadSSHPrivateKeyFile() ([]byte, error) {
+	var privateKey []byte
+
+	if c.SSHPrivateKeyFile != "" {
+		keyPath, err := pathing.ExpandUser(c.SSHPrivateKeyFile)
+		if err != nil {
+			return []byte{}, fmt.Errorf("Error expanding path for SSH private key: %s", err)
+		}
+
+		privateKey, err = ioutil.ReadFile(keyPath)
+		if err != nil {
+			return privateKey, fmt.Errorf("Error reading SSH private key: %s", err)
+		}
+	}
+	return privateKey, nil
+}
+
+// SSHConfigFunc returns a function that can be used for the SSH communicator
+// config for connecting to the instance created over SSH using the private key
+// or password.
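+//
+// A minimal sketch of direct use (the address and state bag are
+// illustrative):
+//
+//	clientConfig, err := c.SSHConfigFunc()(state)
+//	if err != nil {
+//		return err
+//	}
+//	client, err := ssh.Dial("tcp", address, clientConfig)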
+func (c *Config) SSHConfigFunc() func(multistep.StateBag) (*ssh.ClientConfig, error) {
+	return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
+		sshConfig := &ssh.ClientConfig{
+			User:            c.SSHUsername,
+			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+		}
+		if len(c.SSHCiphers) != 0 {
+			sshConfig.Config.Ciphers = c.SSHCiphers
+		}
+
+		if len(c.SSHKEXAlgos) != 0 {
+			sshConfig.Config.KeyExchanges = c.SSHKEXAlgos
+		}
+
+		if c.SSHAgentAuth {
+			authSock := os.Getenv("SSH_AUTH_SOCK")
+			if authSock == "" {
+				return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
+			}
+
+			sshAgent, err := net.Dial("unix", authSock)
+			if err != nil {
+				return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
+			}
+
+			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))
+		}
+
+		var privateKeys [][]byte
+		if c.SSHPrivateKeyFile != "" {
+			privateKey, err := c.ReadSSHPrivateKeyFile()
+			if err != nil {
+				return nil, err
+			}
+			privateKeys = append(privateKeys, privateKey)
+		}
+
+		// aws, alicloud, cloudstack, digitalocean, oneandone, openstack,
+		// oracle & profitbricks key
+		if iKey, hasKey := state.GetOk("privateKey"); hasKey {
+			privateKeys = append(privateKeys, []byte(iKey.(string)))
+		}
+
+		if len(c.SSHPrivateKey) != 0 {
+			privateKeys = append(privateKeys, c.SSHPrivateKey)
+		}
+
+		certPath := ""
+		if c.SSHCertificateFile != "" {
+			var err error
+			certPath, err = pathing.ExpandUser(c.SSHCertificateFile)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		for _, key := range privateKeys {
+			signer, err := ssh.ParsePrivateKey(key)
+			if err != nil {
+				return nil, fmt.Errorf("Error parsing SSH private key: %s", err)
+			}
+
+			if certPath != "" {
+				signer, err = helperssh.ReadCertificate(certPath, signer)
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
+		}
+
+		if c.SSHPassword != "" {
+			sshConfig.Auth = append(sshConfig.Auth,
+				ssh.Password(c.SSHPassword),
+				ssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(c.SSHPassword)),
+			)
+		}
+		return sshConfig, nil
+	}
+}
+
+// Port returns the port that will be used for access based on config.
+func (c *Config) Port() int {
+	switch c.Type {
+	case "ssh":
+		return c.SSHPort
+	case "winrm":
+		return c.WinRMPort
+	default:
+		return 0
+	}
+}
+
+// Host returns the host that will be used for access based on config.
+func (c *Config) Host() string {
+	switch c.Type {
+	case "ssh":
+		return c.SSHHost
+	case "winrm":
+		return c.WinRMHost
+	default:
+		return ""
+	}
+}
+
+// User returns the user that will be used for access based on config.
+func (c *Config) User() string {
+	switch c.Type {
+	case "ssh":
+		return c.SSHUsername
+	case "winrm":
+		return c.WinRMUser
+	default:
+		return ""
+	}
+}
+
+// Password returns the password that will be used for access based on config.
+func (c *Config) Password() string {
+	switch c.Type {
+	case "ssh":
+		return c.SSHPassword
+	case "winrm":
+		return c.WinRMPassword
+	default:
+		return ""
+	}
+}
+
+func (c *Config) Prepare(ctx *interpolate.Context) []error {
+	if c.Type == "" {
+		c.Type = "ssh"
+	}
+
+	var errs []error
+	switch c.Type {
+	case "ssh":
+		if es := c.prepareSSH(ctx); len(es) > 0 {
+			errs = append(errs, es...)
+		}
+	case "winrm":
+		if es := c.prepareWinRM(ctx); len(es) > 0 {
+			errs = append(errs, es...)
+		}
+	case "docker", "dockerWindowsContainer", "none":
+		break
+	default:
+		return []error{fmt.Errorf("Communicator type %s is invalid", c.Type)}
+	}
+
+	return errs
+}
+
+func (c *Config) prepareSSH(ctx *interpolate.Context) []error {
+	if c.SSHPort == 0 {
+		c.SSHPort = 22
+	}
+
+	if c.SSHTimeout == 0 {
+		c.SSHTimeout = 5 * time.Minute
+	}
+
+	if c.SSHKeepAliveInterval == 0 {
+		c.SSHKeepAliveInterval = 5 * time.Second
+	}
+
+	if c.SSHHandshakeAttempts == 0 {
+		c.SSHHandshakeAttempts = 10
+	}
+
+	if c.SSHBastionHost != "" {
+		if c.SSHBastionPort == 0 {
+			c.SSHBastionPort = 22
+		}
+
+		if c.SSHBastionPrivateKeyFile == "" && c.SSHPrivateKeyFile != "" {
+			c.SSHBastionPrivateKeyFile = c.SSHPrivateKeyFile
+		}
+
+		if c.SSHBastionCertificateFile == "" && c.SSHCertificateFile != "" {
+			c.SSHBastionCertificateFile = c.SSHCertificateFile
+		}
+	}
+
+	if c.SSHProxyHost != "" {
+		if c.SSHProxyPort == 0 {
+			c.SSHProxyPort = 1080
+		}
+	}
+
+	if c.SSHFileTransferMethod == "" {
+		c.SSHFileTransferMethod = "scp"
+	}
+
+	// Backwards compatibility
+	if c.SSHWaitTimeout != 0 {
+		c.SSHTimeout = c.SSHWaitTimeout
+	}
+
+	// Validation
+	var errs []error
+	if c.SSHUsername == "" {
+		errs = append(errs, errors.New("An ssh_username must be specified\n Note: some builders used to default ssh_username to \"root\"."))
+	}
+
+	if c.SSHPrivateKeyFile != "" {
+		path, err := pathing.ExpandUser(c.SSHPrivateKeyFile)
+		if err != nil {
+			errs = append(errs, fmt.Errorf(
+				"ssh_private_key_file is invalid: %s", err))
+		} else if _, err := os.Stat(path); err != nil {
+			errs = append(errs, fmt.Errorf(
+				"ssh_private_key_file is invalid: %s", err))
+		} else {
+			if c.SSHCertificateFile != "" {
+				certPath, err := pathing.ExpandUser(c.SSHCertificateFile)
+				if err != nil {
+					errs = append(errs, fmt.Errorf("invalid identity certificate: %s", err))
+				}
+
+				if _, err := helperssh.FileSignerWithCert(path, certPath); err != nil {
+					errs = append(errs, fmt.Errorf(
+						"ssh_private_key_file is invalid: %s", err))
+				}
+			} else {
+				if _, err := helperssh.FileSigner(path); err != nil {
+					errs = append(errs, fmt.Errorf(
+						"ssh_private_key_file is invalid: %s", err))
+				}
+			}
+		}
+	}
+
+	if c.SSHBastionHost != "" && !c.SSHBastionAgentAuth {
+		if c.SSHBastionPassword == "" && c.SSHBastionPrivateKeyFile == "" {
+			errs = append(errs, errors.New(
+				"ssh_bastion_password or ssh_bastion_private_key_file must be specified"))
+		} else if c.SSHBastionPrivateKeyFile != "" {
+			path, err := pathing.ExpandUser(c.SSHBastionPrivateKeyFile)
+			if err != nil {
+				errs = append(errs, fmt.Errorf(
+					"ssh_bastion_private_key_file is invalid: %s", err))
+			} else if _, err := os.Stat(path); err != nil {
+				errs = append(errs, fmt.Errorf(
+					"ssh_bastion_private_key_file is invalid: %s", err))
+			} else {
+				if c.SSHBastionCertificateFile != "" {
+					certPath, err := pathing.ExpandUser(c.SSHBastionCertificateFile)
+					if err != nil {
+						errs = append(errs, fmt.Errorf("invalid identity certificate: %s", err))
+					}
+					if _, err := helperssh.FileSignerWithCert(path, certPath); err != nil {
+						errs = append(errs, fmt.Errorf(
+							"ssh_bastion_private_key_file is invalid: %s", err))
+					}
+				} else {
+					if _, err := helperssh.FileSigner(path); err != nil {
+						errs = append(errs, fmt.Errorf(
+							"ssh_bastion_private_key_file is invalid: %s", err))
+					}
+				}
+			}
+		}
+	}
+
+	if c.SSHFileTransferMethod != "scp" && c.SSHFileTransferMethod != "sftp" {
+		errs = append(errs, fmt.Errorf(
+			"ssh_file_transfer_method ('%s') is invalid, valid methods: sftp, scp",
+			c.SSHFileTransferMethod))
+	}
+	if c.SSHBastionHost != "" && c.SSHProxyHost != "" {
+		errs = append(errs, errors.New("please specify either ssh_bastion_host or ssh_proxy_host, not both"))
+	}
+
+	for _, v := range c.SSHLocalTunnels {
+		_, err := helperssh.ParseTunnelArgument(v, packerssh.UnsetTunnel)
+		if err != nil {
+			errs = append(errs, fmt.Errorf(
+				"ssh_local_tunnels ('%s') is invalid: %s", v, err))
+		}
+	}
+
+	for _, v := range c.SSHRemoteTunnels {
+		_, err := helperssh.ParseTunnelArgument(v, packerssh.UnsetTunnel)
+		if err != nil {
+			errs = append(errs, fmt.Errorf(
+				"ssh_remote_tunnels ('%s') is invalid: %s", v, err))
+		}
+	}
+
+	return errs
+}
+
+func (c *Config) prepareWinRM(ctx *interpolate.Context) (errs []error) {
+	if c.WinRMPort == 0 && c.WinRMUseSSL {
+		c.WinRMPort = 5986
+	} else if c.WinRMPort == 0 {
+		c.WinRMPort = 5985
+	}
+
+	if c.WinRMTimeout == 0 {
+		c.WinRMTimeout = 30 * time.Minute
+	}
+
+	if c.WinRMUseNTLM {
+		c.WinRMTransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} }
+	}
+
+	if c.WinRMUser == "" {
+		errs = append(errs, errors.New("winrm_username must be specified."))
+	}
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.hcl2spec.go
new file mode 100644
index 000000000..302f87a8c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.hcl2spec.go
@@ -0,0 +1,288 @@
+// Code generated by "mapstructure-to-hcl2 -type Config,SSH,WinRM,SSHTemporaryKeyPair"; DO NOT EDIT.
+
+package communicator
+
+import (
+	"github.com/hashicorp/hcl/v2/hcldec"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// FlatConfig is an auto-generated flat version of Config.
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
+type FlatConfig struct { + Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"` + PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"` + SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"` + SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"` + SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"` + SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"` + SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"` + SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"` + SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"` + SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"` + SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"` + SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"` + SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"` + SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"` + SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"` + SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"` + SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"` + SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"` + SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"` + SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"` + SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"` + SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"` + SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"` + SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"` + SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"` + SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"` + SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"` + SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"` + SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"` + SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"` + 
SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"` + SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"` + SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"` + SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"` + SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"` + SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"` + SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"` + SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"` + SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"` + SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"` + WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"` + WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"` + WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"` + WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"` + WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"` + WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"` + WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"` + WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"` + WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"` +} + +// FlatMapstructure returns a new FlatConfig. +// FlatConfig is an auto-generated flat version of Config. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatConfig) +} + +// HCL2Spec returns the hcl spec of a Config. +// This spec is used by HCL to read the fields of Config. +// The decoded values from this spec will then be applied to a FlatConfig. 
+func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false}, + "pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false}, + "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false}, + "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false}, + "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, + "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false}, + "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false}, + "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false}, + "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false}, + "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false}, + "ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false}, + "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false}, + "ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false}, + "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, + "ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false}, + "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false}, + "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false}, + "ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false}, + "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false}, + "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false}, + "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false}, + "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false}, + "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false}, + "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false}, + "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false}, + "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false}, + "ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false}, + "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false}, + "ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false}, + "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false}, + "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false}, + "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false}, + "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: 
cty.String, Required: false}, + "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false}, + "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false}, + "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false}, + "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false}, + "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false}, + "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false}, + "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false}, + "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false}, + "winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false}, + "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false}, + "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false}, + "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false}, + "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false}, + "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false}, + } + return s +} + +// FlatSSH is an auto-generated flat version of SSH. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
+type FlatSSH struct { + SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"` + SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"` + SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"` + SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"` + SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"` + SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"` + SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"` + SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"` + SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"` + SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"` + SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"` + SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"` + SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"` + SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"` + SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"` + SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"` + SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"` + SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"` + SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"` + SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"` + SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"` + SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"` + SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"` + SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"` + SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"` + SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"` + SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"` + SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"` + SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"` + SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"` + SSHProxyUsername *string 
`mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"` + SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"` + SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"` + SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"` + SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"` + SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"` + SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"` + SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"` +} + +// FlatMapstructure returns a new FlatSSH. +// FlatSSH is an auto-generated flat version of SSH. +// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up. +func (*SSH) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatSSH) +} + +// HCL2Spec returns the hcl spec of an SSH. +// This spec is used by HCL to read the fields of SSH. +// The decoded values from this spec will then be applied to a FlatSSH. +func (*FlatSSH) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false}, + "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false}, + "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, + "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false}, + "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false}, + "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false}, + "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false}, + "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false}, + "ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false}, + "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false}, + "ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false}, + "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, + "ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false}, + "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false}, + "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false}, + "ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false}, + "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false}, + "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false}, + "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false}, + "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false}, +
"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false}, + "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false}, + "ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false}, + "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false}, + "ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false}, + "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false}, + "ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false}, + "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false}, + "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false}, + "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false}, + "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false}, + "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false}, + "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false}, + "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false}, + "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false}, + "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false}, + } + return s +} + +// FlatSSHTemporaryKeyPair is an auto-generated flat version of SSHTemporaryKeyPair. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatSSHTemporaryKeyPair struct { + SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"` + SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"` +} + +// FlatMapstructure returns a new FlatSSHTemporaryKeyPair. +// FlatSSHTemporaryKeyPair is an auto-generated flat version of SSHTemporaryKeyPair. +// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up. +func (*SSHTemporaryKeyPair) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatSSHTemporaryKeyPair) +} + +// HCL2Spec returns the hcl spec of an SSHTemporaryKeyPair. +// This spec is used by HCL to read the fields of SSHTemporaryKeyPair. +// The decoded values from this spec will then be applied to a FlatSSHTemporaryKeyPair. +func (*FlatSSHTemporaryKeyPair) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false}, + "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false}, + } + return s +} + +// FlatWinRM is an auto-generated flat version of WinRM.
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatWinRM struct { + WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"` + WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"` + WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"` + WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"` + WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"` + WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"` + WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"` + WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"` + WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"` +} + +// FlatMapstructure returns a new FlatWinRM. +// FlatWinRM is an auto-generated flat version of WinRM. +// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up. +func (*WinRM) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatWinRM) +} + +// HCL2Spec returns the hcl spec of a WinRM. +// This spec is used by HCL to read the fields of WinRM. +// The decoded values from this spec will then be applied to a FlatWinRM. +func (*FlatWinRM) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false}, + "winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false}, + "winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false}, + "winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false}, + "winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false}, + "winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false}, + "winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false}, + "winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false}, + "winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false}, + } + return s +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/doc.go new file mode 100644 index 000000000..e5990c9a6 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/doc.go @@ -0,0 +1,10 @@ +/* +Package communicator provides common steps for connecting to an instance +using the Packer communicator. These steps can be used by builders. +Normally, a builder will want to use StepConnect, which is smart enough +to determine which kind of communicator, and therefore which kind of +substep, it should run. + +Various helper functions are also supplied.
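+ +As a rough illustration only, a builder might wire the step into its step +list like so (commHost is a stand-in for the builder's own host callback, +and SSHConfigFunc is assumed to be provided by this package's Config): + + &communicator.StepConnect{ + Config: &b.config.Comm, + Host: commHost, + SSHConfig: b.config.Comm.SSHConfigFunc(), + }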
+*/ +package communicator diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/key_pair.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/key_pair.go new file mode 100644 index 000000000..0ca6b95ec --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/key_pair.go @@ -0,0 +1,260 @@ +package ssh + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "strings" + + gossh "golang.org/x/crypto/ssh" +) + +const ( + // defaultRsaBits is the default bits of entropy for a new RSA + // key pair. That's a lot of bits. + defaultRsaBits = 4096 + + // Markers for various SSH key pair types. + Default KeyPairType = "" + Rsa KeyPairType = "RSA" + Ecdsa KeyPairType = "ECDSA" + Dsa KeyPairType = "DSA" + Ed25519 KeyPairType = "ED25519" +) + +// KeyPairType represents different types of SSH key pairs. +// See the 'const' block for details. +type KeyPairType string + +func (o KeyPairType) String() string { + return string(o) +} + +// KeyPair represents an SSH key pair. +type KeyPair struct { + // PrivateKeyPemBlock represents the key pair's private key in + // ASN.1 Distinguished Encoding Rules (DER) format in a + // Privacy-Enhanced Mail (PEM) block. + PrivateKeyPemBlock []byte + + // PublicKeyAuthorizedKeysLine represents the key pair's public key + // as a line in OpenSSH authorized_keys. + PublicKeyAuthorizedKeysLine []byte + + // Comment is the key pair's comment. This is typically used + // to identify the key pair's owner in the SSH user's + // 'authorized_keys' file. + Comment string +} + +// KeyPairFromPrivateKey returns a KeyPair loaded from an existing private key. +// +// Supported key pair types include: +// - DSA +// - ECDSA +// - ED25519 +// - RSA +func KeyPairFromPrivateKey(config FromPrivateKeyConfig) (KeyPair, error) { + privateKey, err := gossh.ParseRawPrivateKey(config.RawPrivateKeyPemBlock) + if err != nil { + return KeyPair{}, err + } + + switch pk := privateKey.(type) { + case crypto.Signer: + // crypto.Signer is implemented by ecdsa.PrivateKey, + // ed25519.PrivateKey, and rsa.PrivateKey - separate cases + // for each PrivateKey type would be redundant. + publicKey, err := gossh.NewPublicKey(pk.Public()) + if err != nil { + return KeyPair{}, err + } + return KeyPair{ + Comment: config.Comment, + PrivateKeyPemBlock: config.RawPrivateKeyPemBlock, + PublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment), + }, nil + case *dsa.PrivateKey: + publicKey, err := gossh.NewPublicKey(&pk.PublicKey) + if err != nil { + return KeyPair{}, err + } + return KeyPair{ + Comment: config.Comment, + PrivateKeyPemBlock: config.RawPrivateKeyPemBlock, + PublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment), + }, nil + } + + return KeyPair{}, fmt.Errorf("Cannot parse existing SSH key pair - unknown key pair type") +} + +// FromPrivateKeyConfig describes how an SSH key pair should be loaded from an +// existing private key. +type FromPrivateKeyConfig struct { + // RawPrivateKeyPemBlock is the raw private key that the key pair + // should be loaded from. + RawPrivateKeyPemBlock []byte + + // Comment is the key pair's comment. This is typically used + // to identify the key pair's owner in the SSH user's + // 'authorized_keys' file. + Comment string +} + +// NewKeyPair generates a new SSH key pair using the specified +// CreateKeyPairConfig. 
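+// +// A minimal usage sketch (illustrative; error handling elided): +// +// kp, _ := NewKeyPair(CreateKeyPairConfig{ +// Type: Ecdsa, // also the default when Type is left as Default +// Bits: 521, +// Comment: "packer-temp-key", +// }) +// // kp.PrivateKeyPemBlock holds the PEM private key; +// // kp.PublicKeyAuthorizedKeysLine holds the authorized_keys entry.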
+func NewKeyPair(config CreateKeyPairConfig) (KeyPair, error) { + if config.Type == Default { + config.Type = Ecdsa + } + + switch config.Type { + case Ecdsa: + return newEcdsaKeyPair(config) + case Rsa: + return newRsaKeyPair(config) + } + + return KeyPair{}, fmt.Errorf("Unable to generate new key pair, type %s is not supported", + config.Type.String()) +} + +// newEcdsaKeyPair returns a new ECDSA SSH key pair. +func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) { + var curve elliptic.Curve + + switch config.Bits { + case 0: + config.Bits = 521 + fallthrough + case 521: + curve = elliptic.P521() + case 384: + curve = elliptic.P384() + case 256: + curve = elliptic.P256() + case 224: + // Not supported by "golang.org/x/crypto/ssh". + return KeyPair{}, fmt.Errorf("golang.org/x/crypto/ssh does not support %d bits", config.Bits) + default: + return KeyPair{}, fmt.Errorf("crypto/elliptic does not support %d bits", config.Bits) + } + + privateKey, err := ecdsa.GenerateKey(curve, rand.Reader) + if err != nil { + return KeyPair{}, err + } + + sshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return KeyPair{}, err + } + + privateRaw, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return KeyPair{}, err + } + + privatePem, err := rawPemBlock(&pem.Block{ + Type: "EC PRIVATE KEY", + Headers: nil, + Bytes: privateRaw, + }) + if err != nil { + return KeyPair{}, err + } + + return KeyPair{ + PrivateKeyPemBlock: privatePem, + PublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment), + Comment: config.Comment, + }, nil +} + +// newRsaKeyPair returns a new RSA SSH key pair. +func newRsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) { + if config.Bits == 0 { + config.Bits = defaultRsaBits + } + + privateKey, err := rsa.GenerateKey(rand.Reader, config.Bits) + if err != nil { + return KeyPair{}, err + } + + sshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return KeyPair{}, err + } + + privatePemBlock, err := rawPemBlock(&pem.Block{ + Type: "RSA PRIVATE KEY", + Headers: nil, + Bytes: x509.MarshalPKCS1PrivateKey(privateKey), + }) + if err != nil { + return KeyPair{}, err + } + + return KeyPair{ + PrivateKeyPemBlock: privatePemBlock, + PublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment), + Comment: config.Comment, + }, nil +} + +// CreateKeyPairConfig describes how an SSH key pair should be created. +type CreateKeyPairConfig struct { + // Type describes the key pair's type. + Type KeyPairType + + // Bits represents the key pair's bits of entropy. E.g., 4096 for + // a 4096 bit RSA key pair, or 521 for an ECDSA key pair with a + // 521-bit curve. + Bits int + + // Comment is the resulting key pair's comment. This is typically + // used to identify the key pair's owner in the SSH user's + // 'authorized_keys' file. + Comment string +} + +// rawPemBlock encodes a pem.Block to a slice of bytes. +func rawPemBlock(block *pem.Block) ([]byte, error) { + buffer := bytes.NewBuffer(nil) + + err := pem.Encode(buffer, block) + if err != nil { + return []byte{}, err + } + + return buffer.Bytes(), nil +} + +// authorizedKeysLine serializes a key for inclusion in an OpenSSH +// authorized_keys file. The return value ends without a newline so +// a comment can be appended to the end. +func authorizedKeysLine(key gossh.PublicKey, comment string) []byte { + marshaledPublicKey := gossh.MarshalAuthorizedKey(key) + + // Remove the mandatory unix new line.
Awful, but the go + // ssh library automatically appends a unix new line. + // We remove it so a key comment can be safely appended to the + // end of the string. + marshaledPublicKey = bytes.TrimSpace(marshaledPublicKey) + + if len(strings.TrimSpace(comment)) > 0 { + marshaledPublicKey = append(marshaledPublicKey, ' ') + marshaledPublicKey = append(marshaledPublicKey, comment...) + } + + return marshaledPublicKey +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/ssh.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/ssh.go new file mode 100644 index 000000000..9c330bc27 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/ssh.go @@ -0,0 +1,117 @@ +// Package ssh provides tooling for generating a temporary SSH keypair and +// for connecting to an instance via a tunnel. +package ssh + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "time" + + "golang.org/x/crypto/ssh" +) + +func parseKeyFile(path string) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + keyBytes, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + // We parse the private key on our own first so that we can + // show a nicer error if the private key has a password. + block, _ := pem.Decode(keyBytes) + if block == nil { + return nil, fmt.Errorf( + "Failed to read key '%s': no key found", path) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return nil, fmt.Errorf( + "Failed to read key '%s': password protected keys are\n"+ + "not supported. Please decrypt the key prior to use.", path) + } + return keyBytes, nil +} + +// FileSigner returns an ssh.Signer for a key file. +func FileSigner(path string) (ssh.Signer, error) { + + keyBytes, err := parseKeyFile(path) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + signer, err := ssh.ParsePrivateKey(keyBytes) + if err != nil { + return nil, fmt.Errorf("Error setting up SSH config: %s", err) + } + + return signer, nil +} + +func ReadCertificate(certificatePath string, keySigner ssh.Signer) (ssh.Signer, error) { + + if certificatePath == "" { + return keySigner, fmt.Errorf("no certificate file provided") + } + + // Load the certificate + cert, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("unable to read certificate file: %v", err) + } + + pk, _, _, _, err := ssh.ParseAuthorizedKey(cert) + if err != nil { + return nil, fmt.Errorf("unable to parse public key: %v", err) + } + + certificate, ok := pk.(*ssh.Certificate) + + if !ok { + return nil, fmt.Errorf("Error loading certificate") + } + + err = checkValidCert(certificate) + + if err != nil { + return nil, fmt.Errorf("%s not a valid cert: %v", certificatePath, err) + } + + certSigner, err := ssh.NewCertSigner(certificate, keySigner) + if err != nil { + return nil, fmt.Errorf("failed to create cert signer: %v", err) + } + + return certSigner, nil +} + +// FileSignerWithCert returns an ssh.Signer that combines the key file with +// the certificate at certificatePath.
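+// +// Illustrative use (paths are placeholders): +// +// signer, err := FileSignerWithCert("/home/user/.ssh/id_rsa", "/home/user/.ssh/id_rsa-cert.pub") +// if err == nil { +// cfg := &ssh.ClientConfig{Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)}} +// _ = cfg +// }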
+func FileSignerWithCert(path string, certificatePath string) (ssh.Signer, error) { + + keySigner, err := FileSigner(path) + + if err != nil { + return nil, err + } + return ReadCertificate(certificatePath, keySigner) +} + +func checkValidCert(cert *ssh.Certificate) error { + const CertTimeInfinity = 1<<64 - 1 + unixNow := time.Now().Unix() + + if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { + return fmt.Errorf("ssh: cert is not yet valid") + } + if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { + return fmt.Errorf("ssh: cert has expired") + } + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/tunnel.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/tunnel.go new file mode 100644 index 000000000..7e30f0ef5 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/tunnel.go @@ -0,0 +1,45 @@ +package ssh + +import ( + "fmt" + "net" + "strconv" + "strings" + + "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh" +) + +// ParseTunnelArgument parses an SSH tunneling argument compatible with the openssh client form. +// Valid formats: +// `port:host:hostport` +// NYI `[bind_address:]port:host:hostport` +func ParseTunnelArgument(forward string, direction ssh.TunnelDirection) (ssh.TunnelSpec, error) { + parts := strings.SplitN(forward, ":", 2) + if len(parts) != 2 { + return ssh.TunnelSpec{}, fmt.Errorf("Error parsing tunnel '%s': %v", forward, parts) + } + listeningPort, forwardingAddr := parts[0], parts[1] + + _, sPort, err := net.SplitHostPort(forwardingAddr) + if err != nil { + return ssh.TunnelSpec{}, fmt.Errorf("Error parsing forwarding, must be a tcp address: %s", err) + } + _, err = strconv.Atoi(sPort) + if err != nil { + return ssh.TunnelSpec{}, fmt.Errorf("Error parsing forwarding port, must be a valid port: %s", err) + } + _, err = strconv.Atoi(listeningPort) + if err != nil { + return ssh.TunnelSpec{}, fmt.Errorf("Error parsing listening port, must be a valid port: %s", err) + } + + return ssh.TunnelSpec{ + Direction: direction, + ForwardAddr: forwardingAddr, + ForwardType: "tcp", + ListenAddr: fmt.Sprintf("localhost:%s", listeningPort), + ListenType: "tcp", + }, nil + // So we parsed all that, and are just going to ignore it now. We would + // have used the information to set the type here. +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/algorithm_enumer.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/algorithm_enumer.go new file mode 100644 index 000000000..1a300735b --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/algorithm_enumer.go @@ -0,0 +1,52 @@ +// Code generated by "enumer -type Algorithm -transform snake"; DO NOT EDIT. + +// +package sshkey + +import ( + "fmt" +) + +const _AlgorithmName = "rsadsaecdsaed25519" + +var _AlgorithmIndex = [...]uint8{0, 3, 6, 11, 18} + +func (i Algorithm) String() string { + if i < 0 || i >= Algorithm(len(_AlgorithmIndex)-1) { + return fmt.Sprintf("Algorithm(%d)", i) + } + return _AlgorithmName[_AlgorithmIndex[i]:_AlgorithmIndex[i+1]] +} + +var _AlgorithmValues = []Algorithm{0, 1, 2, 3} + +var _AlgorithmNameToValueMap = map[string]Algorithm{ + _AlgorithmName[0:3]: 0, + _AlgorithmName[3:6]: 1, + _AlgorithmName[6:11]: 2, + _AlgorithmName[11:18]: 3, +} + +// AlgorithmString retrieves an enum value from the enum constants string name. 
+// Throws an error if the param is not part of the enum. +func AlgorithmString(s string) (Algorithm, error) { + if val, ok := _AlgorithmNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Algorithm values", s) +} + +// AlgorithmValues returns all values of the enum +func AlgorithmValues() []Algorithm { + return _AlgorithmValues +} + +// IsAAlgorithm returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Algorithm) IsAAlgorithm() bool { + for _, v := range _AlgorithmValues { + if i == v { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/generate.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/generate.go new file mode 100644 index 000000000..a0f47f62e --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/generate.go @@ -0,0 +1,255 @@ +package sshkey + +import ( + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "fmt" + "io" + "math/big" + + "golang.org/x/crypto/ssh" +) + +type Algorithm int + +//go:generate enumer -type Algorithm -transform snake +const ( + RSA Algorithm = iota + DSA + ECDSA + ED25519 +) + +var ( + ErrUnknownAlgorithm = fmt.Errorf("sshkey: unknown private key algorithm") + ErrInvalidRSAKeySize = fmt.Errorf("sshkey: invalid private key rsa size: must be at least 1024") + ErrInvalidECDSAKeySize = fmt.Errorf("sshkey: invalid private key ecdsa size, must be one of 256, 384 or 521") + ErrInvalidDSAKeySize = fmt.Errorf("sshkey: invalid private key dsa size, must be one of 1024, 2048 or 3072") +) + +// Pair represents an ssh key pair. +type Pair struct { + Private []byte + Public []byte +} + +func NewPair(public, private interface{}) (*Pair, error) { + kb, err := x509.MarshalPKCS8PrivateKey(private) + if err != nil { + return nil, err + } + + privBlk := &pem.Block{ + Type: "PRIVATE KEY", + Headers: nil, + Bytes: kb, + } + + publicKey, err := ssh.NewPublicKey(public) + if err != nil { + return nil, err + } + return &Pair{ + Private: pem.EncodeToMemory(privBlk), + Public: ssh.MarshalAuthorizedKey(publicKey), + }, nil +} + +// PairFromED25519 marshals a valid pair of openssh pem blocks for ED25519 keypairs. +// NewPair can handle ed25519 pairs but generates a format that OpenSSH +// rejects (`Load key "id_ed25519": invalid format`), so this dedicated +// marshaller is used instead.
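+// +// Sketch of intended use (keys come from crypto/ed25519): +// +// pub, priv, _ := ed25519.GenerateKey(cryptorand.Reader) +// pair, _ := PairFromED25519(pub, priv) +// // pair.Private is an "OPENSSH PRIVATE KEY" PEM block.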
+func PairFromED25519(public ed25519.PublicKey, private ed25519.PrivateKey) (*Pair, error) { + // see https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L1273-L1443 + key := struct { + Pub []byte + Priv []byte + Comment string + Pad []byte `ssh:"rest"` + }{ + Pub: public, + Priv: private, + } + keyBytes := ssh.Marshal(key) + + pk1 := struct { + Check1 uint32 + Check2 uint32 + Keytype string + Rest []byte `ssh:"rest"` + }{ + Keytype: ssh.KeyAlgoED25519, + Rest: keyBytes, + } + pk1Bytes := ssh.Marshal(pk1) + + k := struct { + CipherName string + KdfName string + KdfOpts string + NumKeys uint32 + PubKey []byte + PrivKeyBlock []byte + }{ + CipherName: "none", + KdfName: "none", + KdfOpts: "", + NumKeys: 1, + PrivKeyBlock: pk1Bytes, + } + + const opensshV1Magic = "openssh-key-v1\x00" + + privBlk := &pem.Block{ + Type: "OPENSSH PRIVATE KEY", + Headers: nil, + Bytes: append([]byte(opensshV1Magic), ssh.Marshal(k)...), + } + publicKey, err := ssh.NewPublicKey(public) + if err != nil { + return nil, err + } + return &Pair{ + Private: pem.EncodeToMemory(privBlk), + Public: ssh.MarshalAuthorizedKey(publicKey), + }, nil +} + +// PairFromDSA marshals a valid pair of openssh pem blocks for dsa keypairs. +// x509.MarshalPKCS8PrivateKey does not know how to deal with dsa keys. +func PairFromDSA(key *dsa.PrivateKey) (*Pair, error) { + // see https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L1186-L1195 + // and https://linux.die.net/man/1/dsa + k := struct { + Version int + P *big.Int + Q *big.Int + G *big.Int + Pub *big.Int + Priv *big.Int + }{ + Version: 0, + P: key.P, + Q: key.Q, + G: key.G, + Pub: key.Y, + Priv: key.X, + } + kb, err := asn1.Marshal(k) + if err != nil { + return nil, err + } + privBlk := &pem.Block{ + Type: "DSA PRIVATE KEY", + Headers: nil, + Bytes: kb, + } + publicKey, err := ssh.NewPublicKey(&key.PublicKey) + if err != nil { + return nil, err + } + return &Pair{ + Private: pem.EncodeToMemory(privBlk), + Public: ssh.MarshalAuthorizedKey(publicKey), + }, nil +} + +// GeneratePair generates a Private/Public key pair using algorithm t. +// +// When rand is nil "crypto/rand".Reader will be used. +// +// bits specifies the number of bits in the key to create. For RSA keys, the +// minimum size is 1024 bits and the default is 4096 bits. Generally, 3072 bits +// is considered sufficient. DSA keys must be exactly 1024 bits - or 2 or 3 +// times that - as specified by FIPS 186-2. For ECDSA keys, bits determines the +// key length by selecting from one of three elliptic curve sizes: 256, 384 or +// 521 bits. Attempting to use bit lengths other than these three values for +// ECDSA keys will fail. Ed25519 keys have a fixed length and the bits will +// be ignored.
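+// +// For example (a sketch; errors ignored for brevity): +// +// pair, _ := GeneratePair(ED25519, nil, 0) // crypto/rand, fixed-size key +// pair2, _ := GeneratePair(RSA, nil, 0) // 4096-bit RSA by default +// _, _ = pair, pair2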
+func GeneratePair(t Algorithm, rand io.Reader, bits int) (*Pair, error) { + if rand == nil { + rand = cryptorand.Reader + } + switch t { + case DSA: + if bits == 0 { + // currently the ssh package can only decode 1024 bits dsa keys, so + // that's going to be the default for now; see + // https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L411-L420 + bits = 1024 + } + var sizes dsa.ParameterSizes + switch bits { + case 1024: + sizes = dsa.L1024N160 + case 2048: + sizes = dsa.L2048N256 + case 3072: + sizes = dsa.L3072N256 + default: + return nil, ErrInvalidDSAKeySize + } + + params := dsa.Parameters{} + if err := dsa.GenerateParameters(&params, rand, sizes); err != nil { + return nil, err + } + + dsakey := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: params, + }, + } + if err := dsa.GenerateKey(dsakey, rand); err != nil { + return nil, err + } + return PairFromDSA(dsakey) + case ECDSA: + if bits == 0 { + bits = 521 + } + var ecdsakey *ecdsa.PrivateKey + var err error + switch bits { + case 256: + ecdsakey, err = ecdsa.GenerateKey(elliptic.P256(), rand) + case 384: + ecdsakey, err = ecdsa.GenerateKey(elliptic.P384(), rand) + case 521: + ecdsakey, err = ecdsa.GenerateKey(elliptic.P521(), rand) + default: + ecdsakey, err = nil, ErrInvalidECDSAKeySize + } + if err != nil { + return nil, err + } + return NewPair(&ecdsakey.PublicKey, ecdsakey) + case ED25519: + publicKey, privateKey, err := ed25519.GenerateKey(rand) + if err != nil { + return nil, err + } + return PairFromED25519(publicKey, privateKey) + case RSA: + if bits == 0 { + bits = 4096 + } + if bits < 1024 { + return nil, ErrInvalidRSAKeySize + } + rsakey, err := rsa.GenerateKey(rand, bits) + if err != nil { + return nil, err + } + return NewPair(&rsakey.PublicKey, rsakey) + default: + return nil, ErrUnknownAlgorithm + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect.go new file mode 100644 index 000000000..988123d3c --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect.go @@ -0,0 +1,139 @@ +package communicator + +import ( + "context" + "fmt" + "log" + "time" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none" + gossh "golang.org/x/crypto/ssh" +) + +// StepConnect is a multistep Step implementation that connects to +// the proper communicator and stores it in the "communicator" key in the +// state bag. +type StepConnect struct { + // Config is the communicator config struct + Config *Config + + // Host should return a host that can be connected to for communicator + // connections. + Host func(multistep.StateBag) (string, error) + + // The fields below are callbacks to assist with connecting to SSH. + // + // SSHConfig should return the default configuration for + // connecting via SSH. + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + SSHPort func(multistep.StateBag) (int, error) + + // The fields below are callbacks to assist with connecting to WinRM. + // + // WinRMConfig should return the default configuration for + // connecting via WinRM. + WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + WinRMPort func(multistep.StateBag) (int, error) + + // CustomConnect can be set to have custom connectors for specific + // types.
These take highest precedence so you can also override + // existing types. + CustomConnect map[string]multistep.Step + + substep multistep.Step +} + +func (s *StepConnect) pause(pauseLen time.Duration, ctx context.Context) bool { + // Use a select to determine if we get cancelled during the wait + select { + case <-ctx.Done(): + return true + case <-time.After(pauseLen): + } + log.Printf("Pause over; connecting...") + return false +} + +func (s *StepConnect) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + typeMap := map[string]multistep.Step{ + "none": nil, + "ssh": &StepConnectSSH{ + Config: s.Config, + Host: s.Host, + SSHConfig: s.SSHConfig, + SSHPort: s.SSHPort, + }, + "winrm": &StepConnectWinRM{ + Config: s.Config, + Host: s.Host, + WinRMConfig: s.WinRMConfig, + WinRMPort: s.WinRMPort, + }, + } + for k, v := range s.CustomConnect { + typeMap[k] = v + } + + step, ok := typeMap[s.Config.Type] + if !ok { + state.Put("error", fmt.Errorf("unknown communicator type: %s", s.Config.Type)) + return multistep.ActionHalt + } + + if step == nil { + if comm, err := none.New("none"); err != nil { + err := fmt.Errorf("Failed to set communicator 'none': %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + + } else { + state.Put("communicator", comm) + log.Printf("[INFO] communicator disabled, will not connect") + } + return multistep.ActionContinue + } + + if host, err := s.Host(state); err == nil { + ui.Say(fmt.Sprintf("Using %s communicator to connect: %s", s.Config.Type, host)) + } else { + log.Printf("[DEBUG] Unable to get address during connection step: %s", err) + } + + s.substep = step + action := s.substep.Run(ctx, state) + if action == multistep.ActionHalt { + return action + } + + if s.Config.PauseBeforeConnect > 0 { + ui.Say(fmt.Sprintf("Pausing %s before connecting...", + s.Config.PauseBeforeConnect.String())) + cancelled := s.pause(s.Config.PauseBeforeConnect, ctx) + if cancelled { + return multistep.ActionHalt + } + // After pause is complete, re-run the connect substep to make sure + // you've connected properly + action := s.substep.Run(ctx, state) + if action == multistep.ActionHalt { + return action + } + } + + // Put communicator config into state so we can pass it to provisioners + // for specialized interpolation later + state.Put("communicator_config", s.Config) + + return multistep.ActionContinue +} + +func (s *StepConnect) Cleanup(state multistep.StateBag) { + if s.substep != nil { + s.substep.Cleanup(state) + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_ssh.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_ssh.go new file mode 100644 index 000000000..7c6ed150f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_ssh.go @@ -0,0 +1,320 @@ +package communicator + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "strings" + "time" + + "golang.org/x/crypto/ssh/terminal" + + helperssh "github.com/hashicorp/packer-plugin-sdk/communicator/ssh" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/pathing" + "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh" + gossh "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" + "golang.org/x/net/proxy" +) + +// StepConnectSSH is a step that only connects to SSH. 
+// +// In general, you should use StepConnect. +type StepConnectSSH struct { + // All the fields below are documented on StepConnect + Config *Config + Host func(multistep.StateBag) (string, error) + SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error) + SSHPort func(multistep.StateBag) (int, error) +} + +func (s *StepConnectSSH) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + var comm packersdk.Communicator + var err error + + subCtx, cancel := context.WithCancel(ctx) + waitDone := make(chan bool, 1) + go func() { + ui.Say("Waiting for SSH to become available...") + comm, err = s.waitForSSH(state, subCtx) + cancel() // just to make 'possible context leak' analysis happy + waitDone <- true + }() + + log.Printf("[INFO] Waiting for SSH, up to timeout: %s", s.Config.SSHTimeout) + timeout := time.After(s.Config.SSHTimeout) + for { + // Wait for either SSH to become available, a timeout to occur, + // or an interrupt to come through. + select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for SSH: %s", err)) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say("Connected to SSH!") + state.Put("communicator", comm) + return multistep.ActionContinue + case <-timeout: + err := fmt.Errorf("Timeout waiting for SSH.") + state.Put("error", err) + ui.Error(err.Error()) + cancel() + return multistep.ActionHalt + case <-ctx.Done(): + // The step sequence was cancelled, so cancel waiting for SSH + // and just start the halting process. + cancel() + log.Println("[WARN] Interrupt detected, quitting waiting for SSH.") + return multistep.ActionHalt + case <-time.After(1 * time.Second): + } + } +} + +func (s *StepConnectSSH) Cleanup(multistep.StateBag) { +} + +func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, ctx context.Context) (packersdk.Communicator, error) { + // Determine if we're using a bastion host, and if so, retrieve + // that configuration. This configuration doesn't change so we + // do this one before entering the retry loop. + var bProto, bAddr string + var bConf *gossh.ClientConfig + var pAddr string + var pAuth *proxy.Auth + if s.Config.SSHBastionHost != "" { + // The protocol is hardcoded for now, but may be configurable one day + bProto = "tcp" + bAddr = fmt.Sprintf( + "%s:%d", s.Config.SSHBastionHost, s.Config.SSHBastionPort) + + conf, err := sshBastionConfig(s.Config) + if err != nil { + return nil, fmt.Errorf("Error configuring bastion: %s", err) + } + bConf = conf + } + + if s.Config.SSHProxyHost != "" { + pAddr = fmt.Sprintf("%s:%d", s.Config.SSHProxyHost, s.Config.SSHProxyPort) + if s.Config.SSHProxyUsername != "" { + pAuth = new(proxy.Auth) + pAuth.User = s.Config.SSHProxyUsername + pAuth.Password = s.Config.SSHProxyPassword + } + + } + + handshakeAttempts := 0 + + var comm packersdk.Communicator + first := true + for { + // Don't check for cancel or wait on first iteration + if !first { + select { + case <-ctx.Done(): + log.Println("[DEBUG] SSH wait cancelled. 
Exiting loop.") + return nil, errors.New("SSH wait cancelled") + case <-time.After(5 * time.Second): + } + } + first = false + + // First we request the TCP connection information + host, err := s.Host(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH address: %s", err) + continue + } + // store host and port in config so we can access them from provisioners + s.Config.SSHHost = host + port := s.Config.SSHPort + if s.SSHPort != nil { + port, err = s.SSHPort(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH port: %s", err) + continue + } + s.Config.SSHPort = port + } + state.Put("communicator_config", s.Config) + + // Retrieve the SSH configuration + sshConfig, err := s.SSHConfig(state) + if err != nil { + log.Printf("[DEBUG] Error getting SSH config: %s", err) + continue + } + + // Attempt to connect to SSH port + var connFunc func() (net.Conn, error) + address := fmt.Sprintf("%s:%d", host, port) + if bAddr != "" { + // We're using a bastion host, so use the bastion connfunc + connFunc = ssh.BastionConnectFunc( + bProto, bAddr, bConf, "tcp", address) + } else if pAddr != "" { + // Connect via SOCKS5 proxy + connFunc = ssh.ProxyConnectFunc(pAddr, pAuth, "tcp", address) + } else { + // No bastion host, connect directly + connFunc = ssh.ConnectFunc("tcp", address) + } + + nc, err := connFunc() + if err != nil { + log.Printf("[DEBUG] TCP connection to SSH ip/port failed: %s", err) + continue + } + nc.Close() + + // Parse out all the requested Port Tunnels that will go over our SSH connection + var tunnels []ssh.TunnelSpec + for _, v := range s.Config.SSHLocalTunnels { + t, err := helperssh.ParseTunnelArgument(v, ssh.LocalTunnel) + if err != nil { + return nil, fmt.Errorf( + "Error parsing port forwarding: %s", err) + } + tunnels = append(tunnels, t) + } + for _, v := range s.Config.SSHRemoteTunnels { + t, err := helperssh.ParseTunnelArgument(v, ssh.RemoteTunnel) + if err != nil { + return nil, fmt.Errorf( + "Error parsing port forwarding: %s", err) + } + tunnels = append(tunnels, t) + } + + // Then we attempt to connect via SSH + config := &ssh.Config{ + Connection: connFunc, + SSHConfig: sshConfig, + Pty: s.Config.SSHPty, + DisableAgentForwarding: s.Config.SSHDisableAgentForwarding, + UseSftp: s.Config.SSHFileTransferMethod == "sftp", + KeepAliveInterval: s.Config.SSHKeepAliveInterval, + Timeout: s.Config.SSHReadWriteTimeout, + Tunnels: tunnels, + } + + log.Printf("[INFO] Attempting SSH connection to %s...", address) + comm, err = ssh.New(address, config) + if err != nil { + log.Printf("[DEBUG] SSH handshake err: %s", err) + + // Only count this as an attempt if we were able to attempt + // to authenticate. Note this is very brittle since it depends + // on the string of the error... but I don't see any other way. + if strings.Contains(err.Error(), "authenticate") { + log.Printf( + "[DEBUG] Detected authentication error. Increasing handshake attempts.") + err = fmt.Errorf("Packer experienced an authentication error "+ + "when trying to connect via SSH. This can happen if your "+ + "username/password are wrong. You may want to double-check"+ + " your credentials as part of your debugging process. "+ + "original error: %s", + err) + handshakeAttempts += 1 + } + + if handshakeAttempts < s.Config.SSHHandshakeAttempts { + // Try to connect via SSH a handful of times. We sleep here + // so we don't get a ton of authentication errors back to back. 
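+ // (Note: non-authentication failures do not increment + // handshakeAttempts, so transient TCP or handshake errors keep + // retrying here until the outer SSHTimeout in Run fires.)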
+ time.Sleep(2 * time.Second) + continue + } + + return nil, err + } + + break + } + + return comm, nil +} + +func sshBastionConfig(config *Config) (*gossh.ClientConfig, error) { + auth := make([]gossh.AuthMethod, 0, 2) + + if config.SSHBastionInteractive { + var c io.ReadWriteCloser + if terminal.IsTerminal(int(os.Stdin.Fd())) { + c = os.Stdin + } else { + tty, err := os.Open("/dev/tty") + if err != nil { + return nil, err + } + defer tty.Close() + c = tty + } + auth = append(auth, gossh.KeyboardInteractive(ssh.KeyboardInteractive(c))) + } + + if config.SSHBastionPassword != "" { + auth = append(auth, + gossh.Password(config.SSHBastionPassword), + gossh.KeyboardInteractive( + ssh.PasswordKeyboardInteractive(config.SSHBastionPassword))) + } + + if config.SSHBastionPrivateKeyFile != "" { + path, err := pathing.ExpandUser(config.SSHBastionPrivateKeyFile) + if err != nil { + return nil, fmt.Errorf( + "Error expanding path for SSH bastion private key: %s", err) + } + + if config.SSHBastionCertificateFile != "" { + identityPath, err := pathing.ExpandUser(config.SSHBastionCertificateFile) + if err != nil { + return nil, fmt.Errorf("Error expanding path for SSH bastion identity certificate: %s", err) + } + signer, err := helperssh.FileSignerWithCert(path, identityPath) + if err != nil { + return nil, err + } + auth = append(auth, gossh.PublicKeys(signer)) + } else { + signer, err := helperssh.FileSigner(path) + if err != nil { + return nil, err + } + auth = append(auth, gossh.PublicKeys(signer)) + } + } + + if config.SSHBastionAgentAuth { + authSock := os.Getenv("SSH_AUTH_SOCK") + if authSock == "" { + return nil, fmt.Errorf("SSH_AUTH_SOCK is not set") + } + + sshAgent, err := net.Dial("unix", authSock) + if err != nil { + return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err) + } + + auth = append(auth, gossh.PublicKeysCallback(agent.NewClient(sshAgent).Signers)) + } + + return &gossh.ClientConfig{ + User: config.SSHBastionUsername, + Auth: auth, + HostKeyCallback: gossh.InsecureIgnoreHostKey(), + }, nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_winrm.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_winrm.go new file mode 100644 index 000000000..015a7ab74 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_winrm.go @@ -0,0 +1,239 @@ +package communicator + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "log" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm" + winrmcmd "github.com/masterzen/winrm" + "golang.org/x/net/http/httpproxy" +) + +// StepConnectWinRM is a multistep Step implementation that waits for WinRM +// to become available. It gets the connection information from a single +// configuration when creating the step. 
+// +// Uses: +// ui packersdk.Ui +// +// Produces: +// communicator packersdk.Communicator +type StepConnectWinRM struct { + // All the fields below are documented on StepConnect + Config *Config + Host func(multistep.StateBag) (string, error) + WinRMConfig func(multistep.StateBag) (*WinRMConfig, error) + WinRMPort func(multistep.StateBag) (int, error) +} + +func (s *StepConnectWinRM) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + var comm packersdk.Communicator + var err error + + subCtx, cancel := context.WithCancel(ctx) + waitDone := make(chan bool, 1) + go func() { + ui.Say("Waiting for WinRM to become available...") + comm, err = s.waitForWinRM(state, subCtx) + cancel() // just to make 'possible context leak' analysis happy + waitDone <- true + }() + + log.Printf("Waiting for WinRM, up to timeout: %s", s.Config.WinRMTimeout) + timeout := time.After(s.Config.WinRMTimeout) + for { + // Wait for either WinRM to become available, a timeout to occur, + // or an interrupt to come through. + select { + case <-waitDone: + if err != nil { + ui.Error(fmt.Sprintf("Error waiting for WinRM: %s", err)) + return multistep.ActionHalt + } + + ui.Say("Connected to WinRM!") + state.Put("communicator", comm) + return multistep.ActionContinue + case <-timeout: + err := fmt.Errorf("Timeout waiting for WinRM.") + state.Put("error", err) + ui.Error(err.Error()) + cancel() + return multistep.ActionHalt + case <-ctx.Done(): + // The step sequence was cancelled, so cancel waiting for WinRM + // and just start the halting process. + cancel() + log.Println("Interrupt detected, quitting waiting for WinRM.") + return multistep.ActionHalt + case <-time.After(1 * time.Second): + } + } +} + +func (s *StepConnectWinRM) Cleanup(multistep.StateBag) { +} + +func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, ctx context.Context) (packersdk.Communicator, error) { + var comm packersdk.Communicator + first := true + for { + // Don't check for cancel or wait on first iteration + if !first { + select { + case <-ctx.Done(): + log.Println("[INFO] WinRM wait cancelled. 
Exiting loop.") + return nil, errors.New("WinRM wait cancelled") + case <-time.After(5 * time.Second): + } + } + first = false + + host, err := s.Host(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM host: %s", err) + continue + } + s.Config.WinRMHost = host + + port := s.Config.WinRMPort + if s.WinRMPort != nil { + port, err = s.WinRMPort(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM port: %s", err) + continue + } + s.Config.WinRMPort = port + } + + state.Put("communicator_config", s.Config) + + user := s.Config.WinRMUser + password := s.Config.WinRMPassword + if s.WinRMConfig != nil { + config, err := s.WinRMConfig(state) + if err != nil { + log.Printf("[DEBUG] Error getting WinRM config: %s", err) + continue + } + + if config.Username != "" { + user = config.Username + } + if config.Password != "" { + password = config.Password + s.Config.WinRMPassword = password + } + } + + if s.Config.WinRMNoProxy { + if err := setNoProxy(host, port); err != nil { + return nil, fmt.Errorf("Error setting no_proxy: %s", err) + } + s.Config.WinRMTransportDecorator = ProxyTransportDecorator + } + + log.Println("[INFO] Attempting WinRM connection...") + comm, err = winrm.New(&winrm.Config{ + Host: host, + Port: port, + Username: user, + Password: password, + Timeout: s.Config.WinRMTimeout, + Https: s.Config.WinRMUseSSL, + Insecure: s.Config.WinRMInsecure, + TransportDecorator: s.Config.WinRMTransportDecorator, + }) + if err != nil { + log.Printf("[ERROR] WinRM connection err: %s", err) + continue + } + + break + } + // run an "echo" command to make sure winrm is actually connected before moving on. + var connectCheckCommand = winrmcmd.Powershell(`if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'}; echo "WinRM connected."`) + var retryableSleep = 5 * time.Second + // run an "echo" command to make sure that the winrm is connected + for { + cmd := &packersdk.RemoteCmd{Command: connectCheckCommand} + var buf, buf2 bytes.Buffer + cmd.Stdout = &buf + cmd.Stdout = io.MultiWriter(cmd.Stdout, &buf2) + select { + case <-ctx.Done(): + log.Println("WinRM wait canceled, exiting loop") + return comm, fmt.Errorf("WinRM wait canceled") + case <-time.After(retryableSleep): + } + + log.Printf("Checking that WinRM is connected with: '%s'", connectCheckCommand) + ui := state.Get("ui").(packersdk.Ui) + err := cmd.RunWithUi(ctx, comm, ui) + + if err != nil { + log.Printf("Communication connection err: %s", err) + continue + } + + log.Printf("Connected to machine") + stdoutToRead := buf2.String() + if !strings.Contains(stdoutToRead, "WinRM connected.") { + log.Printf("echo didn't succeed; retrying...") + continue + } + break + } + + return comm, nil +} + +// setNoProxy configures the $NO_PROXY env var +func setNoProxy(host string, port int) error { + current := os.Getenv("NO_PROXY") + p := fmt.Sprintf("%s:%d", host, port) + if current == "" { + return os.Setenv("NO_PROXY", p) + } + if !strings.Contains(current, p) { + return os.Setenv("NO_PROXY", strings.Join([]string{current, p}, ",")) + } + return nil +} + +// The net/http ProxyFromEnvironment only loads the environment once, when the +// code is initialized rather than when it's executed. This means that if your +// wrapping code sets the NO_PROXY env var (as Packer does!), it will be +// ignored. Re-loading the environment vars is more expensive, but it is the +// easiest way to work around this limitation. 
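+// +// Illustrative wiring (mirroring waitForWinRM above); pass the decorator so +// each request re-reads the proxy environment variables: +// +// comm, _ := winrm.New(&winrm.Config{ +// Host: "10.0.0.5", // placeholder +// Port: 5985, +// TransportDecorator: ProxyTransportDecorator, +// })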
+func RefreshProxyFromEnvironment(req *http.Request) (*url.URL, error) { + return envProxyFunc()(req.URL) +} + +func envProxyFunc() func(*url.URL) (*url.URL, error) { + envProxyFuncValue := httpproxy.FromEnvironment().ProxyFunc() + return envProxyFuncValue +} + +// ProxyTransportDecorator is a custom Transporter that reloads HTTP Proxy settings at client runtime. +// The net/http ProxyFromEnvironment only loads the environment once, when the +// code is initialized rather than when it's executed. This means that if your +// wrapping code sets the NO_PROXY env var (as Packer does!), it will be +// ignored. Re-loading the environment vars is more expensive, but it is the +// easiest way to work around this limitation. +func ProxyTransportDecorator() winrmcmd.Transporter { + return winrmcmd.NewClientWithProxyFunc(RefreshProxyFromEnvironment) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_debug_ssh_keys.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_debug_ssh_keys.go new file mode 100644 index 000000000..baf58a0cc --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_debug_ssh_keys.go @@ -0,0 +1,33 @@ +package communicator + +import ( + "context" + "fmt" + "io/ioutil" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepDumpSSHKey is a multistep Step implementation that writes the ssh +// keypair somewhere. +type StepDumpSSHKey struct { + Path string + SSH *SSH +} + +func (s *StepDumpSSHKey) Run(_ context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.Path)) + + err := ioutil.WriteFile(s.Path, s.SSH.SSHPrivateKey, 0700) + if err != nil { + state.Put("error", fmt.Errorf("Error saving debug key: %s", err)) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *StepDumpSSHKey) Cleanup(state multistep.StateBag) {} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_ssh_keygen.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_ssh_keygen.go new file mode 100644 index 000000000..9c94ea9c7 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_ssh_keygen.go @@ -0,0 +1,65 @@ +package communicator + +import ( + "context" + "fmt" + + "github.com/hashicorp/packer-plugin-sdk/communicator/sshkey" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepSSHKeyGen is a Packer build step that generates SSH key pairs. +type StepSSHKeyGen struct { + CommConf *Config + SSHTemporaryKeyPair +} + +// Run executes the Packer build step that generates SSH key pairs. 
+// The key pairs are added to the ssh config +func (s *StepSSHKeyGen) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + comm := s.CommConf + + if comm.SSHPrivateKeyFile != "" { + ui.Say("Using existing SSH private key") + privateKeyBytes, err := comm.ReadSSHPrivateKeyFile() + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + + comm.SSHPrivateKey = privateKeyBytes + comm.SSHPublicKey = nil + + return multistep.ActionContinue + } + + algorithm := s.SSHTemporaryKeyPair.SSHTemporaryKeyPairType + if algorithm == "" { + algorithm = sshkey.RSA.String() + } + a, err := sshkey.AlgorithmString(algorithm) + if err != nil { + err := fmt.Errorf("%w: possible algorithm types are `dsa` | `ecdsa` | `ed25519` | `rsa` ( the default )", err) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say(fmt.Sprintf("Creating temporary %s SSH key for instance...", a.String())) + pair, err := sshkey.GeneratePair(a, nil, s.SSHTemporaryKeyPairBits) + if err != nil { + err := fmt.Errorf("Error creating temporary ssh key: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + comm.SSHPrivateKey = pair.Private + comm.SSHPublicKey = pair.Public + + return multistep.ActionContinue +} + +// Nothing to clean up. SSH keys are associated with a single GCE instance. +func (s *StepSSHKeyGen) Cleanup(state multistep.StateBag) {} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/testing.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/testing.go new file mode 100644 index 000000000..d45a59c3d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/testing.go @@ -0,0 +1,48 @@ +package communicator + +import ( + "testing" + + "github.com/hashicorp/packer-plugin-sdk/tmp" +) + +func TestPEM(t *testing.T) string { + tf, err := tmp.File("packer") + if err != nil { + t.Fatalf("err: %s", err) + } + tf.Write([]byte(TestPEMContents)) + tf.Close() + + return tf.Name() +} + +const TestPEMContents = ` +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAxd4iamvrwRJvtNDGQSIbNvvIQN8imXTRWlRY62EvKov60vqu +hh+rDzFYAIIzlmrJopvOe0clqmi3mIP9dtkjPFrYflq52a2CF5q+BdwsJXuRHbJW +LmStZUwW1khSz93DhvhmK50nIaczW63u4EO/jJb3xj+wxR1Nkk9bxi3DDsYFt8SN +AzYx9kjlEYQ/+sI4/ATfmdV9h78SVotjScupd9KFzzi76gWq9gwyCBLRynTUWlyD +2UOfJRkOvhN6/jKzvYfVVwjPSfA9IMuooHdScmC4F6KBKJl/zf/zETM0XyzIDNmH +uOPbCiljq2WoRM+rY6ET84EO0kVXbfx8uxUsqQIDAQABAoIBAQCkPj9TF0IagbM3 +5BSs/CKbAWS4dH/D4bPlxx4IRCNirc8GUg+MRb04Xz0tLuajdQDqeWpr6iLZ0RKV +BvreLF+TOdV7DNQ4XE4gSdJyCtCaTHeort/aordL3l0WgfI7mVk0L/yfN1PEG4YG +E9q1TYcyrB3/8d5JwIkjabxERLglCcP+geOEJp+QijbvFIaZR/n2irlKW4gSy6ko +9B0fgUnhkHysSg49ChHQBPQ+o5BbpuLrPDFMiTPTPhdfsvGGcyCGeqfBA56oHcSF +K02Fg8OM+Bd1lb48LAN9nWWY4WbwV+9bkN3Ym8hO4c3a/Dxf2N7LtAQqWZzFjvM3 +/AaDvAgBAoGBAPLD+Xn1IYQPMB2XXCXfOuJewRY7RzoVWvMffJPDfm16O7wOiW5+ +2FmvxUDayk4PZy6wQMzGeGKnhcMMZTyaq2g/QtGfrvy7q1Lw2fB1VFlVblvqhoJa +nMJojjC4zgjBkXMHsRLeTmgUKyGs+fdFbfI6uejBnnf+eMVUMIdJ+6I9AoGBANCn +kWO9640dttyXURxNJ3lBr2H3dJOkmD6XS+u+LWqCSKQe691Y/fZ/ZL0Oc4Mhy7I6 +hsy3kDQ5k2V0fkaNODQIFJvUqXw2pMewUk8hHc9403f4fe9cPrL12rQ8WlQw4yoC +v2B61vNczCCUDtGxlAaw8jzSRaSI5s6ax3K7enbdAoGBAJB1WYDfA2CoAQO6y9Sl +b07A/7kQ8SN5DbPaqrDrBdJziBQxukoMJQXJeGFNUFD/DXFU5Fp2R7C86vXT7HIR +v6m66zH+CYzOx/YE6EsUJms6UP9VIVF0Rg/RU7teXQwM01ZV32LQ8mswhTH20o/3 +uqMHmxUMEhZpUMhrfq0isyApAoGAe1UxGTXfj9AqkIVYylPIq2HqGww7+jFmVEj1 +9Wi6S6Sq72ffnzzFEPkIQL/UA4TsdHMnzsYKFPSbbXLIWUeMGyVTmTDA5c0e5XIR 
+lPhMOKCAzv8w4VUzMnEkTzkFY5JqFCD/ojW57KvDdNZPVB+VEcdxyAW6aKELXMAc +eHLc1nkCgYEApm/motCTPN32nINZ+Vvywbv64ZD+gtpeMNP3CLrbe1X9O+H52AXa +1jCoOldWR8i2bs2NVPcKZgdo6fFULqE4dBX7Te/uYEIuuZhYLNzRO1IKU/YaqsXG +3bfQ8hKYcSnTfE0gPtLDnqCIxTocaGLSHeG3TH9fTw+dA8FvWpUztI4= +-----END RSA PRIVATE KEY----- +` diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/winrm.go b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/winrm.go new file mode 100644 index 000000000..afdf2569d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/communicator/winrm.go @@ -0,0 +1,8 @@ +package communicator + +// WinRMConfig is configuration that can be returned at runtime to +// dynamically configure WinRM. +type WinRMConfig struct { + Username string + Password string +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/doc.go new file mode 100644 index 000000000..0a89b5030 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/doc.go @@ -0,0 +1,5 @@ +/* +Package filelock makes it easy to create and check file locks for concurrent +processes. +*/ +package filelock diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock.go new file mode 100644 index 000000000..c83816ee6 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock.go @@ -0,0 +1,11 @@ +// +build !solaris + +package filelock + +import "github.com/gofrs/flock" + +type Flock = flock.Flock + +func New(path string) *Flock { + return flock.New(path) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock_solaris.go b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock_solaris.go new file mode 100644 index 000000000..06685254c --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock_solaris.go @@ -0,0 +1,11 @@ +// +build solaris + +package filelock + +// Flock is a noop on solaris for now. +// TODO(azr): PR github.com/gofrs/flock for this. +type Flock = Noop + +func New(string) *Flock { + return &Flock{} +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/noop.go b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/noop.go new file mode 100644 index 000000000..ebf8f1967 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/filelock/noop.go @@ -0,0 +1,8 @@ +package filelock + +// Noop is a lock that does nothing. +type Noop struct{} + +func (_ *Noop) Lock() (bool, error) { return true, nil } +func (_ *Noop) TryLock() (bool, error) { return true, nil } +func (_ *Noop) Unlock() error { return nil } diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/doc.go new file mode 100644 index 000000000..fb9e6e8c4 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/doc.go @@ -0,0 +1,11 @@ +/* +Package guestexec provides a shim for running common operating system commands +on the guest/remote instance that is being provisioned. It helps provisioners +which need to perform operating-system specific calls do so in a way that is +simple and repeatable. + +Note that to successfully use this package your provisioner must have knowledge +of the guest type, which is not information that builders generally collect -- +your provisioner will have to require guest information in its config.
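+ +A rough sketch of typical use from a provisioner (illustrative only; it +assumes this package's NewGuestCommands constructor and CreateDir helper): + + cmds, err := guestexec.NewGuestCommands(guestexec.WindowsOSType, false) + if err != nil { + // unknown guest OS type + } + mkdir := cmds.CreateDir("C:/Temp/packer") // a command string to run on the guest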
+*/ +package guestexec diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/elevated.go b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/elevated.go new file mode 100644 index 000000000..1a03a64b2 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/elevated.go @@ -0,0 +1,203 @@ +package guestexec + +import ( + "bytes" + "encoding/xml" + "fmt" + "log" + "strings" + "text/template" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/uuid" +) + +type ElevatedProvisioner interface { + Communicator() packersdk.Communicator + ElevatedUser() string + ElevatedPassword() string +} + +type elevatedOptions struct { + User string + Password string + TaskName string + TaskDescription string + LogFile string + XMLEscapedCommand string + ScriptFile string +} + +var psEscape = strings.NewReplacer( + "$", "`$", + "\"", "`\"", + "`", "``", + "'", "`'", +) + +var elevatedTemplate = template.Must(template.New("ElevatedCommand").Parse(` +$name = "{{.TaskName}}" +$log = [System.Environment]::ExpandEnvironmentVariables("{{.LogFile}}") +$s = New-Object -ComObject "Schedule.Service" +$s.Connect() +$t = $s.NewTask($null) +$xml = [xml]@' + + + + {{.TaskDescription}} + + + + {{.User}} + Password + HighestAvailable + + + + IgnoreNew + false + false + true + false + false + + false + false + + true + true + false + false + false + PT0S + 4 + + + + cmd + /c {{.XMLEscapedCommand}} + + + +'@ +$logon_type = 1 +$password = "{{.Password}}" +if ($password.Length -eq 0) { + $logon_type = 5 + $password = $null + $ns = New-Object System.Xml.XmlNamespaceManager($xml.NameTable) + $ns.AddNamespace("ns", $xml.DocumentElement.NamespaceURI) + $node = $xml.SelectSingleNode("/ns:Task/ns:Principals/ns:Principal/ns:LogonType", $ns) + $node.ParentNode.RemoveChild($node) | Out-Null +} +$t.XmlText = $xml.OuterXml +if (Test-Path variable:global:ProgressPreference){$ProgressPreference="SilentlyContinue"} +$f = $s.GetFolder("\") +$f.RegisterTaskDefinition($name, $t, 6, "{{.User}}", $password, $logon_type, $null) | Out-Null +$t = $f.GetTask("\$name") +$t.Run($null) | Out-Null +$timeout = 10 +$sec = 0 +while ((!($t.state -eq 4)) -and ($sec -lt $timeout)) { + Start-Sleep -s 1 + $sec++ +} + +$line = 0 +do { + Start-Sleep -m 100 + if (Test-Path $log) { + Get-Content $log | select -skip $line | ForEach { + $line += 1 + Write-Output "$_" + } + } +} while (!($t.state -eq 3)) +$result = $t.LastTaskResult +if (Test-Path $log) { + Remove-Item $log -Force -ErrorAction SilentlyContinue | Out-Null +} + +$script = [System.Environment]::ExpandEnvironmentVariables("{{.ScriptFile}}") +if (Test-Path $script) { + Remove-Item $script -Force -ErrorAction SilentlyContinue | Out-Null +} +$f = $s.GetFolder("\") +$f.DeleteTask("\$name", "") + +[System.Runtime.Interopservices.Marshal]::ReleaseComObject($s) | Out-Null +exit $result`)) + +func GenerateElevatedRunner(command string, p ElevatedProvisioner) (uploadedPath string, err error) { + log.Printf("Building elevated command wrapper for: %s", command) + + var buffer bytes.Buffer + + // Output from the elevated command cannot be returned directly to the + // Packer console. In order to be able to view output from elevated + // commands and scripts an indirect approach is used by which the commands + // output is first redirected to file. The output file is then 'watched' + // by Packer while the elevated command is running and any content + // appearing in the file is written out to the console. 
Below the portion + // of command required to redirect output from the command to file is + // built and appended to the existing command string + taskName := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID()) + // Only use %ENVVAR% format for environment variables when setting the log + // file path; Do NOT use $env:ENVVAR format as it won't be expanded + // correctly in the elevatedTemplate + logFile := `%SYSTEMROOT%/Temp/` + taskName + ".out" + command += fmt.Sprintf(" > %s 2>&1", logFile) + + // elevatedTemplate wraps the command in a single quoted XML text string + // so we need to escape characters considered 'special' in XML. + err = xml.EscapeText(&buffer, []byte(command)) + if err != nil { + return "", fmt.Errorf("Error escaping characters special to XML in command %s: %s", command, err) + } + escapedCommand := buffer.String() + log.Printf("Command [%s] converted to [%s] for use in XML string", command, escapedCommand) + buffer.Reset() + + // Escape chars special to PowerShell in the ElevatedUser string + elevatedUser := p.ElevatedUser() + escapedElevatedUser := psEscape.Replace(elevatedUser) + if escapedElevatedUser != elevatedUser { + log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell", + elevatedUser, escapedElevatedUser) + } + + // Escape chars special to PowerShell in the ElevatedPassword string + elevatedPassword := p.ElevatedPassword() + escapedElevatedPassword := psEscape.Replace(elevatedPassword) + if escapedElevatedPassword != elevatedPassword { + log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell", + elevatedPassword, escapedElevatedPassword) + } + + uuid := uuid.TimeOrderedUUID() + path := fmt.Sprintf(`C:/Windows/Temp/packer-elevated-shell-%s.ps1`, uuid) + + // Generate command + err = elevatedTemplate.Execute(&buffer, elevatedOptions{ + User: escapedElevatedUser, + Password: escapedElevatedPassword, + TaskName: taskName, + TaskDescription: "Packer elevated task", + ScriptFile: path, + LogFile: logFile, + XMLEscapedCommand: escapedCommand, + }) + + if err != nil { + fmt.Printf("Error creating elevated template: %s", err) + return "", err + } + log.Printf("Uploading elevated shell wrapper for command [%s] to [%s]", command, path) + err = p.Communicator().Upload(path, &buffer, nil) + if err != nil { + return "", fmt.Errorf("Error preparing elevated powershell script: %s", err) + } + + return fmt.Sprintf("powershell -executionpolicy bypass -file \"%s\"", path), err +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/guest_commands.go b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/guest_commands.go new file mode 100644 index 000000000..549ba3ccb --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/guest_commands.go @@ -0,0 +1,86 @@ +package guestexec + +import ( + "fmt" + "strings" +) + +const UnixOSType = "unix" +const WindowsOSType = "windows" +const DefaultOSType = UnixOSType + +type guestOSTypeCommand struct { + chmod string + mkdir string + removeDir string + statPath string + mv string +} + +var guestOSTypeCommands = map[string]guestOSTypeCommand{ + UnixOSType: { + chmod: "chmod %s '%s'", + mkdir: "mkdir -p '%s'", + removeDir: "rm -rf '%s'", + statPath: "stat '%s'", + mv: "mv '%s' '%s'", + }, + WindowsOSType: { + chmod: "echo 'skipping chmod %s %s'", // no-op + mkdir: "powershell.exe -Command \"New-Item -ItemType directory -Force -ErrorAction SilentlyContinue -Path %s\"", + removeDir: "powershell.exe -Command \"rm %s -recurse -force\"", + 
statPath: "powershell.exe -Command { if (test-path %s) { exit 0 } else { exit 1 } }", + mv: "powershell.exe -Command \"mv %s %s -force\"", + }, +} + +type GuestCommands struct { + GuestOSType string + Sudo bool +} + +func NewGuestCommands(osType string, sudo bool) (*GuestCommands, error) { + _, ok := guestOSTypeCommands[osType] + if !ok { + return nil, fmt.Errorf("Invalid osType: \"%s\"", osType) + } + return &GuestCommands{GuestOSType: osType, Sudo: sudo}, nil +} + +func (g *GuestCommands) Chmod(path string, mode string) string { + return g.sudo(fmt.Sprintf(g.commands().chmod, mode, g.escapePath(path))) +} + +func (g *GuestCommands) CreateDir(path string) string { + return g.sudo(fmt.Sprintf(g.commands().mkdir, g.escapePath(path))) +} + +func (g *GuestCommands) RemoveDir(path string) string { + return g.sudo(fmt.Sprintf(g.commands().removeDir, g.escapePath(path))) +} + +func (g *GuestCommands) commands() guestOSTypeCommand { + return guestOSTypeCommands[g.GuestOSType] +} + +func (g *GuestCommands) escapePath(path string) string { + if g.GuestOSType == WindowsOSType { + return strings.Replace(path, " ", "` ", -1) + } + return path +} + +func (g *GuestCommands) StatPath(path string) string { + return g.sudo(fmt.Sprintf(g.commands().statPath, g.escapePath(path))) +} + +func (g *GuestCommands) MovePath(srcPath string, dstPath string) string { + return g.sudo(fmt.Sprintf(g.commands().mv, g.escapePath(srcPath), g.escapePath(dstPath))) +} + +func (g *GuestCommands) sudo(cmd string) string { + if g.GuestOSType == UnixOSType && g.Sudo { + return "sudo " + cmd + } + return cmd +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/json/unmarshal.go b/vendor/github.com/hashicorp/packer-plugin-sdk/json/unmarshal.go new file mode 100644 index 000000000..cbb27f487 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/json/unmarshal.go @@ -0,0 +1,40 @@ +package json + +import ( + "bytes" + "encoding/json" + "fmt" +) + +// Unmarshal is wrapper around json.Unmarshal that returns user-friendly +// errors when there are syntax errors. +func Unmarshal(data []byte, i interface{}) error { + err := json.Unmarshal(data, i) + if err != nil { + syntaxErr, ok := err.(*json.SyntaxError) + if !ok { + return err + } + + // We have a syntax error. Extract out the line number and friends. 
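+	// Illustrative worked example (not in the original): for data such as
+	// "{\n \"a\" 1\n}" the decoder reports an offset on the second line, so
+	// start below becomes the index just past the first newline, line counts
+	// that newline plus one (= 2), and pos is the zero-based column of the
+	// offending byte within its line.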
+ // https://groups.google.com/forum/#!topic/golang-nuts/fizimmXtVfc + newline := []byte{'\x0a'} + + // Calculate the start/end position of the line where the error is + start := bytes.LastIndex(data[:syntaxErr.Offset], newline) + 1 + end := len(data) + if idx := bytes.Index(data[start:], newline); idx >= 0 { + end = start + idx + } + + // Count the line number we're on plus the offset in the line + line := bytes.Count(data[:start], newline) + 1 + pos := int(syntaxErr.Offset) - start - 1 + + err = fmt.Errorf("Error in line %d, char %d: %s\n%s", + line, pos, syntaxErr, data[start:end]) + return err + } + + return nil +} diff --git a/vendor/github.com/dylanmei/winrmtest/LICENSE b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/LICENSE.md similarity index 96% rename from vendor/github.com/dylanmei/winrmtest/LICENSE rename to vendor/github.com/hashicorp/packer-plugin-sdk/multistep/LICENSE.md index aac5c68e7..c9d6b768c 100644 --- a/vendor/github.com/dylanmei/winrmtest/LICENSE +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/LICENSE.md @@ -1,4 +1,4 @@ -Copyright (c) 2014-2015 Dylan Meissner +Copyright (c) 2013 Mitchell Hashimoto MIT License diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/basic_runner.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/basic_runner.go new file mode 100644 index 000000000..308894042 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/basic_runner.go @@ -0,0 +1,82 @@ +package multistep + +import ( + "context" + "sync" + "sync/atomic" +) + +type runState int32 + +const ( + stateIdle runState = iota + stateRunning + stateCancelling +) + +// BasicRunner is a Runner that just runs the given slice of steps. +type BasicRunner struct { + // Steps is a slice of steps to run. Once set, this should _not_ be + // modified. + Steps []Step + + l sync.Mutex + state runState +} + +func (b *BasicRunner) Run(ctx context.Context, state StateBag) { + + b.l.Lock() + if b.state != stateIdle { + panic("already running") + } + + doneCh := make(chan struct{}) + b.state = stateRunning + b.l.Unlock() + + defer func() { + b.l.Lock() + b.state = stateIdle + close(doneCh) + b.l.Unlock() + }() + + // This goroutine listens for cancels and puts the StateCancelled key + // as quickly as possible into the state bag to mark it. + go func() { + select { + case <-ctx.Done(): + state.Put(StateCancelled, true) + case <-doneCh: + } + }() + + for _, step := range b.Steps { + if step == nil { + continue + } + if err := ctx.Err(); err != nil { + state.Put(StateCancelled, true) + break + } + // We also check for cancellation here since we can't be sure + // the goroutine that is running to set it actually ran. + if runState(atomic.LoadInt32((*int32)(&b.state))) == stateCancelling { + state.Put(StateCancelled, true) + break + } + + action := step.Run(ctx, state) + defer step.Cleanup(state) + + if _, ok := state.GetOk(StateCancelled); ok { + break + } + + if action == ActionHalt { + state.Put(StateHalted, true) + break + } + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/doc.go new file mode 100644 index 000000000..d3f2717fe --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/doc.go @@ -0,0 +1,15 @@ +/* +The commonsteps package contains the multistep runner that comprises the main +architectural convention of Packer builder plugins. 
It enables builders +to respect global Packer flags like "on-error" and "debug". It also contains +a selection of convenience "multistep" steps that perform globally relevant +tasks that many or most builders will want to implement -- for example, +launching Packer's internal HTTP server for serving files to the instance. + +It also provides step_provision, which contains the hooks necessary for allowing +provisioners to run inside your builder. + +While it is possible to create a simple builder without using the multistep +runner or step_provision, your builder will lack core Packer functionality. +*/ +package commonsteps diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/extra_iso_config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/extra_iso_config.go new file mode 100644 index 000000000..04692924d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/extra_iso_config.go @@ -0,0 +1,99 @@ +//go:generate struct-markdown + +package commonsteps + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" +) + +// An iso (CD) containing custom files can be made available for your build. +// +// By default, no extra CD will be attached. All files listed in this setting +// get placed into the root directory of the CD and the CD is attached as the +// second CD device. +// +// This config exists to work around modern operating systems that have no +// way to mount floppy disks, which was our previous go-to for adding files at +// boot time. +type CDConfig struct { + // A list of files to place onto a CD that is attached when the VM is + // booted. This can include either files or directories; any directories + // will be copied onto the CD recursively, preserving directory structure + // hierarchy. Symlinks will have the link's target copied into the directory + // tree on the CD where the symlink was. File globbing is allowed. + // + // Usage example (JSON): + // + // ```json + // "cd_files": ["./somedirectory/meta-data", "./somedirectory/user-data"], + // "cd_label": "cidata", + // ``` + // + // Usage example (HCL): + // + // ```hcl + // cd_files = ["./somedirectory/meta-data", "./somedirectory/user-data"] + // cd_label = "cidata" + // ``` + // + // The above will create a CD with two files, user-data and meta-data in the + // CD root. This specific example is how you would create a CD that can be + // used for an Ubuntu 20.04 autoinstall. + // + // Since globbing is also supported, + // + // ```hcl + // cd_files = ["./somedirectory/*"] + // cd_label = "cidata" + // ``` + // + // Would also be an acceptable way to define the above cd. The difference + // between providing the directory with or without the glob is whether the + // directory itself or its contents will be at the CD root. + // + // Use of this option assumes that you have a command line tool installed + // that can handle the iso creation. Packer will use one of the following + // tools: + // + // * xorriso + // * mkisofs + // * hdiutil (normally found in macOS) + // * oscdimg (normally found in Windows as part of the Windows ADK) + CDFiles []string `mapstructure:"cd_files"` + CDLabel string `mapstructure:"cd_label"` +} + +func (c *CDConfig) Prepare(ctx *interpolate.Context) []error { + var errs []error + var err error + + if c.CDFiles == nil { + c.CDFiles = make([]string, 0) + } + + // Create new file list based on globbing. 
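+	// For example (hypothetical paths), cd_files = ["./http/*"] could expand
+	// here to ./http/meta-data and ./http/user-data, which then land in the
+	// root of the CD.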
+	var files []string
+	for _, path := range c.CDFiles {
+		if strings.ContainsAny(path, "*?[") {
+			var globbedFiles []string
+			globbedFiles, err = filepath.Glob(path)
+			if len(globbedFiles) > 0 {
+				files = append(files, globbedFiles...)
+			}
+		} else {
+			_, err = os.Stat(path)
+			files = append(files, path)
+		}
+		if err != nil {
+			errs = append(errs, fmt.Errorf("Bad CD disk file '%s': %s", path, err))
+		}
+	}
+	c.CDFiles = files
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/floppy_config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/floppy_config.go
new file mode 100644
index 000000000..e2ec86ae7
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/floppy_config.go
@@ -0,0 +1,74 @@
+//go:generate struct-markdown
+
+package commonsteps
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+)
+
+// A floppy can be made available for your build. This is most useful for
+// unattended Windows installs, which look for an Autounattend.xml file on
+// removable media. By default, no floppy will be attached. All files listed in
+// this setting get placed into the root directory of the floppy and the floppy
+// is attached as the first floppy device. The total size of the listed files
+// must not exceed 1.44 MB. The supported ways to move large files into the OS
+// are using `http_directory` or [the file
+// provisioner](/docs/provisioners/file).
+type FloppyConfig struct {
+	// A list of files to place onto a floppy disk that is attached when the VM
+	// is booted. Currently, no support exists for creating sub-directories on
+	// the floppy. Wildcard characters (\\*, ?, and \[\]) are allowed. Directory
+	// names are also allowed, which will add all the files found in the
+	// directory to the floppy.
+	FloppyFiles []string `mapstructure:"floppy_files"`
+	// A list of directories to place onto the floppy disk recursively. This is
+	// similar to the `floppy_files` option except that the directory structure
+	// is preserved. This is useful for when your floppy disk includes drivers
+	// or if you just want to organize its contents as a hierarchy. Wildcard
+	// characters (\\*, ?, and \[\]) are allowed. The maximum total size of
+	// all files in the listed directories is the same as for `floppy_files`.
+	FloppyDirectories []string `mapstructure:"floppy_dirs"`
+	FloppyLabel       string   `mapstructure:"floppy_label"`
+}
+
+func (c *FloppyConfig) Prepare(ctx *interpolate.Context) []error {
+	var errs []error
+	var err error
+
+	if c.FloppyFiles == nil {
+		c.FloppyFiles = make([]string, 0)
+	}
+
+	for _, path := range c.FloppyFiles {
+		if strings.ContainsAny(path, "*?[") {
+			_, err = filepath.Glob(path)
+		} else {
+			_, err = os.Stat(path)
+		}
+		if err != nil {
+			errs = append(errs, fmt.Errorf("Bad Floppy disk file '%s': %s", path, err))
+		}
+	}
+
+	if c.FloppyDirectories == nil {
+		c.FloppyDirectories = make([]string, 0)
+	}
+
+	for _, path := range c.FloppyDirectories {
+		if strings.ContainsAny(path, "*?[") {
+			_, err = filepath.Glob(path)
+		} else {
+			_, err = os.Stat(path)
+		}
+		if err != nil {
+			errs = append(errs, fmt.Errorf("Bad Floppy disk directory '%s': %s", path, err))
+		}
+	}
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/http_config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/http_config.go
new file mode 100644
index 000000000..21fee8dd6
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/http_config.go
@@ -0,0 +1,70 @@
+//go:generate struct-markdown
+
+package commonsteps
+
+import (
+	"errors"
+
+	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+)
+
+// Packer will create an HTTP server serving `http_directory` when it is set:
+// a random free port will be selected and the contents of the referenced
+// directory will be served over HTTP, with the server's address available in
+// your builder.
+//
+// Example usage from a builder:
+//
+// `wget http://{{ .HTTPIP }}:{{ .HTTPPort }}/foo/bar/preseed.cfg`
+type HTTPConfig struct {
+	// Path to a directory to serve using an HTTP server. The files in this
+	// directory will be available over HTTP and requestable from the virtual
+	// machine. This is useful for hosting kickstart files and so on. By
+	// default this is an empty string, which means no HTTP server will be
+	// started. The address and port of the HTTP server will be available as
+	// variables in `boot_command`. This is covered in more detail below.
+	HTTPDir string `mapstructure:"http_directory"`
+	// These are the minimum and maximum port to use for the HTTP server
+	// started to serve the `http_directory`. Because Packer often runs in
+	// parallel, Packer will choose a randomly available port in this range to
+	// run the HTTP server. If you want to force the HTTP server to be on one
+	// port, make this minimum and maximum port the same. By default the values
+	// are `8000` and `9000`, respectively.
+	HTTPPortMin int `mapstructure:"http_port_min"`
+	HTTPPortMax int `mapstructure:"http_port_max"`
+	// This is the bind address for the HTTP server. Defaults to 0.0.0.0 so
+	// that it will work with any network interface.
+	HTTPAddress string `mapstructure:"http_bind_address"`
+	// This is the bind interface for the HTTP server. Defaults to the first
+	// interface with a non-loopback address. Only one of `http_bind_address`
+	// or `http_interface` may be specified.
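A hedged sketch (not part of the vendored code) of how the HTTPConfig here behaves once prepared (its Prepare method appears just below); the directory is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
)

func main() {
	cfg := &commonsteps.HTTPConfig{HTTPDir: "./http"} // hypothetical directory
	if errs := cfg.Prepare(nil); len(errs) > 0 {
		fmt.Println(errs)
		return
	}
	// After Prepare: HTTPPortMin == 8000, HTTPPortMax == 9000,
	// HTTPAddress == "0.0.0.0".
	fmt.Println(cfg.HTTPPortMin, cfg.HTTPPortMax, cfg.HTTPAddress)
}
```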
+	HTTPInterface string `mapstructure:"http_interface" undocumented:"true"`
+}
+
+func (c *HTTPConfig) Prepare(ctx *interpolate.Context) []error {
+	// Validation
+	var errs []error
+
+	if c.HTTPPortMin == 0 {
+		c.HTTPPortMin = 8000
+	}
+
+	if c.HTTPPortMax == 0 {
+		c.HTTPPortMax = 9000
+	}
+
+	if c.HTTPAddress == "" {
+		c.HTTPAddress = "0.0.0.0"
+	}
+
+	if c.HTTPPortMin > c.HTTPPortMax {
+		errs = append(errs,
+			errors.New("http_port_min must be less than http_port_max"))
+	}
+
+	if c.HTTPInterface != "" && c.HTTPAddress == "0.0.0.0" {
+		errs = append(errs,
+			errors.New("only one of http_interface or http_bind_address can be specified"))
+	}
+
+	return errs
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/iso_config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/iso_config.go
new file mode 100644
index 000000000..ae54a3f7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/iso_config.go
@@ -0,0 +1,193 @@
+//go:generate struct-markdown
+
+package commonsteps
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	getter "github.com/hashicorp/go-getter/v2"
+	urlhelper "github.com/hashicorp/go-getter/v2/helper/url"
+	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+)
+
+// By default, Packer will symlink, download or copy image files to the Packer
+// cache into a "`hash($iso_url+$iso_checksum).$iso_target_extension`" file.
+// Packer uses [hashicorp/go-getter](https://github.com/hashicorp/go-getter) in
+// file mode in order to perform a download.
+//
+// go-getter supports the following protocols:
+//
+// * Local files
+// * Git
+// * Mercurial
+// * HTTP
+// * Amazon S3
+//
+// go-getter can guess the checksum type based on `iso_checksum` length, and it
+// is also possible to specify the checksum type explicitly. Examples:
+//
+// In JSON:
+//
+// ```json
+// "iso_checksum": "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```json
+// "iso_checksum": "file:ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```json
+// "iso_checksum": "file://./shasums.txt",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```json
+// "iso_checksum": "file:./shasums.txt",
+// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// In HCL2:
+//
+// ```hcl
+// iso_checksum = "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2"
+// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```hcl
+// iso_checksum = "file:ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum"
+// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```hcl
+// iso_checksum = "file://./shasums.txt"
+// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+// ```hcl
+// iso_checksum = "file:./shasums.txt"
+// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
+// ```
+//
+type ISOConfig struct {
+	// The checksum for the ISO file or virtual hard drive file. The type of
+	// the checksum is specified within the checksum field as a prefix, ex:
+	// "md5:{$checksum}". The type of the checksum can also be omitted and
+	// Packer will try to infer it based on string length.
+	// Valid values are "none", "{$checksum}", "md5:{$checksum}",
+	// "sha1:{$checksum}", "sha256:{$checksum}", "sha512:{$checksum}" or
+	// "file:{$path}". Here is a list of valid checksum values:
+	//  * md5:090992ba9fd140077b0661cb75f7ce13
+	//  * 090992ba9fd140077b0661cb75f7ce13
+	//  * sha1:ebfb681885ddf1234c18094a45bbeafd91467911
+	//  * ebfb681885ddf1234c18094a45bbeafd91467911
+	//  * sha256:ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
+	//  * ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
+	//  * file:http://releases.ubuntu.com/20.04/MD5SUMS
+	//  * file:file://./local/path/file.sum
+	//  * file:./local/path/file.sum
+	//  * none
+	// Although the checksum will not be verified when it is set to "none",
+	// this is not recommended since these files can be very large and
+	// corruption does happen from time to time.
+	ISOChecksum string `mapstructure:"iso_checksum" required:"true"`
+	// A URL to the ISO containing the installation image or virtual hard drive
+	// (VHD or VHDX) file to clone.
+	RawSingleISOUrl string `mapstructure:"iso_url" required:"true"`
+	// Multiple URLs for the ISO to download. Packer will try these in order.
+	// If anything goes wrong attempting to download or while downloading a
+	// single URL, it will move on to the next. All URLs must point to the same
+	// file (same checksum). By default this is empty and `iso_url` is used.
+	// Only one of `iso_url` or `iso_urls` can be specified.
+	ISOUrls []string `mapstructure:"iso_urls"`
+	// The path where the iso should be saved after download. By default it
+	// will go in the Packer cache, with a hash of the original filename and
+	// checksum as its name.
+	TargetPath string `mapstructure:"iso_target_path"`
+	// The extension of the iso file after download. This defaults to `iso`.
+	TargetExtension string `mapstructure:"iso_target_extension"`
+}
+
+func (c *ISOConfig) Prepare(*interpolate.Context) (warnings []string, errs []error) {
+	if len(c.ISOUrls) != 0 && c.RawSingleISOUrl != "" {
+		errs = append(
+			errs, errors.New("Only one of iso_url or iso_urls can be specified"))
+		return
+	}
+
+	if c.RawSingleISOUrl != "" {
+		// make sure only the array is set
+		c.ISOUrls = append([]string{c.RawSingleISOUrl}, c.ISOUrls...)
+		c.RawSingleISOUrl = ""
+	}
+
+	if len(c.ISOUrls) == 0 {
+		errs = append(
+			errs, errors.New("One of iso_url or iso_urls must be specified"))
+		return
+	}
+	if c.TargetExtension == "" {
+		c.TargetExtension = "iso"
+	}
+	c.TargetExtension = strings.ToLower(c.TargetExtension)
+
+	// Warnings
+	if c.ISOChecksum == "none" {
+		warnings = append(warnings,
+			"A checksum of 'none' was specified. Since ISO files are so big,\n"+
+				"a checksum is highly recommended.")
+		return warnings, errs
+	} else if c.ISOChecksum == "" {
+		errs = append(errs, fmt.Errorf("A checksum must be specified"))
+	} else {
+		// ESX5Driver.VerifyChecksum is run remotely but should not download a
+		// checksum file, so if the checksum points at a file we download that
+		// file and compute the checksum now, then transform the result back
+		// into a plain checksum string that VerifyChecksum can simply read.
+		//
+		// Doing this also has the added benefit of failing early if the
+		// checksum is incorrect or cannot be retrieved.
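+		// For instance (illustrative), iso_checksum = "file:./shasums.txt" is
+		// attached to the first URL as ?checksum=file:./shasums.txt below, and
+		// go-getter resolves it to a concrete "sha256:<hex>"-style string
+		// before the real download happens.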
+ u, err := urlhelper.Parse(c.ISOUrls[0]) + if err != nil { + return warnings, append(errs, fmt.Errorf("url parse: %s", err)) + } + + q := u.Query() + if c.ISOChecksum != "" { + q.Set("checksum", c.ISOChecksum) + } + u.RawQuery = q.Encode() + + wd, err := os.Getwd() + if err != nil { + log.Printf("Getwd: %v", err) + // here we ignore the error in case the + // working directory is not needed. + } + + req := &getter.Request{ + Src: u.String(), + Pwd: wd, + } + cksum, err := defaultGetterClient.GetChecksum(context.TODO(), req) + if err != nil { + errs = append(errs, fmt.Errorf("%v in %q", err, req.URL().Query().Get("checksum"))) + } else { + c.ISOChecksum = cksum.String() + } + } + + if strings.HasSuffix(strings.ToLower(c.ISOChecksum), ".iso") { + errs = append(errs, fmt.Errorf("Error parsing checksum:"+ + " .iso is not a valid checksum ending")) + } + + return warnings, errs +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_debug.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_debug.go new file mode 100644 index 000000000..a53071bb9 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_debug.go @@ -0,0 +1,51 @@ +package commonsteps + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// MultistepDebugFn will return a proper multistep.DebugPauseFn to +// use for debugging if you're using multistep in your builder. +func MultistepDebugFn(ui packersdk.Ui) multistep.DebugPauseFn { + return func(loc multistep.DebugLocation, name string, state multistep.StateBag) { + var locationString string + switch loc { + case multistep.DebugLocationAfterRun: + locationString = "after run of" + case multistep.DebugLocationBeforeCleanup: + locationString = "before cleanup of" + default: + locationString = "at" + } + + message := fmt.Sprintf( + "Pausing %s step '%s'. 
Press enter to continue.", + locationString, name) + + result := make(chan string, 1) + go func() { + line, err := ui.Ask(message) + if err != nil { + log.Printf("Error asking for input: %s", err) + } + + result <- line + }() + + for { + select { + case <-result: + return + case <-time.After(100 * time.Millisecond): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + return + } + } + } + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_runner.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_runner.go new file mode 100644 index 000000000..b4d823a8e --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_runner.go @@ -0,0 +1,219 @@ +package commonsteps + +import ( + "context" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/packer-plugin-sdk/common" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +func newRunner(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui) (multistep.Runner, multistep.DebugPauseFn) { + switch config.PackerOnError { + case "", "cleanup": + case "abort": + for i, step := range steps { + steps[i] = abortStep{ + step: step, + cleanupProv: false, + ui: ui, + } + } + case "ask": + for i, step := range steps { + steps[i] = askStep{step, ui} + } + case "run-cleanup-provisioner": + for i, step := range steps { + steps[i] = abortStep{ + step: step, + cleanupProv: true, + ui: ui, + } + } + } + + if config.PackerDebug { + pauseFn := MultistepDebugFn(ui) + return &multistep.DebugRunner{Steps: steps, PauseFn: pauseFn}, pauseFn + } else { + return &multistep.BasicRunner{Steps: steps}, nil + } +} + +// NewRunner returns a multistep.Runner that runs steps augmented with support +// for -debug and -on-error command line arguments. +func NewRunner(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui) multistep.Runner { + runner, _ := newRunner(steps, config, ui) + return runner +} + +// NewRunnerWithPauseFn returns a multistep.Runner that runs steps augmented +// with support for -debug and -on-error command line arguments. With -debug it +// puts the multistep.DebugPauseFn that will pause execution between steps into +// the state under the key "pauseFn". 
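As a hedged usage sketch of the runner constructors here (the helper name runBuild and the "ui" state key usage are illustrative, not part of the SDK):

```go
package example

import (
	"context"

	"github.com/hashicorp/packer-plugin-sdk/common"
	"github.com/hashicorp/packer-plugin-sdk/multistep"
	"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

// runBuild wires a builder's steps through the runner so that the
// -debug and -on-error flags are honored.
func runBuild(ctx context.Context, steps []multistep.Step, cfg common.PackerConfig, ui packersdk.Ui) multistep.StateBag {
	state := new(multistep.BasicStateBag)
	state.Put("ui", ui)

	// With -debug, the pause function also lands in state under "pauseFn".
	runner := commonsteps.NewRunnerWithPauseFn(steps, cfg, ui, state)
	runner.Run(ctx, state)
	return state
}
```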
+func NewRunnerWithPauseFn(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui, state multistep.StateBag) multistep.Runner { + runner, pauseFn := newRunner(steps, config, ui) + if pauseFn != nil { + state.Put("pauseFn", pauseFn) + } + return runner +} + +func typeName(i interface{}) string { + return reflect.Indirect(reflect.ValueOf(i)).Type().Name() +} + +type abortStep struct { + step multistep.Step + cleanupProv bool + ui packersdk.Ui +} + +func (s abortStep) InnerStepName() string { + return typeName(s.step) +} + +func (s abortStep) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + return s.step.Run(ctx, state) +} + +func (s abortStep) Cleanup(state multistep.StateBag) { + if s.InnerStepName() == typeName(StepProvision{}) && s.cleanupProv { + s.step.Cleanup(state) + return + } + + shouldCleanup := handleAbortsAndInterupts(state, s.ui, typeName(s.step)) + if !shouldCleanup { + return + } + s.step.Cleanup(state) +} + +type askStep struct { + step multistep.Step + ui packersdk.Ui +} + +func (s askStep) InnerStepName() string { + return typeName(s.step) +} + +func (s askStep) Run(ctx context.Context, state multistep.StateBag) (action multistep.StepAction) { + for { + action = s.step.Run(ctx, state) + + if action != multistep.ActionHalt { + return + } + + err, ok := state.GetOk("error") + if ok { + s.ui.Error(fmt.Sprintf("%s", err)) + } + + switch ask(s.ui, typeName(s.step), state) { + case askCleanup: + return + case askAbort: + state.Put("aborted", true) + return + case askRetry: + continue + } + } +} + +func (s askStep) Cleanup(state multistep.StateBag) { + if _, ok := state.GetOk("aborted"); ok { + shouldCleanup := handleAbortsAndInterupts(state, s.ui, typeName(s.step)) + if !shouldCleanup { + return + } + } + s.step.Cleanup(state) +} + +type askResponse int + +const ( + askCleanup askResponse = iota + askAbort + askRetry +) + +func ask(ui packersdk.Ui, name string, state multistep.StateBag) askResponse { + ui.Say(fmt.Sprintf("Step %q failed", name)) + + result := make(chan askResponse) + go func() { + result <- askPrompt(ui) + }() + + for { + select { + case response := <-result: + return response + case <-time.After(100 * time.Millisecond): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + return askCleanup + } + } + } +} + +func askPrompt(ui packersdk.Ui) askResponse { + for { + line, err := ui.Ask("[c] Clean up and exit, [a] abort without cleanup, or [r] retry step (build may fail even if retry succeeds)?") + if err != nil { + log.Printf("Error asking for input: %s", err) + } + + input := strings.ToLower(line) + "c" + switch input[0] { + case 'c': + return askCleanup + case 'a': + return askAbort + case 'r': + return askRetry + } + ui.Say(fmt.Sprintf("Incorrect input: %#v", line)) + } +} + +func handleAbortsAndInterupts(state multistep.StateBag, ui packersdk.Ui, stepName string) bool { + // if returns false, don't run cleanup. If true, do run cleanup. 
+ _, alreadyLogged := state.GetOk("abort_step_logged") + + err, ok := state.GetOk("error") + if ok && !alreadyLogged { + ui.Error(fmt.Sprintf("%s", err)) + state.Put("abort_step_logged", true) + } + if _, ok := state.GetOk(multistep.StateCancelled); ok { + if !alreadyLogged { + ui.Error("Interrupted, aborting...") + state.Put("abort_step_logged", true) + } else { + ui.Error(fmt.Sprintf("aborted: skipping cleanup of step %q", stepName)) + } + return false + } + if _, ok := state.GetOk(multistep.StateHalted); ok { + if !alreadyLogged { + ui.Error(fmt.Sprintf("Step %q failed, aborting...", stepName)) + state.Put("abort_step_logged", true) + } else { + ui.Error(fmt.Sprintf("aborted: skipping cleanup of step %q", stepName)) + } + return false + } + return true +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_cleanup_temp_keys.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_cleanup_temp_keys.go new file mode 100644 index 000000000..ce643f3d1 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_cleanup_temp_keys.go @@ -0,0 +1,72 @@ +package commonsteps + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/packer-plugin-sdk/communicator" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +type StepCleanupTempKeys struct { + Comm *communicator.Config +} + +func (s *StepCleanupTempKeys) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + // This step is mostly cosmetic; Packer deletes the ephemeral keys anyway + // so there's no realistic situation where these keys can cause issues. + // However, it's nice to clean up after yourself. + + if !s.Comm.SSHClearAuthorizedKeys { + return multistep.ActionContinue + } + + if s.Comm.Type != "ssh" { + return multistep.ActionContinue + } + + if s.Comm.SSHTemporaryKeyPairName == "" { + return multistep.ActionContinue + } + + comm := state.Get("communicator").(packersdk.Communicator) + ui := state.Get("ui").(packersdk.Ui) + + cmd := new(packersdk.RemoteCmd) + + ui.Say("Trying to remove ephemeral keys from authorized_keys files") + + // Per the OpenSSH manual (https://man.openbsd.org/sshd.8), a typical + // line in the 'authorized_keys' file contains several fields that + // are delimited by spaces. Here is an (abbreviated) example of a line: + // ssh-rsa AAAAB3Nza...LiPk== user@example.net + // + // In the above example, 'ssh-rsa' is the key pair type, + // 'AAAAB3Nza...LiPk==' is the base64 encoded public key, + // and 'user@example.net' is a comment (in this case, describing + // who the key belongs to). + // + // In the following 'sed' calls, the comment field will be equal to + // the value of communicator.Config.SSHTemporaryKeyPairName. + // We can remove an authorized public key using 'sed' by looking + // for a line ending in ' packer-key-pair-comment' (note the + // leading space). + // + // TODO: Why create a backup file if you are going to remove it? 
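+	// A plausible answer to the TODO above (assumption, not verified): BSD
+	// sed requires a suffix argument to -i, so '-i.bak' keeps the command
+	// portable across GNU and BSD guests; the backup is then deleted right
+	// away.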
+	cmd.Command = fmt.Sprintf("sed -i.bak '/ %s$/d' ~/.ssh/authorized_keys; rm ~/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
+	if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
+		log.Printf("Error cleaning up ~/.ssh/authorized_keys; please clean up keys manually: %s", err)
+	}
+	cmd = new(packersdk.RemoteCmd)
+	cmd.Command = fmt.Sprintf("sudo sed -i.bak '/ %s$/d' /root/.ssh/authorized_keys; sudo rm /root/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
+	if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
+		log.Printf("Error cleaning up /root/.ssh/authorized_keys; please clean up keys manually: %s", err)
+	}
+
+	return multistep.ActionContinue
+}
+
+func (s *StepCleanupTempKeys) Cleanup(state multistep.StateBag) {
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_cdrom.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_cdrom.go
new file mode 100644
index 000000000..97c0869f4
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_cdrom.go
@@ -0,0 +1,294 @@
+package commonsteps
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/hashicorp/packer-plugin-sdk/multistep"
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	"github.com/hashicorp/packer-plugin-sdk/shell-local/localexec"
+	"github.com/hashicorp/packer-plugin-sdk/tmp"
+)
+
+// StepCreateCD will create a CD disk with the given files.
+type StepCreateCD struct {
+	// Files can be either files or directories. Any files provided here will
+	// be written to the root of the CD. Directories will be written to the
+	// root of the CD as well, but will retain their subdirectory structure.
+	Files []string
+	Label string
+
+	CDPath string
+
+	filesAdded map[string]bool
+}
+
+func (s *StepCreateCD) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	if len(s.Files) == 0 {
+		log.Println("No CD files specified. CD disk will not be made.")
+		return multistep.ActionContinue
+	}
+
+	ui := state.Get("ui").(packersdk.Ui)
+	ui.Say("Creating CD disk...")
+
+	if s.Label == "" {
+		s.Label = "packer"
+	} else {
+		log.Printf("CD label is set to %s", s.Label)
+	}
+
+	// Track what files are added. Used for testing step.
+	s.filesAdded = make(map[string]bool)
+
+	// Create a temporary file to be our CD drive
+	CDF, err := tmp.File("packer*.iso")
+	if err != nil {
+		state.Put("error",
+			fmt.Errorf("Error creating temporary file for CD: %s", err))
+		return multistep.ActionHalt
+	}
+	// Set the path so we can remove it later
+	CDPath := CDF.Name()
+	CDF.Close()
+	os.Remove(CDPath)
+
+	log.Printf("CD path: %s", CDPath)
+	s.CDPath = CDPath
+
+	// Consolidate all files provided into a single directory to become our
+	// "root" directory.
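+	// Illustrative example (hypothetical paths): Files = ["./http/*",
+	// "./scripts"] would be staged as <tmpdir>/meta-data, <tmpdir>/user-data
+	// and <tmpdir>/scripts/... before the ISO tool runs.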
+ rootFolder, err := tmp.Dir("packer_to_cdrom") + if err != nil { + state.Put("error", + fmt.Errorf("Error creating temporary file for CD: %s", err)) + return multistep.ActionHalt + } + + for _, toAdd := range s.Files { + err = s.AddFile(rootFolder, toAdd) + if err != nil { + state.Put("error", + fmt.Errorf("Error creating temporary file for CD: %s", err)) + return multistep.ActionHalt + } + } + + cmd, err := retrieveCDISOCreationCommand(s.Label, rootFolder, CDPath) + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + + err = localexec.RunAndStream(cmd, ui, []string{}) + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Message("Done copying paths from CD_dirs") + + // Set the path to the CD so it can be used later + state.Put("cd_path", CDPath) + + if err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + + return multistep.ActionContinue +} + +func (s *StepCreateCD) Cleanup(multistep.StateBag) { + if s.CDPath != "" { + log.Printf("Deleting CD disk: %s", s.CDPath) + os.Remove(s.CDPath) + } +} + +type cdISOCreationCommand struct { + Name string + Command func(path string, label string, source string, dest string) *exec.Cmd +} + +var supportedCDISOCreationCommands []cdISOCreationCommand = []cdISOCreationCommand{ + { + "xorriso", func(path string, label string, source string, dest string) *exec.Cmd { + return exec.Command( + path, + "-as", "genisoimage", + "-rock", + "-joliet", + "-volid", label, + "-output", dest, + source) + }, + }, + { + "mkisofs", func(path string, label string, source string, dest string) *exec.Cmd { + return exec.Command( + path, + "-joliet", + "-volid", label, + "-o", dest, + source) + }, + }, + { + "hdiutil", func(path string, label string, source string, dest string) *exec.Cmd { + return exec.Command( + path, + "makehybrid", + "-o", dest, + "-hfs", + "-joliet", + "-iso", + "-default-volume-name", label, + source) + }, + }, + { + "oscdimg", func(path string, label string, source string, dest string) *exec.Cmd { + return exec.Command( + path, + "-j1", + "-o", + "-m", + "-l"+label, + source, + dest) + }, + }, +} + +func isCygwinExecutable(path string) bool { + return runtime.GOOS == "windows" && strings.Contains(path, "\\usr\\bin\\") +} + +func toCygwinPath(path string) (string, error) { + c := exec.Command("cygpath", path) + cygwinPath, err := c.Output() + return strings.TrimSpace(string(cygwinPath)), err +} + +func retrieveCDISOCreationCommand(label string, source string, dest string) (*exec.Cmd, error) { + for _, c := range supportedCDISOCreationCommands { + path, err := exec.LookPath(c.Name) + if err != nil { + continue + } + // if we are running a cygwin/msys2 executable we must convert the + // native win32 path to a cygwin/msys2/unix style path. 
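+	// e.g. (illustrative) cygpath would turn C:\Users\me\cd into
+	// /cygdrive/c/Users/me/cd.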
+ if isCygwinExecutable(path) { + source, err = toCygwinPath(source) + if err != nil { + return nil, err + } + dest, err = toCygwinPath(dest) + if err != nil { + return nil, err + } + } + return c.Command(path, label, source, dest), nil + } + var commands = make([]string, 0, len(supportedCDISOCreationCommands)) + for _, c := range supportedCDISOCreationCommands { + commands = append(commands, c.Name) + } + return nil, fmt.Errorf( + "could not find a supported CD ISO creation command (the supported commands are: %s)", + strings.Join(commands, ", ")) +} + +func (s *StepCreateCD) AddFile(dst, src string) error { + finfo, err := os.Stat(src) + if err != nil { + return fmt.Errorf("Error adding path to CD: %s", err) + } + + // add a file + if !finfo.IsDir() { + inputF, err := os.Open(src) + if err != nil { + return err + } + defer inputF.Close() + + // Create a new file in the root directory + dest, err := os.Create(filepath.Join(dst, finfo.Name())) + if err != nil { + return fmt.Errorf("Error opening file for copy %s to CD root", src) + } + defer dest.Close() + nBytes, err := io.Copy(dest, inputF) + if err != nil { + return fmt.Errorf("Error copying %s to CD root", src) + } + s.filesAdded[src] = true + log.Printf("Wrote %d bytes to %s", nBytes, finfo.Name()) + return err + } + + // file is a directory, so we need to parse the filename into a path to + // discard and a basename + discardPath, _ := filepath.Split(src) + + // Add a directory and its subdirectories + visit := func(pathname string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + // Clean up pathing so that we preserve the base directory provided by + // the user but not the local pathing to that directory. + allDirs, base := filepath.Split(pathname) + intermediaryDirs := strings.Replace(allDirs, discardPath, "", 1) + + dstPath := filepath.Join(dst, base) + if intermediaryDirs != "" { + dstPath = filepath.Join(dst, intermediaryDirs, base) + } + + // add a file + if !fi.IsDir() { + inputF, err := os.Open(pathname) + if err != nil { + return err + } + defer inputF.Close() + + fileDst, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("Error opening file %s on CD: %s", dstPath, err) + } + defer fileDst.Close() + nBytes, err := io.Copy(fileDst, inputF) + if err != nil { + return fmt.Errorf("Error copying %s to CD: %s", dstPath, err) + } + s.filesAdded[dstPath] = true + log.Printf("Wrote %d bytes to %s", nBytes, dstPath) + return err + } + + if fi.Mode().IsDir() { + // create the directory on the CD, continue walk. + err := os.MkdirAll(dstPath, fi.Mode()) + if err != nil { + err = fmt.Errorf("error creating new directory %s: %s", + dstPath, err) + } + return err + } + return err + } + + return filepath.Walk(src, visit) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_floppy.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_floppy.go new file mode 100644 index 000000000..8c4b0e562 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_floppy.go @@ -0,0 +1,427 @@ +package commonsteps + +import ( + "context" + "fmt" + "io" + "log" + "os" + "path" + "path/filepath" + "strings" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/tmp" + "github.com/mitchellh/go-fs" + "github.com/mitchellh/go-fs/fat" +) + +// StepCreateFloppy will create a floppy disk with the given files. 
+type StepCreateFloppy struct {
+	Files       []string
+	Directories []string
+	Label       string
+
+	floppyPath string
+
+	FilesAdded map[string]bool
+}
+
+func (s *StepCreateFloppy) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+	if len(s.Files) == 0 && len(s.Directories) == 0 {
+		log.Println("No floppy files specified. Floppy disk will not be made.")
+		return multistep.ActionContinue
+	}
+
+	if s.Label == "" {
+		s.Label = "packer"
+	} else {
+		log.Printf("Floppy label is set to %s", s.Label)
+	}
+
+	s.FilesAdded = make(map[string]bool)
+
+	ui := state.Get("ui").(packersdk.Ui)
+	ui.Say("Creating floppy disk...")
+
+	// Create a temporary file to be our floppy drive
+	floppyF, err := tmp.File("packer")
+	if err != nil {
+		state.Put("error",
+			fmt.Errorf("Error creating temporary file for floppy: %s", err))
+		return multistep.ActionHalt
+	}
+	defer floppyF.Close()
+
+	// Set the path so we can remove it later
+	s.floppyPath = floppyF.Name()
+
+	log.Printf("Floppy path: %s", s.floppyPath)
+
+	// Set the size of the file to be a floppy sized
+	if err := floppyF.Truncate(1440 * 1024); err != nil {
+		state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
+		return multistep.ActionHalt
+	}
+
+	// BlockDevice backed by the file for our filesystem
+	log.Println("Initializing block device backed by temporary file")
+	device, err := fs.NewFileDisk(floppyF)
+	if err != nil {
+		state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
+		return multistep.ActionHalt
+	}
+
+	// Format the block device so it contains a valid FAT filesystem
+	log.Println("Formatting the block device with a FAT filesystem...")
+	formatConfig := &fat.SuperFloppyConfig{
+		FATType: fat.FAT12,
+		Label:   s.Label,
+		OEMName: s.Label,
+	}
+	if err := fat.FormatSuperFloppy(device, formatConfig); err != nil {
+		state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
+		return multistep.ActionHalt
+	}
+
+	// The actual FAT filesystem
+	log.Println("Initializing FAT filesystem on block device")
+	fatFs, err := fat.New(device)
+	if err != nil {
+		state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
+		return multistep.ActionHalt
+	}
+
+	// Get the root directory to the filesystem and create a cache for any directories within
+	log.Println("Reading the root directory from the filesystem")
+	rootDir, err := fatFs.RootDir()
+	if err != nil {
+		state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
+		return multistep.ActionHalt
+	}
+	cache := fsDirectoryCache(rootDir)
+
+	// Utility functions for walking through a directory grabbing all files flatly
+	globFiles := func(files []string, list chan string) {
+		for _, filename := range files {
+			if strings.ContainsAny(filename, "*?[") {
+				matches, err := filepath.Glob(filename)
+				if err != nil {
+					continue
+				}
+
+				for _, match := range matches {
+					list <- match
+				}
+				continue
+			}
+			list <- filename
+		}
+		close(list)
+	}
+
+	var crawlDirectoryFiles []string
+	crawlDirectory := func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !info.IsDir() {
+			crawlDirectoryFiles = append(crawlDirectoryFiles, path)
+			ui.Message(fmt.Sprintf("Adding file: %s", path))
+		}
+		return nil
+	}
+	crawlDirectoryFiles = []string{}
+
+	// Collect files and copy them flatly...because floppy_files is broken on purpose.
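+	// "Flatly" means the directory structure is discarded: an input like
+	// ./drivers/net/e1000.inf (hypothetical) ends up as e1000.inf in the
+	// floppy root.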
+ var filelist chan string + filelist = make(chan string) + go globFiles(s.Files, filelist) + + ui.Message("Copying files flatly from floppy_files") + for { + filename, ok := <-filelist + if !ok { + break + } + + finfo, err := os.Stat(filename) + if err != nil { + state.Put("error", fmt.Errorf("Error trying to stat : %s : %s", filename, err)) + return multistep.ActionHalt + } + + // walk through directory adding files to the root of the fs + if finfo.IsDir() { + ui.Message(fmt.Sprintf("Copying directory: %s", filename)) + + err := filepath.Walk(filename, crawlDirectory) + if err != nil { + state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err)) + return multistep.ActionHalt + } + + for _, crawlfilename := range crawlDirectoryFiles { + if err = s.Add(cache, crawlfilename); err != nil { + state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err)) + return multistep.ActionHalt + } + s.FilesAdded[crawlfilename] = true + } + + crawlDirectoryFiles = []string{} + continue + } + + // add just a single file + ui.Message(fmt.Sprintf("Copying file: %s", filename)) + if err = s.Add(cache, filename); err != nil { + state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err)) + return multistep.ActionHalt + } + s.FilesAdded[filename] = true + } + ui.Message("Done copying files from floppy_files") + + // Collect all paths (expanding wildcards) into pathqueue + ui.Message("Collecting paths from floppy_dirs") + var pathqueue []string + for _, filename := range s.Directories { + if strings.ContainsAny(filename, "*?[") { + matches, err := filepath.Glob(filename) + if err != nil { + state.Put("error", fmt.Errorf("Error adding path %s to floppy: %s", filename, err)) + return multistep.ActionHalt + } + + for _, filename := range matches { + pathqueue = append(pathqueue, filename) + } + continue + } + pathqueue = append(pathqueue, filename) + } + ui.Message(fmt.Sprintf("Resulting paths from floppy_dirs : %v", pathqueue)) + + // Go over each path in pathqueue and copy it. 
+ for _, src := range pathqueue { + ui.Message(fmt.Sprintf("Recursively copying : %s", src)) + err = s.Add(cache, src) + if err != nil { + state.Put("error", fmt.Errorf("Error adding path %s to floppy: %s", src, err)) + return multistep.ActionHalt + } + } + ui.Message("Done copying paths from floppy_dirs") + + // Set the path to the floppy so it can be used later + state.Put("floppy_path", s.floppyPath) + + return multistep.ActionContinue +} + +func (s *StepCreateFloppy) Add(dircache directoryCache, src string) error { + finfo, err := os.Stat(src) + if err != nil { + return fmt.Errorf("Error adding path to floppy: %s", err) + } + + // add a file + if !finfo.IsDir() { + inputF, err := os.Open(src) + if err != nil { + return err + } + defer inputF.Close() + + d, err := dircache("") + if err != nil { + return err + } + + entry, err := d.AddFile(path.Base(filepath.ToSlash(src))) + if err != nil { + return err + } + + fatFile, err := entry.File() + if err != nil { + return err + } + + _, err = io.Copy(fatFile, inputF) + s.FilesAdded[src] = true + return err + } + + // add a directory and it's subdirectories + basedirectory := filepath.Join(src, "..") + visit := func(pathname string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if fi.Mode().IsDir() { + base, err := removeBase(basedirectory, pathname) + if err != nil { + return err + } + _, err = dircache(filepath.ToSlash(base)) + return err + } + directory, filename := filepath.Split(filepath.ToSlash(pathname)) + + base, err := removeBase(basedirectory, filepath.FromSlash(directory)) + if err != nil { + return err + } + + inputF, err := os.Open(pathname) + if err != nil { + return err + } + defer inputF.Close() + + wd, err := dircache(filepath.ToSlash(base)) + if err != nil { + return err + } + + entry, err := wd.AddFile(filename) + if err != nil { + return err + } + + fatFile, err := entry.File() + if err != nil { + return err + } + + _, err = io.Copy(fatFile, inputF) + s.FilesAdded[pathname] = true + return err + } + + return filepath.Walk(src, visit) +} + +func (s *StepCreateFloppy) Cleanup(multistep.StateBag) { + if s.floppyPath != "" { + log.Printf("Deleting floppy disk: %s", s.floppyPath) + os.Remove(s.floppyPath) + } +} + +// removeBase will take a regular os.PathSeparator-separated path and remove the +// prefix directory base from it. Both paths are converted to their absolute +// formats before the stripping takes place. +func removeBase(base string, path string) (string, error) { + var idx int + var err error + + if res, err := filepath.Abs(path); err == nil { + path = res + } + path = filepath.Clean(path) + + if base, err = filepath.Abs(base); err != nil { + return path, err + } + + c1, c2 := strings.Split(base, string(os.PathSeparator)), strings.Split(path, string(os.PathSeparator)) + for idx = 0; idx < len(c1); idx++ { + if len(c1[idx]) == 0 && len(c2[idx]) != 0 { + break + } + if c1[idx] != c2[idx] { + return "", fmt.Errorf("Path %s is not prefixed by Base %s", path, base) + } + } + return strings.Join(c2[idx:], string(os.PathSeparator)), nil +} + +// fsDirectoryCache returns a function that can be used to grab the fs.Directory +// entry associated with a given path. If an fs.Directory entry is not found +// then it will be created relative to the rootDirectory argument that is +// passed. 
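A hedged worked example for removeBase above (paths hypothetical); both arguments are resolved to absolute paths before the prefix is stripped:

```go
// Assuming the process working directory is /work:
got, err := removeBase("floppy", "floppy/drivers/net/e1000.inf")
// Both paths resolve under /work, so:
// got == "drivers/net/e1000.inf" and err == nil.
```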
+type directoryCache func(string) (fs.Directory, error) + +func fsDirectoryCache(rootDirectory fs.Directory) directoryCache { + var cache map[string]fs.Directory + + cache = make(map[string]fs.Directory) + cache[""] = rootDirectory + + Input, Output, Error := make(chan string), make(chan fs.Directory), make(chan error) + go func(Error chan error) { + for { + input := <-Input + if len(input) > 0 { + input = path.Clean(input) + } + + // found a directory, so yield it + res, ok := cache[input] + if ok { + Output <- res + continue + } + component := strings.Split(input, "/") + + // directory not cached, so start at the root and walk each component + // creating them if they're not in cache + var entry fs.Directory + for i := range component { + + // join all of our components into a key + path := strings.Join(component[:i], "/") + + // check if parent directory is cached + res, ok = cache[path] + if !ok { + // add directory into cache + directory, err := entry.AddDirectory(component[i-1]) + if err != nil { + Error <- err + continue + } + res, err = directory.Dir() + if err != nil { + Error <- err + continue + } + cache[path] = res + } + // cool, found a directory + entry = res + } + + // finally create our directory + directory, err := entry.AddDirectory(component[len(component)-1]) + if err != nil { + Error <- err + continue + } + res, err = directory.Dir() + if err != nil { + Error <- err + continue + } + cache[input] = res + + // ..and yield it + Output <- entry + } + }(Error) + + getFilesystemDirectory := func(input string) (fs.Directory, error) { + Input <- input + select { + case res := <-Output: + return res, nil + case err := <-Error: + return *new(fs.Directory), err + } + } + return getFilesystemDirectory +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_download.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_download.go new file mode 100644 index 000000000..0746b3c8f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_download.go @@ -0,0 +1,238 @@ +package commonsteps + +import ( + "context" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + + gcs "github.com/hashicorp/go-getter/gcs/v2" + s3 "github.com/hashicorp/go-getter/s3/v2" + getter "github.com/hashicorp/go-getter/v2" + urlhelper "github.com/hashicorp/go-getter/v2/helper/url" + + "github.com/hashicorp/packer-plugin-sdk/filelock" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepDownload downloads a remote file using the download client within +// this package. This step handles setting up the download configuration, +// progress reporting, interrupt handling, etc. +// +// Uses: +// cache packer.Cache +// ui packersdk.Ui +type StepDownload struct { + // The checksum and the type of the checksum for the download + Checksum string + + // A short description of the type of download being done. Example: + // "ISO" or "Guest Additions" + Description string + + // The name of the key where the final path of the ISO will be put + // into the state. + ResultKey string + + // The path where the result should go, otherwise it goes to the + // cache directory. + TargetPath string + + // A list of URLs to attempt to download this thing. + Url []string + + // Extension is the extension to force for the file that is downloaded. + // Some systems require a certain extension. 
If this isn't set, the
+    // extension on the URL is used. Otherwise, this will be forced
+    // on the downloaded file for every URL.
+    Extension string
+}
+
+var defaultGetterClient = getter.Client{
+    Getters: getter.Getters,
+}
+
+func init() {
+    defaultGetterClient.Getters = append(defaultGetterClient.Getters, new(gcs.Getter))
+    defaultGetterClient.Getters = append(defaultGetterClient.Getters, new(s3.Getter))
+}
+
+func (s *StepDownload) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
+    if len(s.Url) == 0 {
+        log.Printf("No URLs were provided to Step Download. Continuing...")
+        return multistep.ActionContinue
+    }
+
+    defer log.Printf("Leaving retrieve loop for %s", s.Description)
+
+    ui := state.Get("ui").(packersdk.Ui)
+    ui.Say(fmt.Sprintf("Retrieving %s", s.Description))
+
+    var errs []error
+
+    for _, source := range s.Url {
+        if ctx.Err() != nil {
+            state.Put("error", fmt.Errorf("Download cancelled: %v", errs))
+            return multistep.ActionHalt
+        }
+        ui.Say(fmt.Sprintf("Trying %s", source))
+        var err error
+        var dst string
+        if s.Description == "OVF/OVA" && strings.HasSuffix(source, ".ovf") {
+            // TODO(adrien): make go-getter allow using files in place.
+            // OVF files usually point to a file in the same directory, so
+            // using them in place is the only way.
+            ui.Say("Using OVF file in place")
+            dst = source
+        } else {
+            dst, err = s.download(ctx, ui, source)
+        }
+        if err == nil {
+            state.Put(s.ResultKey, dst)
+            return multistep.ActionContinue
+        }
+        // maybe another URL will work
+        errs = append(errs, err)
+    }
+
+    err := fmt.Errorf("error downloading %s: %v", s.Description, errs)
+    state.Put("error", err)
+    ui.Error(err.Error())
+    return multistep.ActionHalt
+}
+
+func (s *StepDownload) UseSourceToFindCacheTarget(source string) (*url.URL, string, error) {
+    u, err := parseSourceURL(source)
+    if err != nil {
+        return nil, "", fmt.Errorf("url parse: %s", err)
+    }
+    if checksum := u.Query().Get("checksum"); checksum != "" {
+        s.Checksum = checksum
+    }
+    if s.Checksum != "" && s.Checksum != "none" {
+        // Add the checksum to the URL query params; go-getter will then
+        // verify the checksum for us.
+        q := u.Query()
+        q.Set("checksum", s.Checksum)
+        u.RawQuery = q.Encode()
+    }
+
+    // Store the file under sha1(checksum) when a checksum is set (the
+    // checksum can sometimes be a checksum URL); otherwise, use
+    // sha1(source_url).
+    var shaSum [20]byte
+    if s.Checksum != "" && s.Checksum != "none" {
+        shaSum = sha1.Sum([]byte(s.Checksum))
+    } else {
+        shaSum = sha1.Sum([]byte(u.String()))
+    }
+    shaSumString := hex.EncodeToString(shaSum[:])
+
+    targetPath := s.TargetPath
+    if targetPath == "" {
+        targetPath = shaSumString
+        if s.Extension != "" {
+            targetPath += "." + s.Extension
+        }
+        targetPath, err = packersdk.CachePath(targetPath)
+        if err != nil {
+            return nil, "", fmt.Errorf("CachePath: %s", err)
+        }
+    } else if filepath.Ext(targetPath) == "" {
+        // When the provided target path has no extension, treat it as a
+        // directory and append the cache file name to it.
+        if !strings.HasSuffix(targetPath, "/") {
+            targetPath += "/"
+        }
+        targetPath += shaSumString
+        if s.Extension != "" {
+            targetPath += "." + s.Extension
+        } else {
+            targetPath += ".iso"
+        }
+    }
+    return u, targetPath, nil
+}
+
+func (s *StepDownload) download(ctx context.Context, ui packersdk.Ui, source string) (string, error) {
+    u, targetPath, err := s.UseSourceToFindCacheTarget(source)
+    if err != nil {
+        return "", err
+    }
+    lockFile := targetPath + ".lock"
+
+    log.Printf("Acquiring lock for: %s (%s)", u.String(), lockFile)
+    lock := filelock.New(lockFile)
+    lock.Lock()
+    defer lock.Unlock()
+
+    wd, err := os.Getwd()
+    if err != nil {
+        log.Printf("get working directory: %v", err)
+        // We ignore the error here, in case the working directory is not
+        // actually needed. It would be better if go-getter required it
+        // only in the cases where it is necessary.
+    }
+    src := u.String()
+    if u.Scheme == "" || strings.ToLower(u.Scheme) == "file" {
+        // If a local filepath, then we need to preprocess to make sure the
+        // path doesn't have any multiple successive path separators; if it
+        // does, go-getter will read this as a specialized go-getter-specific
+        // subdirectory command, which it most likely isn't.
+        src = filepath.Clean(u.String())
+        if _, err := os.Stat(filepath.Clean(u.Path)); err != nil {
+            // Cleaned path isn't present on system so it must be some other
+            // scheme. Don't error right away; see if go-getter can figure it
+            // out.
+            src = u.String()
+        }
+    }
+
+    ui.Say(fmt.Sprintf("Trying %s", u.String()))
+    req := &getter.Request{
+        Dst:              targetPath,
+        Src:              src,
+        ProgressListener: ui,
+        Pwd:              wd,
+        Mode:             getter.ModeFile,
+        Inplace:          true,
+    }
+
+    switch op, err := defaultGetterClient.Get(ctx, req); err.(type) {
+    case nil: // success!
+        ui.Say(fmt.Sprintf("%s => %s", u.String(), op.Dst))
+        return op.Dst, nil
+    case *getter.ChecksumError:
+        ui.Say(fmt.Sprintf("Checksum did not match, removing %s", targetPath))
+        if err := os.Remove(targetPath); err != nil {
+            ui.Error(fmt.Sprintf("Failed to remove cache file. Please remove manually: %s", targetPath))
+        }
+        return "", err
+    default:
+        ui.Say(fmt.Sprintf("Download failed: %s", err))
+        return "", err
+    }
+}
+
+func parseSourceURL(source string) (*url.URL, error) {
+    if runtime.GOOS == "windows" {
+        // Check that the user specified a UNC path, and promote it to an smb:// uri.
+        if strings.HasPrefix(source, "\\\\") && len(source) > 2 && source[2] != '?' {
+            source = filepath.ToSlash(source[2:])
+            source = fmt.Sprintf("smb://%s", source)
+        }
+    }
+
+    u, err := urlhelper.Parse(source)
+    return u, err
+}
+
+func (s *StepDownload) Cleanup(multistep.StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_http_server.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_http_server.go
new file mode 100644
index 000000000..42a3f5d4f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_http_server.go
@@ -0,0 +1,75 @@
+package commonsteps
+
+import (
+    "context"
+    "fmt"
+
+    "net/http"
+
+    "github.com/hashicorp/packer-plugin-sdk/multistep"
+    "github.com/hashicorp/packer-plugin-sdk/net"
+    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// This step creates and runs the HTTP server that serves files from the
+// directory specified by the `http_directory` configuration parameter in the
+// template.
+//
+// Uses:
+//   ui packersdk.Ui
+//
+// Produces:
+//   http_port int - The port the HTTP server started on.
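+//
+// For example (illustrative values only):
+//
+//    step := &StepHTTPServer{
+//        HTTPDir:     "http",
+//        HTTPPortMin: 8000,
+//        HTTPPortMax: 9000,
+//    }
+//
+// After a successful Run, state.Get("http_port") holds the chosen port.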
+type StepHTTPServer struct { + HTTPDir string + HTTPPortMin int + HTTPPortMax int + HTTPAddress string + + l *net.Listener +} + +func (s *StepHTTPServer) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + if s.HTTPDir == "" { + state.Put("http_port", 0) + return multistep.ActionContinue + } + + // Find an available TCP port for our HTTP server + var httpAddr string + var err error + s.l, err = net.ListenRangeConfig{ + Min: s.HTTPPortMin, + Max: s.HTTPPortMax, + Addr: s.HTTPAddress, + Network: "tcp", + }.Listen(ctx) + + if err != nil { + err := fmt.Errorf("Error finding port: %s", err) + state.Put("error", err) + ui.Error(err.Error()) + return multistep.ActionHalt + } + + ui.Say(fmt.Sprintf("Starting HTTP server on port %d", s.l.Port)) + + // Start the HTTP server and run it in the background + fileServer := http.FileServer(http.Dir(s.HTTPDir)) + server := &http.Server{Addr: httpAddr, Handler: fileServer} + go server.Serve(s.l) + + // Save the address into the state so it can be accessed in the future + state.Put("http_port", s.l.Port) + + return multistep.ActionContinue +} + +func (s *StepHTTPServer) Cleanup(multistep.StateBag) { + if s.l != nil { + // Close the listener so that the HTTP server stops + s.l.Close() + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_output_dir.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_output_dir.go new file mode 100644 index 000000000..94bc6fe1d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_output_dir.go @@ -0,0 +1,86 @@ +package commonsteps + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "time" + + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepOutputDir sets up the output directory by creating it if it does +// not exist, deleting it if it does exist and we're forcing, and cleaning +// it up when we're done with it. 
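+//
+// For example (illustrative): &StepOutputDir{Force: true, Path: "output-vm"}
+// deletes a pre-existing "output-vm" directory rather than halting the build.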
+type StepOutputDir struct { + Force bool + Path string + + cleanup bool +} + +func (s *StepOutputDir) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + ui := state.Get("ui").(packersdk.Ui) + + if _, err := os.Stat(s.Path); err == nil { + if !s.Force { + err := fmt.Errorf( + "Output directory exists: %s\n\n"+ + "Use the force flag to delete it prior to building.", + s.Path) + state.Put("error", err) + return multistep.ActionHalt + } + + ui.Say("Deleting previous output directory...") + os.RemoveAll(s.Path) + } + + // Enable cleanup + s.cleanup = true + + // Create the directory + if err := os.MkdirAll(s.Path, 0755); err != nil { + state.Put("error", err) + return multistep.ActionHalt + } + + // Make sure we can write in the directory + f, err := os.Create(filepath.Join(s.Path, "_packer_perm_check")) + if err != nil { + err = fmt.Errorf("Couldn't write to output directory: %s", err) + state.Put("error", err) + return multistep.ActionHalt + } + f.Close() + os.Remove(f.Name()) + + return multistep.ActionContinue +} + +func (s *StepOutputDir) Cleanup(state multistep.StateBag) { + if !s.cleanup { + return + } + + _, cancelled := state.GetOk(multistep.StateCancelled) + _, halted := state.GetOk(multistep.StateHalted) + + if cancelled || halted { + ui := state.Get("ui").(packersdk.Ui) + + ui.Say("Deleting output directory...") + for i := 0; i < 5; i++ { + err := os.RemoveAll(s.Path) + if err == nil { + break + } + + log.Printf("Error removing output dir: %s", err) + time.Sleep(2 * time.Second) + } + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_provision.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_provision.go new file mode 100644 index 000000000..3b02dcea5 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_provision.go @@ -0,0 +1,172 @@ +package commonsteps + +import ( + "context" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/hashicorp/packer-plugin-sdk/communicator" + "github.com/hashicorp/packer-plugin-sdk/multistep" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// StepProvision runs the provisioners. +// +// Uses: +// communicator packersdk.Communicator +// hook packersdk.Hook +// ui packersdk.Ui +// +// Produces: +// + +const HttpIPNotImplemented = "ERR_HTTP_IP_NOT_IMPLEMENTED_BY_BUILDER" +const HttpPortNotImplemented = "ERR_HTTP_PORT_NOT_IMPLEMENTED_BY_BUILDER" +const HttpAddrNotImplemented = "ERR_HTTP_ADDR_NOT_IMPLEMENTED_BY_BUILDER" + +func PopulateProvisionHookData(state multistep.StateBag) map[string]interface{} { + hookData := make(map[string]interface{}) + + // Load Builder hook data from state, if it has been set. + hd, ok := state.GetOk("generated_data") + if ok { + hookData = hd.(map[string]interface{}) + } + + // Warn user that the id isn't implemented + hookData["ID"] = "ERR_ID_NOT_IMPLEMENTED_BY_BUILDER" + + // instance_id is placed in state by the builders. + // Not yet implemented in Chroot, lxc/lxd, Azure, Qemu. + // Implemented in most others including digitalOcean (droplet id), + // docker (container_id), and clouds which use "server" internally instead + // of instance. 
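+    // For instance, a builder typically does something like this right
+    // after creating its machine (hypothetical builder-side snippet):
+    //
+    //    state.Put("instance_id", server.ID)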
+ id, ok := state.GetOk("instance_id") + if ok { + hookData["ID"] = id + } + + hookData["PackerRunUUID"] = os.Getenv("PACKER_RUN_UUID") + + // Packer HTTP info + hookData["PackerHTTPIP"] = HttpIPNotImplemented + hookData["PackerHTTPPort"] = HttpPortNotImplemented + hookData["PackerHTTPAddr"] = HttpAddrNotImplemented + + httpPort, okPort := state.GetOk("http_port") + if okPort { + hookData["PackerHTTPPort"] = strconv.Itoa(httpPort.(int)) + } + httIP, okIP := state.GetOk("http_ip") + if okIP { + hookData["PackerHTTPIP"] = httIP.(string) + } + if okPort && okIP { + hookData["PackerHTTPAddr"] = fmt.Sprintf("%s:%s", hookData["PackerHTTPIP"], hookData["PackerHTTPPort"]) + } + + // Read communicator data into hook data + comm, ok := state.GetOk("communicator_config") + if !ok { + log.Printf("Unable to load communicator config from state to populate provisionHookData") + return hookData + } + commConf := comm.(*communicator.Config) + + // Loop over all field values and retrieve them from the ssh config + hookData["Host"] = commConf.Host() + hookData["Port"] = commConf.Port() + hookData["User"] = commConf.User() + hookData["Password"] = commConf.Password() + hookData["ConnType"] = commConf.Type + hookData["SSHPublicKey"] = string(commConf.SSHPublicKey) + hookData["SSHPrivateKey"] = string(commConf.SSHPrivateKey) + hookData["SSHPrivateKeyFile"] = commConf.SSHPrivateKeyFile + hookData["SSHAgentAuth"] = commConf.SSHAgentAuth + + // Backwards compatibility; in practice, WinRMPassword is fulfilled by + // Password. + hookData["WinRMPassword"] = commConf.WinRMPassword + + return hookData +} + +type StepProvision struct { + Comm packersdk.Communicator +} + +func (s *StepProvision) runWithHook(ctx context.Context, state multistep.StateBag, hooktype string) multistep.StepAction { + // hooktype will be either packersdk.HookProvision or packersdk.HookCleanupProvision + comm := s.Comm + if comm == nil { + raw, ok := state.Get("communicator").(packersdk.Communicator) + if ok { + comm = raw.(packersdk.Communicator) + } + } + + hook := state.Get("hook").(packersdk.Hook) + ui := state.Get("ui").(packersdk.Ui) + + hookData := PopulateProvisionHookData(state) + + // Update state generated_data with complete hookData + // to make them accessible by post-processors + state.Put("generated_data", hookData) + + // Run the provisioner in a goroutine so we can continually check + // for cancellations... + if hooktype == packersdk.HookProvision { + log.Println("Running the provision hook") + } else if hooktype == packersdk.HookCleanupProvision { + ui.Say("Provisioning step had errors: Running the cleanup provisioner, if present...") + } + errCh := make(chan error, 1) + go func() { + errCh <- hook.Run(ctx, hooktype, ui, comm, hookData) + }() + + for { + select { + case err := <-errCh: + if err != nil { + if hooktype == packersdk.HookProvision { + // We don't overwrite the error if it's a cleanup + // provisioner being run. + state.Put("error", err) + } else if hooktype == packersdk.HookCleanupProvision { + origErr := state.Get("error").(error) + state.Put("error", fmt.Errorf("Cleanup failed: %s. 
"+ + "Original Provisioning error: %s", err, origErr)) + } + return multistep.ActionHalt + } + + return multistep.ActionContinue + case <-ctx.Done(): + log.Printf("Cancelling provisioning due to context cancellation: %s", ctx.Err()) + return multistep.ActionHalt + case <-time.After(1 * time.Second): + if _, ok := state.GetOk(multistep.StateCancelled); ok { + log.Println("Cancelling provisioning due to interrupt...") + return multistep.ActionHalt + } + } + } +} + +func (s *StepProvision) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + return s.runWithHook(ctx, state, packersdk.HookProvision) +} + +func (s *StepProvision) Cleanup(state multistep.StateBag) { + // We have a "final" provisioner that gets defined by "error-cleanup-provisioner" + // which we only call if there's an error during the provision run and + // the "error-cleanup-provisioner" is defined. + if _, ok := state.GetOk("error"); ok { + s.runWithHook(context.Background(), state, packersdk.HookCleanupProvision) + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/debug_runner.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/debug_runner.go new file mode 100644 index 000000000..48b509a7f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/debug_runner.go @@ -0,0 +1,118 @@ +package multistep + +import ( + "context" + "fmt" + "reflect" + "sync" +) + +// DebugLocation is the location where the pause is occurring when debugging +// a step sequence. "DebugLocationAfterRun" is after the run of the named +// step. "DebugLocationBeforeCleanup" is before the cleanup of the named +// step. +type DebugLocation uint + +const ( + DebugLocationAfterRun DebugLocation = iota + DebugLocationBeforeCleanup +) + +// StepWrapper is an interface that wrapped steps can implement to expose their +// inner step names to the debug runner. +type StepWrapper interface { + // InnerStepName should return the human readable name of the wrapped step. + InnerStepName() string +} + +// DebugPauseFn is the type signature for the function that is called +// whenever the DebugRunner pauses. It allows the caller time to +// inspect the state of the multi-step sequence at a given step. +type DebugPauseFn func(DebugLocation, string, StateBag) + +// DebugRunner is a Runner that runs the given set of steps in order, +// but pauses between each step until it is told to continue. +type DebugRunner struct { + // Steps is the steps to run. These will be run in order. + Steps []Step + + // PauseFn is the function that is called whenever the debug runner + // pauses. The debug runner continues when this function returns. + // The function is given the state so that the state can be inspected. 
+ PauseFn DebugPauseFn + + l sync.Mutex + runner *BasicRunner +} + +func (r *DebugRunner) Run(ctx context.Context, state StateBag) { + r.l.Lock() + if r.runner != nil { + panic("already running") + } + r.runner = new(BasicRunner) + r.l.Unlock() + + pauseFn := r.PauseFn + + // If no PauseFn is specified, use the default + if pauseFn == nil { + pauseFn = DebugPauseDefault + } + + // Rebuild the steps so that we insert the pause step after each + steps := make([]Step, len(r.Steps)*2) + for i, step := range r.Steps { + if step == nil { + continue + } + steps[i*2] = step + name := "" + if wrapped, ok := step.(StepWrapper); ok { + name = wrapped.InnerStepName() + } else { + name = reflect.Indirect(reflect.ValueOf(step)).Type().Name() + } + steps[(i*2)+1] = &debugStepPause{ + name, + pauseFn, + } + } + + // Then just use a basic runner to run it + r.runner.Steps = steps + r.runner.Run(ctx, state) +} + +// DebugPauseDefault is the default pause function when using the +// DebugRunner if no PauseFn is specified. It outputs some information +// to stderr about the step and waits for keyboard input on stdin before +// continuing. +func DebugPauseDefault(loc DebugLocation, name string, state StateBag) { + var locationString string + switch loc { + case DebugLocationAfterRun: + locationString = "after run of" + case DebugLocationBeforeCleanup: + locationString = "before cleanup of" + } + + fmt.Printf("Pausing %s step '%s'. Press any key to continue.\n", locationString, name) + + var line string + fmt.Scanln(&line) +} + +type debugStepPause struct { + StepName string + PauseFn DebugPauseFn +} + +func (s *debugStepPause) Run(ctx context.Context, state StateBag) StepAction { + s.PauseFn(DebugLocationAfterRun, s.StepName, state) + return ActionContinue +} + +func (s *debugStepPause) Cleanup(state StateBag) { + s.PauseFn(DebugLocationBeforeCleanup, s.StepName, state) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/doc.go new file mode 100644 index 000000000..002069fc9 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/doc.go @@ -0,0 +1,60 @@ +/* +multistep is a Go library for building up complex actions using discrete, +individual "steps." These steps are strung together and run in sequence +to achieve a more complex goal. The runner handles cleanup, cancelling, etc. +if necessary. + +## Basic Example + +Make a step to perform some action. The step can access your "state", +which is passed between steps by the runner. + +```go +type stepAdd struct{} + +func (s *stepAdd) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction { + // Read our value and assert that it is they type we want + value := state.Get("value").(int) + fmt.Printf("Value is %d\n", value) + + // Store some state back + state.Put("value", value + 1) + return multistep.ActionContinue +} + +func (s *stepAdd) Cleanup(multistep.StateBag) { + // This is called after all the steps have run or if the runner is + // cancelled so that cleanup can be performed. +} +``` + +Make a runner and call your array of Steps. 
+
+```go
+func main() {
+    // Our "bag of state" that we read the value from
+    state := new(multistep.BasicStateBag)
+    state.Put("value", 0)
+
+    steps := []multistep.Step{
+        &stepAdd{},
+        &stepAdd{},
+        &stepAdd{},
+    }
+
+    runner := &multistep.BasicRunner{Steps: steps}
+
+    // Executes the steps
+    runner.Run(context.Background(), state)
+}
+```
+
+This will produce:
+
+```
+Value is 0
+Value is 1
+Value is 2
+```
+*/
+package multistep
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/if.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/if.go
new file mode 100644
index 000000000..f9705af62
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/if.go
@@ -0,0 +1,9 @@
+package multistep
+
+// If returns the given step only if on is true; otherwise it returns a
+// no-op step.
+func If(on bool, step Step) Step {
+    if !on {
+        return &nullStep{}
+    }
+    return step
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/multistep.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/multistep.go
new file mode 100644
index 000000000..a3e427c0c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/multistep.go
@@ -0,0 +1,71 @@
+// multistep is a library for building up complex actions using individual,
+// discrete steps.
+package multistep
+
+import (
+    "context"
+    "strconv"
+)
+
+// A StepAction determines the next step to take regarding multi-step actions.
+type StepAction uint
+
+const (
+    ActionContinue StepAction = iota
+    ActionHalt
+)
+
+// Implement the stringer interface; useful for testing.
+func (a StepAction) String() string {
+    switch a {
+    case ActionContinue:
+        return "ActionContinue"
+    case ActionHalt:
+        return "ActionHalt"
+    default:
+        return "Unexpected value: " + strconv.Itoa(int(a))
+    }
+}
+
+// This is the key set in the state bag when using the basic runner to
+// signal that the step sequence was cancelled.
+const StateCancelled = "cancelled"
+
+// This is the key set in the state bag when a step halted the sequence.
+const StateHalted = "halted"
+
+// Step is a single step that is part of a potentially large sequence
+// of other steps, responsible for performing some specific action.
+type Step interface {
+    // Run is called to perform the action. The passed through context will be
+    // cancelled when the runner is cancelled. The second parameter is a "state
+    // bag" of untyped things. Please be very careful about type-checking the
+    // items in this bag.
+    //
+    // The return value determines whether multi-step sequences continue
+    // or should halt.
+    Run(context.Context, StateBag) StepAction
+
+    // Cleanup is called in reverse order of the steps that have run, and
+    // allows steps to clean up after themselves. Do not assume that, just
+    // because this ran, the entire multi-step sequence completed
+    // successfully. This method can be run in the face of errors and
+    // cancellations as well.
+    //
+    // The parameter is the same "state bag" as Run, and represents the
+    // state at the latest possible time prior to calling Cleanup.
+    Cleanup(StateBag)
+}
+
+// Runner is a thing that runs one or more steps.
+type Runner interface {
+    // Run runs the steps with the given initial state.
+    Run(context.Context, StateBag)
+}
+
+type nullStep struct{}
+
+func (s nullStep) Run(ctx context.Context, state StateBag) StepAction {
+    return ActionContinue
+}
+
+func (s nullStep) Cleanup(state StateBag) {}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/statebag.go b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/statebag.go
new file mode 100644
index 000000000..02a69871f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/multistep/statebag.go
@@ -0,0 +1,52 @@
+package multistep
+
+import "sync"
+
+// Add context to state bag to prevent changing step signature
+
+// StateBag holds the state that is used by the Runner and Steps. The
+// StateBag implementation must be safe for concurrent access.
+type StateBag interface {
+    Get(string) interface{}
+    GetOk(string) (interface{}, bool)
+    Put(string, interface{})
+    Remove(string)
+}
+
+// BasicStateBag implements StateBag by using a normal map underneath
+// protected by a RWMutex.
+type BasicStateBag struct {
+    data map[string]interface{}
+    l    sync.RWMutex
+    once sync.Once
+}
+
+func (b *BasicStateBag) Get(k string) interface{} {
+    result, _ := b.GetOk(k)
+    return result
+}
+
+func (b *BasicStateBag) GetOk(k string) (interface{}, bool) {
+    b.l.RLock()
+    defer b.l.RUnlock()
+
+    result, ok := b.data[k]
+    return result, ok
+}
+
+func (b *BasicStateBag) Put(k string, v interface{}) {
+    b.l.Lock()
+    defer b.l.Unlock()
+
+    // Make sure the map is initialized one time, on write
+    b.once.Do(func() {
+        b.data = make(map[string]interface{})
+    })
+
+    // Write the data
+    b.data[k] = v
+}
+
+func (b *BasicStateBag) Remove(k string) {
+    // Lock for writing, as required by the concurrent-access contract above.
+    b.l.Lock()
+    defer b.l.Unlock()
+
+    delete(b.data, k)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/net/configure_port.go b/vendor/github.com/hashicorp/packer-plugin-sdk/net/configure_port.go
new file mode 100644
index 000000000..6cc3d3d6c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/net/configure_port.go
@@ -0,0 +1,143 @@
+// Package net contains some helper wrapping functions for the http and net
+// golang libraries that meet Packer-specific needs.
+package net
+
+import (
+    "context"
+    "fmt"
+    "log"
+    "math/rand"
+    "net"
+    "os"
+    "strconv"
+    "time"
+
+    "github.com/hashicorp/packer-plugin-sdk/filelock"
+    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+    "github.com/hashicorp/packer-plugin-sdk/retry"
+)
+
+var _ net.Listener = &Listener{}
+
+// Listener wraps a net.Listener with some Packer-specific capabilities. For
+// example, until you call Listener.Close, any call to ListenRangeConfig.Listen
+// cannot bind to a Port. Packer tries to tell moving parts which port they can
+// use, but often the port has to be released before a third party is started,
+// like a VNC server.
+type Listener struct {
+    // Listener can be closed but Port will be file locked by packer until
+    // Close is called.
+    net.Listener
+    Port        int
+    Address     string
+    lock        *filelock.Flock
+    cleanupFunc func() error
+}
+
+func (l *Listener) Close() error {
+    err := l.lock.Unlock()
+    if err != nil {
+        log.Printf("cannot unlock lockfile %#v: %v", l, err)
+    }
+    err = l.Listener.Close()
+    if err != nil {
+        return err
+    }
+
+    if l.cleanupFunc != nil {
+        err := l.cleanupFunc()
+        if err != nil {
+            log.Printf("cannot cleanup: %#v", err)
+        }
+    }
+    return nil
+}
+
+// ListenRangeConfig contains options for listening to a free address in the
+// [Min,Max) range. ListenRangeConfig wraps a net.ListenConfig.
+type ListenRangeConfig struct {
+    // Like "tcp" or "udp". Defaults to "tcp".
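+    //
+    // A caller typically requests a port in the range like this
+    // (illustrative sketch; mirrors how StepHTTPServer uses it):
+    //
+    //    l, err := ListenRangeConfig{Min: 8000, Max: 9000, Network: "tcp"}.Listen(ctx)
+    //    // l.Port holds the bound port; l.Close() releases it and its file lock.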
+ Network string + Addr string + Min, Max int + net.ListenConfig +} + +// Listen tries to Listen to a random open TCP port in the [min, max) range +// until ctx is cancelled. +// Listen uses net.ListenConfig.Listen internally. +func (lc ListenRangeConfig) Listen(ctx context.Context) (*Listener, error) { + if lc.Network == "" { + lc.Network = "tcp" + } + portRange := lc.Max - lc.Min + + var listener *Listener + + err := retry.Config{ + RetryDelay: func() time.Duration { return 1 * time.Millisecond }, + }.Run(ctx, func(context.Context) error { + port := lc.Min + if portRange > 0 { + port += rand.Intn(portRange) + } + + lockFilePath, err := packersdk.CachePath("port", strconv.Itoa(port)) + if err != nil { + return err + } + + lock := filelock.New(lockFilePath) + locked, err := lock.TryLock() + if err != nil { + return err + } + if !locked { + return ErrPortFileLocked(port) + } + + l, err := lc.ListenConfig.Listen(ctx, lc.Network, fmt.Sprintf("%s:%d", lc.Addr, port)) + if err != nil { + if err := lock.Unlock(); err != nil { + log.Fatalf("Could not unlock file lock for port %d: %v", port, err) + } + return &ErrPortBusy{ + Port: port, + Err: err, + } + } + + cleanupFunc := func() error { + return os.Remove(lockFilePath) + } + + log.Printf("Found available port: %d on IP: %s", port, lc.Addr) + listener = &Listener{ + Address: lc.Addr, + Port: port, + Listener: l, + lock: lock, + cleanupFunc: cleanupFunc, + } + return nil + }) + return listener, err +} + +type ErrPortFileLocked int + +func (port ErrPortFileLocked) Error() string { + return fmt.Sprintf("Port %d is file locked", port) +} + +type ErrPortBusy struct { + Port int + Err error +} + +func (err *ErrPortBusy) Error() string { + if err == nil { + return "" + } + return fmt.Sprintf("port %d cannot be opened: %v", err.Port, err.Err) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/net/default_client.go b/vendor/github.com/hashicorp/packer-plugin-sdk/net/default_client.go new file mode 100644 index 000000000..ae9b30b29 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/net/default_client.go @@ -0,0 +1,14 @@ +package net + +import ( + "net/http" +) + +func HttpClientWithEnvironmentProxy() *http.Client { + httpClient := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + }, + } + return httpClient +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact.go new file mode 100644 index 000000000..8eaf3bcd5 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact.go @@ -0,0 +1,36 @@ +package packer + +// An Artifact is the result of a build, and is the metadata that documents +// what a builder actually created. The exact meaning of the contents is +// specific to each builder, but this interface is used to communicate back +// to the user the result of a build. +type Artifact interface { + // Returns the ID of the builder that was used to create this artifact. + // This is the internal ID of the builder and should be unique to every + // builder. This can be used to identify what the contents of the + // artifact actually are. + BuilderId() string + + // Returns the set of files that comprise this artifact. If an + // artifact is not made up of files, then this will be empty. + Files() []string + + // The ID for the artifact, if it has one. 
This is not guaranteed to + // be unique every run (like a GUID), but simply provide an identifier + // for the artifact that may be meaningful in some way. For example, + // for Amazon EC2, this value might be the AMI ID. + Id() string + + // Returns human-readable output that describes the artifact created. + // This is used for UI output. It can be multiple lines. + String() string + + // State allows the caller to ask for builder specific state information + // relating to the artifact instance. + State(name string) interface{} + + // Destroy deletes the artifact. Packer calls this for various reasons, + // such as if a post-processor has processed this artifact and it is + // no longer needed. + Destroy() error +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact_mock.go new file mode 100644 index 000000000..8f195c571 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact_mock.go @@ -0,0 +1,54 @@ +package packer + +// MockArtifact is an implementation of Artifact that can be used for tests. +type MockArtifact struct { + BuilderIdValue string + FilesValue []string + IdValue string + StateValues map[string]interface{} + DestroyCalled bool + StringValue string +} + +func (a *MockArtifact) BuilderId() string { + if a.BuilderIdValue == "" { + return "bid" + } + + return a.BuilderIdValue +} + +func (a *MockArtifact) Files() []string { + if a.FilesValue == nil { + return []string{"a", "b"} + } + + return a.FilesValue +} + +func (a *MockArtifact) Id() string { + id := a.IdValue + if id == "" { + id = "id" + } + + return id +} + +func (a *MockArtifact) String() string { + str := a.StringValue + if str == "" { + str = "string" + } + return str +} + +func (a *MockArtifact) State(name string) interface{} { + value := a.StateValues[name] + return value +} + +func (a *MockArtifact) Destroy() error { + a.DestroyCalled = true + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/build.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/build.go new file mode 100644 index 000000000..7bfbb1420 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/build.go @@ -0,0 +1,43 @@ +package packer + +import "context" + +// A Build represents a single job within Packer that is responsible for +// building some machine image artifact. Builds are meant to be parallelized. +type Build interface { + // Name is the name of the build. This is unique across a single template, + // but not absolutely unique. This is meant more to describe to the user + // what is being built rather than being a unique identifier. + Name() string + + // Prepare configures the various components of this build and reports + // any errors in doing so (such as syntax errors, validation errors, etc.). + // It also reports any warnings. + Prepare() ([]string, error) + + // Run runs the actual builder, returning an artifact implementation + // of what is built. If anything goes wrong, an error is returned. + // Run can be context cancelled. + Run(context.Context, Ui) ([]Artifact, error) + + // SetDebug will enable/disable debug mode. Debug mode is always + // enabled by adding the additional key "packer_debug" to boolean + // true in the configuration of the various components. This must + // be called prior to Prepare. + // + // When SetDebug is set to true, parallelism between builds is + // strictly prohibited. 
+ SetDebug(bool) + + // SetForce will enable/disable forcing a build when artifacts exist. + // + // When SetForce is set to true, existing artifacts from the build are + // deleted prior to the build. + SetForce(bool) + + // SetOnError will determine what to do when a normal multistep step fails + // - "cleanup" - run cleanup steps + // - "abort" - exit without cleanup + // - "ask" - ask the user + SetOnError(string) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder.go new file mode 100644 index 000000000..11bdc3046 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder.go @@ -0,0 +1,39 @@ +package packer + +import ( + "context" +) + +// Implementers of Builder are responsible for actually building images +// on some platform given some configuration. +// +// In addition to the documentation on Prepare above: Prepare is sometimes +// configured with a `map[string]interface{}` that has a key "packer_debug". +// This is a boolean value. If it is set to true, then the builder should +// enable a debug mode which allows builder developers and advanced users +// to introspect what is going on during a build. During debug builds, +// parallelism is strictly disabled, so it is safe to request input from +// stdin and so on. +type Builder interface { + HCL2Speccer + + // Prepare is responsible for configuring the builder and validating + // that configuration. Any setup should be done in this method. Note that + // NO side effects should take place in prepare, it is meant as a state + // setup only. Calling Prepare is not necessarily followed by a Run. + // + // The parameters to Prepare are a set of interface{} values of the + // configuration. These are almost always `map[string]interface{}` + // parsed from a template, but no guarantee is made. + // + // Each of the configuration values should merge into the final + // configuration. + // + // Prepare should return a list of variables that will be made accessible to + // users during the provision methods, a list of warnings along with any + // errors that occurred while preparing. + Prepare(...interface{}) ([]string, []string, error) + + // Run is where the actual build should take place. It takes a Build and a Ui. + Run(context.Context, Ui, Hook) (Artifact, error) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.go new file mode 100644 index 000000000..1bd503348 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.go @@ -0,0 +1,67 @@ +//go:generate mapstructure-to-hcl2 -type MockBuilder + +package packer + +import ( + "context" + "errors" + + "github.com/hashicorp/hcl/v2/hcldec" +) + +// MockBuilder is an implementation of Builder that can be used for tests. +// You can set some fake return values and you can keep track of what +// methods were called on the builder. It is fairly basic. 
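+//
+// For example, a test might exercise it like this (illustrative):
+//
+//    b := &MockBuilder{ArtifactId: "test-id"}
+//    artifact, err := b.Run(context.Background(), ui, hook)
+//    // b.RunCalled is now true and artifact.Id() == "test-id".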
+type MockBuilder struct { + ArtifactId string + PrepareWarnings []string + RunErrResult bool + RunNilResult bool + + PrepareCalled bool + PrepareConfig []interface{} + RunCalled bool + RunHook Hook + RunUi Ui + CancelCalled bool + RunFn func(ctx context.Context) + + GeneratedVars []string +} + +func (tb *MockBuilder) ConfigSpec() hcldec.ObjectSpec { return tb.FlatMapstructure().HCL2Spec() } + +func (tb *MockBuilder) FlatConfig() interface{} { return tb.FlatMapstructure() } + +func (tb *MockBuilder) Prepare(config ...interface{}) ([]string, []string, error) { + tb.PrepareCalled = true + tb.PrepareConfig = config + return tb.GeneratedVars, tb.PrepareWarnings, nil +} + +func (tb *MockBuilder) Run(ctx context.Context, ui Ui, h Hook) (Artifact, error) { + tb.RunCalled = true + tb.RunHook = h + tb.RunUi = ui + + if tb.RunErrResult { + return nil, errors.New("foo") + } + + if tb.RunNilResult { + return nil, nil + } + if tb.RunFn != nil { + tb.RunFn(ctx) + } + + if h != nil { + if err := h.Run(ctx, HookProvision, ui, new(MockCommunicator), nil); err != nil { + return nil, err + } + } + + return &MockArtifact{ + IdValue: tb.ArtifactId, + }, nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.hcl2spec.go new file mode 100644 index 000000000..09ef22087 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/builder_mock.hcl2spec.go @@ -0,0 +1,51 @@ +// Code generated by "mapstructure-to-hcl2 -type MockBuilder"; DO NOT EDIT. + +package packer + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// FlatMockBuilder is an auto-generated flat version of MockBuilder. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatMockBuilder struct { + ArtifactId *string `cty:"artifact_id" hcl:"artifact_id"` + PrepareWarnings []string `cty:"prepare_warnings" hcl:"prepare_warnings"` + RunErrResult *bool `cty:"run_err_result" hcl:"run_err_result"` + RunNilResult *bool `cty:"run_nil_result" hcl:"run_nil_result"` + PrepareCalled *bool `cty:"prepare_called" hcl:"prepare_called"` + PrepareConfig []interface{} `cty:"prepare_config" hcl:"prepare_config"` + RunCalled *bool `cty:"run_called" hcl:"run_called"` + RunHook Hook `cty:"run_hook" hcl:"run_hook"` + RunUi Ui `cty:"run_ui" hcl:"run_ui"` + CancelCalled *bool `cty:"cancel_called" hcl:"cancel_called"` + GeneratedVars []string `cty:"generated_vars" hcl:"generated_vars"` +} + +// FlatMapstructure returns a new FlatMockBuilder. +// FlatMockBuilder is an auto-generated flat version of MockBuilder. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*MockBuilder) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatMockBuilder) +} + +// HCL2Spec returns the hcl spec of a MockBuilder. +// This spec is used by HCL to read the fields of MockBuilder. +// The decoded values from this spec will then be applied to a FlatMockBuilder. 
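+//
+// For example (illustrative), an HCL2 attribute written as
+//
+//    artifact_id = "ami-1234"
+//
+// is decoded through the "artifact_id" AttrSpec below into the
+// FlatMockBuilder.ArtifactId field.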
+func (*FlatMockBuilder) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "artifact_id": &hcldec.AttrSpec{Name: "artifact_id", Type: cty.String, Required: false}, + "prepare_warnings": &hcldec.AttrSpec{Name: "prepare_warnings", Type: cty.List(cty.String), Required: false}, + "run_err_result": &hcldec.AttrSpec{Name: "run_err_result", Type: cty.Bool, Required: false}, + "run_nil_result": &hcldec.AttrSpec{Name: "run_nil_result", Type: cty.Bool, Required: false}, + "prepare_called": &hcldec.AttrSpec{Name: "prepare_called", Type: cty.Bool, Required: false}, + "prepare_config": &hcldec.AttrSpec{Name: "prepare_config", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */ + "run_called": &hcldec.AttrSpec{Name: "run_called", Type: cty.Bool, Required: false}, + "run_hook": &hcldec.AttrSpec{Name: "run_hook", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */ + "run_ui": &hcldec.AttrSpec{Name: "run_ui", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */ + "cancel_called": &hcldec.AttrSpec{Name: "cancel_called", Type: cty.Bool, Required: false}, + "generated_vars": &hcldec.AttrSpec{Name: "generated_vars", Type: cty.List(cty.String), Required: false}, + } + return s +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/cache.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/cache.go new file mode 100644 index 000000000..b2be017fe --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/cache.go @@ -0,0 +1,35 @@ +package packer + +import ( + "os" + "path/filepath" +) + +var DefaultCacheDir = "packer_cache" + +// CachePath returns an absolute path to a cache file or directory +// +// When the directory is not absolute, CachePath will try to get +// current working directory to be able to return a full path. +// CachePath tries to create the resulting path if it doesn't exist. +// +// CachePath can error in case it cannot find the cwd. +// +// ex: +// PACKER_CACHE_DIR="" CacheDir() => "./packer_cache/ +// PACKER_CACHE_DIR="" CacheDir("foo") => "./packer_cache/foo +// PACKER_CACHE_DIR="bar" CacheDir("foo") => "./bar/foo +// PACKER_CACHE_DIR="/home/there" CacheDir("foo", "bar") => "/home/there/foo/bar +func CachePath(paths ...string) (path string, err error) { + defer func() { + // create the dir based on return path if it doesn't exist + os.MkdirAll(filepath.Dir(path), os.ModePerm) + }() + cacheDir := DefaultCacheDir + if cd := os.Getenv("PACKER_CACHE_DIR"); cd != "" { + cacheDir = cd + } + + paths = append([]string{cacheDir}, paths...) + return filepath.Abs(filepath.Join(paths...)) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator.go new file mode 100644 index 000000000..026737d4b --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator.go @@ -0,0 +1,219 @@ +package packer + +import ( + "context" + "io" + "os" + "strings" + "sync" + "unicode" + + "github.com/mitchellh/iochan" +) + +// CmdDisconnect is a sentinel value to indicate a RemoteCmd +// exited because the remote side disconnected us. +const CmdDisconnect int = 2300218 + +// RemoteCmd represents a remote command being prepared or run. +type RemoteCmd struct { + // Command is the command to run remotely. This is executed as if + // it were a shell command, so you are expected to do any shell escaping + // necessary. + Command string + + // Stdin specifies the process's standard input. 
If Stdin is + // nil, the process reads from an empty bytes.Buffer. + Stdin io.Reader + + // Stdout and Stderr represent the process's standard output and + // error. + // + // If either is nil, it will be set to ioutil.Discard. + Stdout io.Writer + Stderr io.Writer + + // Once Exited is true, this will contain the exit code of the process. + exitStatus int + + // This thing is a mutex, lock when making modifications concurrently + m sync.Mutex + + exitChInit sync.Once + exitCh chan interface{} +} + +// A Communicator is the interface used to communicate with the machine +// that exists that will eventually be packaged into an image. Communicators +// allow you to execute remote commands, upload files, etc. +// +// Communicators must be safe for concurrency, meaning multiple calls to +// Start or any other method may be called at the same time. +type Communicator interface { + // Start takes a RemoteCmd and starts it. The RemoteCmd must not be + // modified after being used with Start, and it must not be used with + // Start again. The Start method returns immediately once the command + // is started. It does not wait for the command to complete. The + // RemoteCmd.Exited field should be used for this. + Start(context.Context, *RemoteCmd) error + + // Upload uploads a file to the machine to the given path with the + // contents coming from the given reader. This method will block until + // it completes. + Upload(string, io.Reader, *os.FileInfo) error + + // UploadDir uploads the contents of a directory recursively to + // the remote path. It also takes an optional slice of paths to + // ignore when uploading. + // + // The folder name of the source folder should be created unless there + // is a trailing slash on the source "/". For example: "/tmp/src" as + // the source will create a "src" directory in the destination unless + // a trailing slash is added. This is identical behavior to rsync(1). + UploadDir(dst string, src string, exclude []string) error + + // Download downloads a file from the machine from the given remote path + // with the contents writing to the given writer. This method will + // block until it completes. + Download(string, io.Writer) error + + DownloadDir(src string, dst string, exclude []string) error +} + +type ConfigurableCommunicator interface { + HCL2Speccer + Configure(...interface{}) ([]string, error) +} + +// RunWithUi runs the remote command and streams the output to any configured +// Writers for stdout/stderr, while also writing each line as it comes to a Ui. +// RunWithUi will not return until the command finishes or is cancelled. +func (r *RemoteCmd) RunWithUi(ctx context.Context, c Communicator, ui Ui) error { + r.initchan() + + stdout_r, stdout_w := io.Pipe() + stderr_r, stderr_w := io.Pipe() + defer stdout_w.Close() + defer stderr_w.Close() + + // Retain the original stdout/stderr that we can replace back in. 
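+    // (They are mutated just below to tee output into the pipes, and the
+    // deferred function restores the caller's writers when this returns.)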
+ originalStdout := r.Stdout + originalStderr := r.Stderr + defer func() { + r.m.Lock() + defer r.m.Unlock() + + r.Stdout = originalStdout + r.Stderr = originalStderr + }() + + // Set the writers for the output so that we get it streamed to us + if r.Stdout == nil { + r.Stdout = stdout_w + } else { + r.Stdout = io.MultiWriter(r.Stdout, stdout_w) + } + + if r.Stderr == nil { + r.Stderr = stderr_w + } else { + r.Stderr = io.MultiWriter(r.Stderr, stderr_w) + } + + // Start the command + if err := c.Start(ctx, r); err != nil { + return err + } + + // Create the channels we'll use for data + stdoutCh := iochan.DelimReader(stdout_r, '\n') + stderrCh := iochan.DelimReader(stderr_r, '\n') + + // Start the goroutine to watch for the exit + go func() { + defer stdout_w.Close() + defer stderr_w.Close() + r.Wait() + }() + + // Loop and get all our output +OutputLoop: + for { + select { + case output := <-stderrCh: + if output != "" { + ui.Error(r.cleanOutputLine(output)) + } + case output := <-stdoutCh: + if output != "" { + ui.Message(r.cleanOutputLine(output)) + } + case <-r.exitCh: + break OutputLoop + case <-ctx.Done(): + return ctx.Err() + } + } + + // Make sure we finish off stdout/stderr because we may have gotten + // a message from the exit channel before finishing these first. + for output := range stdoutCh { + ui.Message(r.cleanOutputLine(output)) + } + + for output := range stderrCh { + ui.Error(r.cleanOutputLine(output)) + } + + return nil +} + +// SetExited is a helper for setting that this process is exited. This +// should be called by communicators who are running a remote command in +// order to set that the command is done. +func (r *RemoteCmd) SetExited(status int) { + r.initchan() + + r.m.Lock() + r.exitStatus = status + r.m.Unlock() + + close(r.exitCh) +} + +// Wait for command exit and return exit status +func (r *RemoteCmd) Wait() int { + r.initchan() + <-r.exitCh + r.m.Lock() + defer r.m.Unlock() + return r.exitStatus +} + +func (r *RemoteCmd) ExitStatus() int { + return r.Wait() +} + +func (r *RemoteCmd) initchan() { + r.exitChInit.Do(func() { + if r.exitCh == nil { + r.exitCh = make(chan interface{}) + } + }) +} + +// cleanOutputLine cleans up a line so that '\r' don't muck up the +// UI output when we're reading from a remote command. +func (r *RemoteCmd) cleanOutputLine(line string) string { + // Trim surrounding whitespace + line = strings.TrimRightFunc(line, unicode.IsSpace) + + // Trim up to the first carriage return, since that text would be + // lost anyways. + idx := strings.LastIndex(line, "\r") + if idx > -1 { + line = line[idx+1:] + } + + return line +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator_mock.go new file mode 100644 index 000000000..00b043a91 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/communicator_mock.go @@ -0,0 +1,131 @@ +package packer + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "strings" + "sync" +) + +// MockCommunicator is a valid Communicator implementation that can be +// used for tests. 
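+//
+// For example (illustrative):
+//
+//    var buf bytes.Buffer
+//    c := &MockCommunicator{StartStdout: "hello\n"}
+//    cmd := &RemoteCmd{Command: "echo hello", Stdout: &buf}
+//    _ = c.Start(context.Background(), cmd)
+//    cmd.Wait() // buf now holds "hello\n" and c.StartCalled is true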
+type MockCommunicator struct {
+    StartCalled     bool
+    StartCmd        *RemoteCmd
+    StartStderr     string
+    StartStdout     string
+    StartStdin      string
+    StartExitStatus int
+
+    UploadCalled bool
+    UploadPath   string
+    UploadData   string
+
+    UploadDirDst     string
+    UploadDirSrc     string
+    UploadDirExclude []string
+
+    DownloadDirDst     string
+    DownloadDirSrc     string
+    DownloadDirExclude []string
+
+    DownloadCalled bool
+    DownloadPath   string
+    DownloadData   string
+}
+
+func (c *MockCommunicator) Start(ctx context.Context, rc *RemoteCmd) error {
+    c.StartCalled = true
+    c.StartCmd = rc
+
+    go func() {
+        var wg sync.WaitGroup
+        if rc.Stdout != nil && c.StartStdout != "" {
+            wg.Add(1)
+            go func() {
+                io.Copy(rc.Stdout, strings.NewReader(c.StartStdout))
+                wg.Done()
+            }()
+        }
+
+        if rc.Stderr != nil && c.StartStderr != "" {
+            wg.Add(1)
+            go func() {
+                io.Copy(rc.Stderr, strings.NewReader(c.StartStderr))
+                wg.Done()
+            }()
+        }
+
+        if rc.Stdin != nil {
+            wg.Add(1)
+            go func() {
+                defer wg.Done()
+                var data bytes.Buffer
+                io.Copy(&data, rc.Stdin)
+                c.StartStdin = data.String()
+            }()
+        }
+
+        wg.Wait()
+        rc.SetExited(c.StartExitStatus)
+    }()
+
+    return nil
+}
+
+func (c *MockCommunicator) Upload(path string, r io.Reader, fi *os.FileInfo) error {
+    c.UploadCalled = true
+    c.UploadPath = path
+
+    var data bytes.Buffer
+    if _, err := io.Copy(&data, r); err != nil {
+        panic(err)
+    }
+
+    c.UploadData = data.String()
+
+    return nil
+}
+
+func (c *MockCommunicator) UploadDir(dst string, src string, excl []string) error {
+    c.UploadDirDst = dst
+    c.UploadDirSrc = src
+    c.UploadDirExclude = excl
+
+    return nil
+}
+
+func (c *MockCommunicator) Download(path string, w io.Writer) error {
+    c.DownloadCalled = true
+    c.DownloadPath = path
+    w.Write([]byte(c.DownloadData))
+
+    return nil
+}
+
+func (c *MockCommunicator) DownloadDir(src string, dst string, excl []string) error {
+    c.DownloadDirDst = dst
+    c.DownloadDirSrc = src
+    c.DownloadDirExclude = excl
+
+    return nil
+}
+
+// ScriptUploadErrorMockCommunicator returns an error from its Upload() method
+// when a script is uploaded, to test the case where this upload fails.
+type ScriptUploadErrorMockCommunicator struct {
+    MockCommunicator
+}
+
+var ScriptUploadErrorMockCommunicatorError = errors.New("ScriptUploadErrorMockCommunicator Upload error")
+
+func (c *ScriptUploadErrorMockCommunicator) Upload(path string, r io.Reader, fi *os.FileInfo) error {
+    // only fail on script uploads, not on environment variable uploads
+    if !strings.Contains(path, "packer-ps-env-vars") {
+        return ScriptUploadErrorMockCommunicatorError
+    }
+    return c.MockCommunicator.Upload(path, r, fi)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/doc.go
new file mode 100644
index 000000000..ada2bbd4c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/doc.go
@@ -0,0 +1,21 @@
+/*
+Package packer contains all of the interfaces for key Packer objects.
+
+This module will need to be imported by all but the very simplest plugins. It
+represents the foundation of the API that the Core and Plugins use to
+communicate with each other.
+
+Changes to any of the interfaces in this package likely represent a
+backwards-incompatibility and should therefore only be made rarely and when
+absolutely necessary.
+
+Plugins will need to implement either the Builder, Provisioner,
+or Post-Processor interfaces, and will likely create an Artifact. The
The
+Communicator must be implemented in the Builder and then passed into the
+Provisioners so they can use it to communicate with the instance without
+needing to know the connection details.
+
+The UI is created by the Packer core for use by the plugins, and is how the
+plugins stream information back to the terminal.
+*/
+package packer
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hcl2spec.go
new file mode 100644
index 000000000..bbba47ec3
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hcl2spec.go
@@ -0,0 +1,12 @@
+package packer
+
+import "github.com/hashicorp/hcl/v2/hcldec"
+
+// A struct (or type) implementing HCL2Speccer is a type that can describe its
+// own hcl2 conf/layout.
+type HCL2Speccer interface {
+	// ConfigSpec should return the hcl object spec used to configure the
+	// builder. It will be used to tell the HCL parsing library how to
+	// validate/configure a configuration.
+	ConfigSpec() hcldec.ObjectSpec
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook.go
new file mode 100644
index 000000000..ee8104ac9
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook.go
@@ -0,0 +1,53 @@
+package packer
+
+import (
+	"context"
+)
+
+// This is the hook that should be fired for provisioners to run.
+const HookProvision = "packer_provision"
+const HookCleanupProvision = "packer_cleanup_provision"
+
+// A Hook is used to hook into an arbitrarily named location in a build,
+// allowing custom behavior to run at certain points along a build.
+//
+// Run is called when the hook is called, with the name of the hook and
+// arbitrary data associated with it. To know what format the data is in,
+// you must reference the documentation for the specific hook you're interested
+// in. In addition to that, the Hook is given access to a UI so that it can
+// output things to the user.
+//
+// The first context argument controls cancellation; the context will usually
+// be cancelled while Run is still in progress, so the mechanism that handles
+// this must be race-free. Cancellation should attempt to stop the hook in the
+// quickest, safest way possible.
+type Hook interface {
+	Run(context.Context, string, Ui, Communicator, interface{}) error
+}
+
+// A Hook implementation that dispatches based on an internal mapping.
+type DispatchHook struct {
+	Mapping map[string][]Hook
+}
+
+// Run runs the hook with the given name by dispatching it to the proper
+// hooks if a mapping exists. If a mapping doesn't exist, then nothing
+// happens.
+func (h *DispatchHook) Run(ctx context.Context, name string, ui Ui, comm Communicator, data interface{}) error {
+	hooks, ok := h.Mapping[name]
+	if !ok {
+		// No hooks for that name. No problem.
+		return nil
+	}
+
+	for _, hook := range hooks {
+		if err := ctx.Err(); err != nil {
+			return err
+		}
+		if err := hook.Run(ctx, name, ui, comm, data); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook_mock.go
new file mode 100644
index 000000000..16571f1fe
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/hook_mock.go
@@ -0,0 +1,31 @@
+package packer
+
+import (
+	"context"
+)
+
+// MockHook is an implementation of Hook that can be used for tests.
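
// An editorial sketch of DispatchHook in use: hooks registered under a name
// run in order, unknown names are a no-op, and a cancelled context stops the
// chain between hooks. (MockHook, defined just below, stands in for a real
// hook.)
package main

import (
	"context"
	"fmt"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func main() {
	dispatch := &packersdk.DispatchHook{
		Mapping: map[string][]packersdk.Hook{
			packersdk.HookProvision: {&packersdk.MockHook{}},
		},
	}

	// Dispatches to the MockHook registered above.
	err := dispatch.Run(context.Background(), packersdk.HookProvision, nil, nil, nil)
	fmt.Println(err) // <nil>

	// No mapping for this name, so nothing happens.
	err = dispatch.Run(context.Background(), "unknown_hook", nil, nil, nil)
	fmt.Println(err) // <nil>
}
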
+type MockHook struct { + RunFunc func(context.Context) error + + RunCalled bool + RunComm Communicator + RunData interface{} + RunName string + RunUi Ui +} + +func (t *MockHook) Run(ctx context.Context, name string, ui Ui, comm Communicator, data interface{}) error { + + t.RunCalled = true + t.RunComm = comm + t.RunData = data + t.RunName = name + t.RunUi = ui + + if t.RunFunc == nil { + return nil + } + + return t.RunFunc(ctx) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/logs.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/logs.go new file mode 100644 index 000000000..209b66ca3 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/logs.go @@ -0,0 +1,63 @@ +package packer + +import ( + "bytes" + "io" + "strings" + "sync" +) + +type secretFilter struct { + s map[string]struct{} + m sync.Mutex + w io.Writer +} + +func (l *secretFilter) Set(secrets ...string) { + l.m.Lock() + defer l.m.Unlock() + for _, s := range secrets { + l.s[s] = struct{}{} + } +} + +func (l *secretFilter) SetOutput(output io.Writer) { + l.m.Lock() + defer l.m.Unlock() + l.w = output +} + +func (l *secretFilter) Write(p []byte) (n int, err error) { + for s := range l.s { + if s != "" { + p = bytes.Replace(p, []byte(s), []byte(""), -1) + } + } + return l.w.Write(p) +} + +// FilterString will overwrite any senstitive variables in a string, returning +// the filtered string. +func (l *secretFilter) FilterString(message string) string { + for s := range l.s { + if s != "" { + message = strings.Replace(message, s, "", -1) + } + } + return message +} + +func (l *secretFilter) get() (s []string) { + l.m.Lock() + defer l.m.Unlock() + for k := range l.s { + s = append(s, k) + } + return +} + +var LogSecretFilter secretFilter + +func init() { + LogSecretFilter.s = make(map[string]struct{}) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/maps.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/maps.go new file mode 100644 index 000000000..7cad0a014 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/maps.go @@ -0,0 +1,74 @@ +package packer + +import ( + "fmt" +) + +type MapOfProvisioner map[string]func() (Provisioner, error) + +func (mop MapOfProvisioner) Has(provisioner string) bool { + _, res := mop[provisioner] + return res +} + +func (mop MapOfProvisioner) Start(provisioner string) (Provisioner, error) { + p, found := mop[provisioner] + if !found { + return nil, fmt.Errorf("Unknown provisioner %s", provisioner) + } + return p() +} + +func (mop MapOfProvisioner) List() []string { + res := []string{} + for k := range mop { + res = append(res, k) + } + return res +} + +type MapOfPostProcessor map[string]func() (PostProcessor, error) + +func (mopp MapOfPostProcessor) Has(postProcessor string) bool { + _, res := mopp[postProcessor] + return res +} + +func (mopp MapOfPostProcessor) Start(postProcessor string) (PostProcessor, error) { + p, found := mopp[postProcessor] + if !found { + return nil, fmt.Errorf("Unknown post-processor %s", postProcessor) + } + return p() +} + +func (mopp MapOfPostProcessor) List() []string { + res := []string{} + for k := range mopp { + res = append(res, k) + } + return res +} + +type MapOfBuilder map[string]func() (Builder, error) + +func (mob MapOfBuilder) Has(builder string) bool { + _, res := mob[builder] + return res +} + +func (mob MapOfBuilder) Start(builder string) (Builder, error) { + d, found := mob[builder] + if !found { + return nil, fmt.Errorf("Unknown builder %s", builder) + } + return d() +} 
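
// A short editorial sketch of the MapOf* pattern above: the map values are
// lazy constructors, so a component is only instantiated when Start is
// called. MockProvisioner (from this same package) stands in for a real
// provisioner.
package main

import (
	"fmt"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func main() {
	provisioners := packersdk.MapOfProvisioner{
		"mock": func() (packersdk.Provisioner, error) {
			// Only runs when Start("mock") is called.
			return &packersdk.MockProvisioner{}, nil
		},
	}

	fmt.Println(provisioners.Has("mock")) // true

	p, err := provisioners.Start("mock")
	fmt.Println(p != nil, err) // true <nil>

	_, err = provisioners.Start("nope")
	fmt.Println(err) // Unknown provisioner nope
}
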
+
+func (mob MapOfBuilder) List() []string {
+	res := []string{}
+	for k := range mob {
+		res = append(res, k)
+	}
+	return res
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/multi_error.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/multi_error.go
new file mode 100644
index 000000000..5880612d5
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/multi_error.go
@@ -0,0 +1,59 @@
+package packer
+
+import (
+	"fmt"
+	"strings"
+)
+
+// MultiError is an error type to track multiple errors. This is used to
+// accumulate errors in cases such as configuration parsing, and to return
+// them as a single error.
+type MultiError struct {
+	Errors []error
+}
+
+func (e *MultiError) Error() string {
+	points := make([]string, len(e.Errors))
+	for i, err := range e.Errors {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d error(s) occurred:\n\n%s",
+		len(e.Errors), strings.Join(points, "\n"))
+}
+
+// MultiErrorAppend is a helper function that will append more errors
+// onto a MultiError in order to create a larger multi-error. If the
+// original error is not a MultiError, it will be turned into one.
+func MultiErrorAppend(err error, errs ...error) *MultiError {
+	if err == nil {
+		err = new(MultiError)
+	}
+
+	switch err := err.(type) {
+	case *MultiError:
+		if err == nil {
+			err = new(MultiError)
+		}
+
+		for _, verr := range errs {
+			switch rhsErr := verr.(type) {
+			case *MultiError:
+				if rhsErr != nil {
+					err.Errors = append(err.Errors, rhsErr.Errors...)
+				}
+			default:
+				err.Errors = append(err.Errors, verr)
+			}
+		}
+		return err
+	default:
+		newErrs := make([]error, len(errs)+1)
+		newErrs[0] = err
+		copy(newErrs[1:], errs)
+		return &MultiError{
+			Errors: newErrs,
+		}
+	}
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/post_processor.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/post_processor.go
new file mode 100644
index 000000000..e99bcdd8a
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/post_processor.go
@@ -0,0 +1,28 @@
+package packer
+
+import (
+	"context"
+)
+
+// A PostProcessor is responsible for taking an artifact of a build
+// and doing some sort of post-processing to turn this into another
+// artifact. An example of a post-processor would be something that takes
+// the result of a build, compresses it, and returns a new artifact containing
+// a single file of the prior artifact compressed.
+type PostProcessor interface {
+	HCL2Speccer
+
+	// Configure is responsible for setting up configuration, storing
+	// the state for later, and returning any errors, such as validation
+	// errors.
+	Configure(...interface{}) error
+
+	// PostProcess takes a previously created Artifact and produces another
+	// Artifact. If an error occurs, it should return that error. If `keep` is
+	// true, then the previous artifact defaults to being kept if the user has
+	// not given a value to keep_input_artifact. If forceOverride is true, then
+	// any user input for keep_input_artifact is ignored and the artifact is
+	// either kept or discarded according to the value set in `keep`.
+ // PostProcess is cancellable using context + PostProcess(context.Context, Ui, Artifact) (a Artifact, keep bool, forceOverride bool, err error) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner.go new file mode 100644 index 000000000..1f70e70a9 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner.go @@ -0,0 +1,20 @@ +package packer + +import "context" + +// A provisioner is responsible for installing and configuring software +// on a machine prior to building the actual image. +type Provisioner interface { + HCL2Speccer + + // Prepare is called with a set of configurations to setup the + // internal state of the provisioner. The multiple configurations + // should be merged in some sane way. + Prepare(...interface{}) error + + // Provision is called to actually provision the machine. A context is + // given for cancellation, a UI is given to communicate with the user, and + // a communicator is given that is guaranteed to be connected to some + // machine so that provisioning can be done. + Provision(context.Context, Ui, Communicator, map[string]interface{}) error +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.go new file mode 100644 index 000000000..08e3d1b8b --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.go @@ -0,0 +1,60 @@ +//go:generate mapstructure-to-hcl2 -type MockProvisioner +package packer + +import ( + "context" + + "github.com/hashicorp/hcl/v2/hcldec" +) + +// MockProvisioner is an implementation of Provisioner that can be +// used for tests. +type MockProvisioner struct { + ProvFunc func(context.Context) error + + PrepCalled bool + PrepConfigs []interface{} + ProvCalled bool + ProvRetried bool + ProvCommunicator Communicator + ProvUi Ui +} + +func (tp *MockProvisioner) ConfigSpec() hcldec.ObjectSpec { return tp.FlatMapstructure().HCL2Spec() } + +func (tp *MockProvisioner) FlatConfig() interface{} { return tp.FlatMapstructure() } + +func (t *MockProvisioner) Prepare(configs ...interface{}) error { + t.PrepCalled = true + t.PrepConfigs = configs + return nil +} + +func (t *MockProvisioner) Provision(ctx context.Context, ui Ui, comm Communicator, generatedData map[string]interface{}) error { + if t.ProvCalled { + t.ProvRetried = true + return nil + } + + t.ProvCalled = true + t.ProvCommunicator = comm + t.ProvUi = ui + + if t.ProvFunc == nil { + return nil + } + + return t.ProvFunc(ctx) +} + +func (t *MockProvisioner) Communicator() Communicator { + return t.ProvCommunicator +} + +func (t *MockProvisioner) ElevatedUser() string { + return "user" +} + +func (t *MockProvisioner) ElevatedPassword() string { + return "password" +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.hcl2spec.go new file mode 100644 index 000000000..6e319dd8f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/provisioner_mock.hcl2spec.go @@ -0,0 +1,41 @@ +// Code generated by "mapstructure-to-hcl2 -type MockProvisioner"; DO NOT EDIT. + +package packer + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// FlatMockProvisioner is an auto-generated flat version of MockProvisioner. 
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
+type FlatMockProvisioner struct {
+	PrepCalled       *bool         `cty:"prep_called" hcl:"prep_called"`
+	PrepConfigs      []interface{} `cty:"prep_configs" hcl:"prep_configs"`
+	ProvCalled       *bool         `cty:"prov_called" hcl:"prov_called"`
+	ProvRetried      *bool         `cty:"prov_retried" hcl:"prov_retried"`
+	ProvCommunicator Communicator  `cty:"prov_communicator" hcl:"prov_communicator"`
+	ProvUi           Ui            `cty:"prov_ui" hcl:"prov_ui"`
+}
+
+// FlatMapstructure returns a new FlatMockProvisioner.
+// FlatMockProvisioner is an auto-generated flat version of MockProvisioner.
+// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
+func (*MockProvisioner) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
+	return new(FlatMockProvisioner)
+}
+
+// HCL2Spec returns the hcl spec of a MockProvisioner.
+// This spec is used by HCL to read the fields of MockProvisioner.
+// The decoded values from this spec will then be applied to a FlatMockProvisioner.
+func (*FlatMockProvisioner) HCL2Spec() map[string]hcldec.Spec {
+	s := map[string]hcldec.Spec{
+		"prep_called":       &hcldec.AttrSpec{Name: "prep_called", Type: cty.Bool, Required: false},
+		"prep_configs":      &hcldec.AttrSpec{Name: "prep_configs", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */
+		"prov_called":       &hcldec.AttrSpec{Name: "prov_called", Type: cty.Bool, Required: false},
+		"prov_retried":      &hcldec.AttrSpec{Name: "prov_retried", Type: cty.Bool, Required: false},
+		"prov_communicator": &hcldec.AttrSpec{Name: "prov_communicator", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */
+		"prov_ui":           &hcldec.AttrSpec{Name: "prov_ui", Type: cty.Bool, Required: false}, /* TODO(azr): could not find type */
+	}
+	return s
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui.go
new file mode 100644
index 000000000..7d75fc5a8
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui.go
@@ -0,0 +1,211 @@
+package packer
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"os"
+	"os/signal"
+	"strings"
+	"sync"
+	"syscall"
+
+	getter "github.com/hashicorp/go-getter/v2"
+)
+
+type TTY interface {
+	ReadString() (string, error)
+	Close() error
+}
+
+// The Ui interface handles all communication for Packer with the outside
+// world. This sort of control allows us to strictly control how output is
+// formatted and to support various levels of output.
+type Ui interface {
+	Ask(string) (string, error)
+	Say(string)
+	Message(string)
+	Error(string)
+	Machine(string, ...string)
+	// TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser)
+	getter.ProgressTracker
+}
+
+var ErrInterrupted = errors.New("interrupted")
+
+// BasicUi is an implementation of Ui that reads and writes from a standard Go
+// reader and writer. It is safe to be called from multiple goroutines. Machine
+// readable output is simply logged for this UI.
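
// A minimal editorial sketch of the BasicUi defined just below, wired to an
// in-memory buffer. Note how Say routes messages through LogSecretFilter
// before writing.
package main

import (
	"bytes"
	"fmt"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func main() {
	var out bytes.Buffer
	ui := &packersdk.BasicUi{
		Writer: &out,
		PB:     &packersdk.NoopProgressTracker{}, // TrackProgress delegates here
	}

	packersdk.LogSecretFilter.Set("hunter2")
	ui.Say("the password is hunter2")

	fmt.Print(out.String()) // "the password is " -- the secret is scrubbed
}
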
+type BasicUi struct { + Reader io.Reader + Writer io.Writer + ErrorWriter io.Writer + l sync.Mutex + interrupted bool + TTY TTY + PB getter.ProgressTracker +} + +var _ Ui = new(BasicUi) + +func (rw *BasicUi) Ask(query string) (string, error) { + rw.l.Lock() + defer rw.l.Unlock() + + if rw.interrupted { + return "", ErrInterrupted + } + + if rw.TTY == nil { + return "", errors.New("no available tty") + } + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM) + defer signal.Stop(sigCh) + + log.Printf("ui: ask: %s", query) + if query != "" { + if _, err := fmt.Fprint(rw.Writer, query+" "); err != nil { + return "", err + } + } + + result := make(chan string, 1) + go func() { + line, err := rw.TTY.ReadString() + if err != nil { + log.Printf("ui: scan err: %s", err) + return + } + result <- strings.TrimSpace(line) + }() + + select { + case line := <-result: + return line, nil + case <-sigCh: + // Print a newline so that any further output starts properly + // on a new line. + fmt.Fprintln(rw.Writer) + + // Mark that we were interrupted so future Ask calls fail. + rw.interrupted = true + + return "", ErrInterrupted + } +} + +func (rw *BasicUi) Say(message string) { + rw.l.Lock() + defer rw.l.Unlock() + + // Use LogSecretFilter to scrub out sensitive variables + message = LogSecretFilter.FilterString(message) + + log.Printf("ui: %s", message) + _, err := fmt.Fprint(rw.Writer, message+"\n") + if err != nil { + log.Printf("[ERR] Failed to write to UI: %s", err) + } +} + +func (rw *BasicUi) Message(message string) { + rw.l.Lock() + defer rw.l.Unlock() + + // Use LogSecretFilter to scrub out sensitive variables + message = LogSecretFilter.FilterString(message) + + log.Printf("ui: %s", message) + _, err := fmt.Fprint(rw.Writer, message+"\n") + if err != nil { + log.Printf("[ERR] Failed to write to UI: %s", err) + } +} + +func (rw *BasicUi) Error(message string) { + rw.l.Lock() + defer rw.l.Unlock() + + writer := rw.ErrorWriter + if writer == nil { + writer = rw.Writer + } + + // Use LogSecretFilter to scrub out sensitive variables + message = LogSecretFilter.FilterString(message) + + log.Printf("ui error: %s", message) + _, err := fmt.Fprint(writer, message+"\n") + if err != nil { + log.Printf("[ERR] Failed to write to UI: %s", err) + } +} + +func (rw *BasicUi) Machine(t string, args ...string) { + log.Printf("machine readable: %s %#v", t, args) +} + +func (rw *BasicUi) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) { + return rw.PB.TrackProgress(src, currentSize, totalSize, stream) +} + +// Safe is a UI that wraps another UI implementation and +// provides concurrency-safe access +type SafeUi struct { + Sem chan int + Ui Ui + PB getter.ProgressTracker +} + +var _ Ui = new(SafeUi) + +func (u *SafeUi) Ask(s string) (string, error) { + u.Sem <- 1 + ret, err := u.Ui.Ask(s) + <-u.Sem + + return ret, err +} + +func (u *SafeUi) Say(s string) { + u.Sem <- 1 + u.Ui.Say(s) + <-u.Sem +} + +func (u *SafeUi) Message(s string) { + u.Sem <- 1 + u.Ui.Message(s) + <-u.Sem +} + +func (u *SafeUi) Error(s string) { + u.Sem <- 1 + u.Ui.Error(s) + <-u.Sem +} + +func (u *SafeUi) Machine(t string, args ...string) { + u.Sem <- 1 + u.Ui.Machine(t, args...) 
+ <-u.Sem +} + +func (u *SafeUi) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) (body io.ReadCloser) { + u.Sem <- 1 + ret := u.Ui.TrackProgress(src, currentSize, totalSize, stream) + <-u.Sem + + return ret +} + +// NoopProgressTracker is a progress tracker +// that displays nothing. +type NoopProgressTracker struct{} + +// TrackProgress returns stream +func (*NoopProgressTracker) TrackProgress(_ string, _, _ int64, stream io.ReadCloser) io.ReadCloser { + return stream +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui_mock.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui_mock.go new file mode 100644 index 000000000..74600e1d9 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packer/ui_mock.go @@ -0,0 +1,88 @@ +package packer + +import ( + "bytes" + "io" + "io/ioutil" + "testing" +) + +// TestUi creates a simple UI for use in testing. +// It's not meant for "real" use. +func TestUi(t *testing.T) Ui { + var buf bytes.Buffer + return &BasicUi{ + Reader: &buf, + Writer: ioutil.Discard, + ErrorWriter: ioutil.Discard, + PB: &NoopProgressTracker{}, + } +} + +type MockUi struct { + AskCalled bool + AskQuery string + ErrorCalled bool + ErrorMessage string + MachineCalled bool + MachineType string + MachineArgs []string + MessageCalled bool + MessageMessage string + SayCalled bool + SayMessage string + + TrackProgressCalled bool + ProgressBarAddCalled bool + ProgressBarCloseCalled bool +} + +func (u *MockUi) Ask(query string) (string, error) { + u.AskCalled = true + u.AskQuery = query + return "foo", nil +} + +func (u *MockUi) Error(message string) { + u.ErrorCalled = true + u.ErrorMessage = message +} + +func (u *MockUi) Machine(t string, args ...string) { + u.MachineCalled = true + u.MachineType = t + u.MachineArgs = args +} + +func (u *MockUi) Message(message string) { + u.MessageCalled = true + u.MessageMessage = message +} + +func (u *MockUi) Say(message string) { + u.SayCalled = true + u.SayMessage = message +} + +func (u *MockUi) TrackProgress(_ string, _, _ int64, stream io.ReadCloser) (body io.ReadCloser) { + u.TrackProgressCalled = true + + return &readCloser{ + read: func(p []byte) (int, error) { + u.ProgressBarAddCalled = true + return stream.Read(p) + }, + close: func() error { + u.ProgressBarCloseCalled = true + return stream.Close() + }, + } +} + +type readCloser struct { + read func([]byte) (int, error) + close func() error +} + +func (c *readCloser) Close() error { return c.close() } +func (c *readCloser) Read(p []byte) (int, error) { return c.read(p) } diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/packerbuilderdata/generated_data.go b/vendor/github.com/hashicorp/packer-plugin-sdk/packerbuilderdata/generated_data.go new file mode 100644 index 000000000..ba78fbbac --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/packerbuilderdata/generated_data.go @@ -0,0 +1,31 @@ +// Package packerbuilderdata provides tooling for setting and getting special +// builder-generated data that will be passed to the provisioners. This data +// should be limited to runtime data like instance id, ip address, and other +// relevant details that provisioning scripts may need access to. +package packerbuilderdata + +import "github.com/hashicorp/packer-plugin-sdk/multistep" + +// This is used in the BasicPlaceholderData() func in the packer/provisioner.go +// To force users to access generated data via the "generated" func. 
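
// Editorial sketch of the flow this package describes: a builder puts runtime
// values into its state bag through GeneratedData, keyed under
// "generated_data". The instance values here are hypothetical.
package main

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-sdk/multistep"
	"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
)

func main() {
	state := new(multistep.BasicStateBag)
	gd := &packerbuilderdata.GeneratedData{State: state}

	gd.Put("instance_id", "i-0123456789") // hypothetical values
	gd.Put("ip", "10.0.0.5")

	// Provisioners later read the same map back out of the state bag.
	fmt.Println(state.Get("generated_data"))
}
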
+const PlaceholderMsg = "To set this dynamically in the Packer template, " + + "you must use the `build` function" + +// GeneratedData manages variables created and exported by a builder after +// it starts, so that provisioners and post-processors can have access to +// build data generated at runtime -- for example, instance ID or instance IP +// address. Internally, it uses the builder's multistep.StateBag. The user +// must make sure that the State field is not is not nil before calling Put(). +type GeneratedData struct { + // The builder's StateBag + State multistep.StateBag +} + +func (gd *GeneratedData) Put(key string, data interface{}) { + genData := make(map[string]interface{}) + if _, ok := gd.State.GetOk("generated_data"); ok { + genData = gd.State.Get("generated_data").(map[string]interface{}) + } + genData[key] = data + gd.State.Put("generated_data", genData) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file.go b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file.go new file mode 100644 index 000000000..ee2ef52b7 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file.go @@ -0,0 +1,133 @@ +// Package pathing determines where to put the Packer config directory based on +// host OS architecture and user environment variables. +package pathing + +import ( + "log" + "os" + "os/user" + "path/filepath" + "strings" +) + +// ConfigFile returns the default path to the configuration file. On +// Unix-like systems this is the ".packerconfig" file in the home directory. +// On Windows, this is the "packer.config" file in the application data +// directory. +func ConfigFile() (string, error) { + return configFile() +} + +// ConfigDir returns the configuration directory for Packer. +func ConfigDir() (string, error) { + return configDir() +} + +func homeDir() (string, error) { + // Prefer $APPDATA over $HOME in Windows. + // This makes it possible to use packer plugins (as installed by Chocolatey) + // in cmd/ps and msys2. + // See https://github.com/hashicorp/packer/issues/9795 + if home := os.Getenv("APPDATA"); home != "" { + return home, nil + } + + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Fall back to the passwd database if not found which follows + // the same semantics as bourne shell + u, err := user.Current() + + // Get homedir from specified username + // if it is set and different than what we have + if username := os.Getenv("USER"); username != "" && err == nil && u.Username != username { + u, err = user.Lookup(username) + } + + // Fail if we were unable to read the record + if err != nil { + return "", err + } + + return u.HomeDir, nil +} + +func configFile() (string, error) { + var dir string + if cd := os.Getenv("PACKER_CONFIG_DIR"); cd != "" { + log.Printf("Detected config directory from env var: %s", cd) + dir = cd + } else { + homedir, err := homeDir() + if err != nil { + return "", err + } + dir = homedir + } + return filepath.Join(dir, defaultConfigFile), nil +} + +func configDir() (string, error) { + var dir string + if cd := os.Getenv("PACKER_CONFIG_DIR"); cd != "" { + log.Printf("Detected config directory from env var: %s", cd) + dir = cd + } else { + homedir, err := homeDir() + if err != nil { + return "", err + } + dir = homedir + } + + return filepath.Join(dir, defaultConfigDir), nil +} + +// Given a path, check to see if it's using ~ to reference a user directory. 
+// If so, then replace that component with the requested user directory. +// In "~/", "~" gets replaced by current user's home dir. +// In "~root/", "~user" gets replaced by root's home dir. +// ~ has to be the first character of path for ExpandUser change it. +func ExpandUser(path string) (string, error) { + var ( + u *user.User + err error + ) + + // refuse to do anything with a zero-length path + if len(path) == 0 { + return path, nil + } + + // If no expansion was specified, then refuse that too + if path[0] != '~' { + return path, nil + } + + // Grab everything up to the first filepath.Separator + idx := strings.IndexAny(path, `/\`) + if idx == -1 { + idx = len(path) + } + + // Now we should be able to extract the username + username := path[:idx] + + // Check if the current user was requested + if username == "~" { + u, err = user.Current() + } else { + u, err = user.Lookup(username[1:]) + } + + // If we couldn't figure that out, then fail here + if err != nil { + return "", err + } + + // Now we can replace the path with u.HomeDir + return filepath.Join(u.HomeDir, path[idx:]), nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_unix.go b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_unix.go new file mode 100644 index 000000000..71f1f2669 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_unix.go @@ -0,0 +1,8 @@ +// +build darwin freebsd linux netbsd openbsd solaris + +package pathing + +const ( + defaultConfigFile = ".packerconfig" + defaultConfigDir = ".packer.d" +) diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_windows.go b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_windows.go new file mode 100644 index 000000000..138dd9dcd --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/pathing/config_file_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package pathing + +const ( + defaultConfigFile = "packer.config" + defaultConfigDir = "packer.d" +) diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/server.go b/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/server.go new file mode 100644 index 000000000..32a19b137 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/server.go @@ -0,0 +1,150 @@ +// The plugin package provides the functionality to both expose a Packer +// plugin binary and to connect to an existing Packer plugin binary. +// +// Packer supports plugins in the form of self-contained external static +// Go binaries. These binaries behave in a certain way (enforced by this +// package) and are connected to in a certain way (also enforced by this +// package). +package plugin + +import ( + "errors" + "fmt" + "log" + "math/rand" + "net" + "os" + "os/signal" + "runtime" + "strconv" + "sync/atomic" + "syscall" + "time" + + packrpc "github.com/hashicorp/packer-plugin-sdk/rpc" + "github.com/hashicorp/packer-plugin-sdk/tmp" +) + +// This is a count of the number of interrupts the process has received. +// This is updated with sync/atomic whenever a SIGINT is received and can +// be checked by the plugin safely to take action. +var Interrupts int32 = 0 + +const MagicCookieKey = "PACKER_PLUGIN_MAGIC_COOKIE" +const MagicCookieValue = "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2" + +// The APIVersion is outputted along with the RPC address. The plugin +// client validates this API version and will show an error if it doesn't +// know how to speak it. 
+const APIVersion = "5" + +var ErrManuallyStartedPlugin = errors.New( + "Please do not execute plugins directly. Packer will execute these for you.") + +// Server waits for a connection to this plugin and returns a Packer +// RPC server that you can use to register components and serve them. +func Server() (*packrpc.Server, error) { + if os.Getenv(MagicCookieKey) != MagicCookieValue { + return nil, ErrManuallyStartedPlugin + } + + // If there is no explicit number of Go threads to use, then set it + if os.Getenv("GOMAXPROCS") == "" { + runtime.GOMAXPROCS(runtime.NumCPU()) + } + + listener, err := serverListener() + if err != nil { + return nil, err + } + defer listener.Close() + + // Output the address to stdout + log.Printf("Plugin address: %s %s\n", + listener.Addr().Network(), listener.Addr().String()) + fmt.Printf("%s|%s|%s\n", + APIVersion, + listener.Addr().Network(), + listener.Addr().String()) + os.Stdout.Sync() + + // Accept a connection + log.Println("Waiting for connection...") + conn, err := listener.Accept() + if err != nil { + log.Printf("Error accepting connection: %s\n", err.Error()) + return nil, err + } + + // Eat the interrupts + ch := make(chan os.Signal, 1) + signal.Notify(ch, os.Interrupt, syscall.SIGTERM) + go func() { + var count int32 = 0 + for { + <-ch + newCount := atomic.AddInt32(&count, 1) + log.Printf("Received interrupt signal (count: %d). Ignoring.", newCount) + } + }() + + // Serve a single connection + log.Println("Serving a plugin connection...") + return packrpc.NewServer(conn) +} + +func serverListener() (net.Listener, error) { + if runtime.GOOS == "windows" { + return serverListener_tcp() + } + + return serverListener_unix() +} + +func serverListener_tcp() (net.Listener, error) { + minPort, err := strconv.ParseInt(os.Getenv("PACKER_PLUGIN_MIN_PORT"), 10, 32) + if err != nil { + return nil, err + } + + maxPort, err := strconv.ParseInt(os.Getenv("PACKER_PLUGIN_MAX_PORT"), 10, 32) + if err != nil { + return nil, err + } + + log.Printf("Plugin port range: [%d,%d]", minPort, maxPort) + + for port := minPort; port <= maxPort; port++ { + address := fmt.Sprintf("127.0.0.1:%d", port) + listener, err := net.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + return nil, errors.New("Couldn't bind plugin TCP listener") +} + +func serverListener_unix() (net.Listener, error) { + tf, err := tmp.File("packer-plugin") + if err != nil { + return nil, err + } + path := tf.Name() + + // Close the file and remove it because it has to not exist for + // the domain socket. + if err := tf.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + return net.Listen("unix", path) +} + +func init() { + // Seed the random number generator + rand.Seed(time.Now().UTC().UnixNano()) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/set.go b/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/set.go new file mode 100644 index 000000000..216124b4d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/plugin/set.go @@ -0,0 +1,171 @@ +package plugin + +import ( + "encoding/json" + "fmt" + "io" + "log" + "os" + "sort" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + pluginVersion "github.com/hashicorp/packer-plugin-sdk/version" + "github.com/hashicorp/packer/version" +) + +// Set is a plugin set. It's API is meant to be very close to what is returned +// by plugin.Server +// It can describe itself or run a single plugin using the CLI arguments. 
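
// An illustrative main() for a hypothetical plugin binary built on the Set
// type defined just below; the package's MockProvisioner stands in for a real
// component.
package main

import (
	"fmt"
	"os"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	"github.com/hashicorp/packer-plugin-sdk/plugin"
)

func main() {
	pps := plugin.NewSet()
	// A real plugin registers its own builders/provisioners/post-processors.
	pps.RegisterProvisioner("mock", &packersdk.MockProvisioner{})

	// "my-plugin describe" prints the JSON SetDescription;
	// "my-plugin start ..." serves a single component over RPC.
	if err := pps.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
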
+type Set struct { + version string + sdkVersion string + Builders map[string]packersdk.Builder + PostProcessors map[string]packersdk.PostProcessor + Provisioners map[string]packersdk.Provisioner +} + +// SetDescription describes a Set. +type SetDescription struct { + Version string `json:"version"` + SDKVersion string `json:"sdk_version"` + Builders []string `json:"builders"` + PostProcessors []string `json:"post_processors"` + Provisioners []string `json:"provisioners"` +} + +//// +// Setup +//// + +func NewSet() *Set { + return &Set{ + version: version.String(), + sdkVersion: version.String(), // TODO: Set me after the split + Builders: map[string]packersdk.Builder{}, + PostProcessors: map[string]packersdk.PostProcessor{}, + Provisioners: map[string]packersdk.Provisioner{}, + } +} + +func (i *Set) SetVersion(version *pluginVersion.PluginVersion) { + i.version = version.String() +} + +func (i *Set) RegisterBuilder(name string, builder packersdk.Builder) { + if _, found := i.Builders[name]; found { + panic(fmt.Errorf("registering duplicate %s builder", name)) + } + i.Builders[name] = builder +} + +func (i *Set) RegisterPostProcessor(name string, postProcessor packersdk.PostProcessor) { + if _, found := i.PostProcessors[name]; found { + panic(fmt.Errorf("registering duplicate %s post-processor", name)) + } + i.PostProcessors[name] = postProcessor +} + +func (i *Set) RegisterProvisioner(name string, provisioner packersdk.Provisioner) { + if _, found := i.Provisioners[name]; found { + panic(fmt.Errorf("registering duplicate %s provisioner", name)) + } + i.Provisioners[name] = provisioner +} + +// Run takes the os Args and runs a packer plugin command from it. +// * "describe" command makes the plugin set describe itself. +// * "start builder builder-name" starts the builder "builder-name" +// * "start post-processor example" starts the post-processor "example" +func (i *Set) Run() error { + args := os.Args[1:] + return i.RunCommand(args...) +} + +func (i *Set) RunCommand(args ...string) error { + if len(args) < 1 { + return fmt.Errorf("needs at least one argument") + } + + switch args[0] { + case "describe": + return i.jsonDescribe(os.Stdout) + case "start": + args = args[1:] + if len(args) != 2 { + return fmt.Errorf("start takes two arguments, for example 'start builder example-builder'. 
Found: %v", args) + } + return i.start(args[0], args[1]) + default: + return fmt.Errorf("Unknown command: %q", args[0]) + } +} + +func (i *Set) start(kind, name string) error { + server, err := Server() + if err != nil { + return err + } + + log.Printf("[TRACE] starting %s %s", kind, name) + + switch kind { + case "builder": + err = server.RegisterBuilder(i.Builders[name]) + case "post-processor": + err = server.RegisterPostProcessor(i.PostProcessors[name]) + case "provisioners": + err = server.RegisterProvisioner(i.Provisioners[name]) + default: + err = fmt.Errorf("Unknown plugin type: %s", kind) + } + if err != nil { + return err + } + server.Serve() + return nil +} + +//// +// Describe +//// + +func (i *Set) description() SetDescription { + return SetDescription{ + Version: i.version, + SDKVersion: i.sdkVersion, + Builders: i.buildersDescription(), + PostProcessors: i.postProcessorsDescription(), + Provisioners: i.provisionersDescription(), + } +} + +func (i *Set) jsonDescribe(out io.Writer) error { + return json.NewEncoder(out).Encode(i.description()) +} + +func (i *Set) buildersDescription() []string { + out := []string{} + for key := range i.Builders { + out = append(out, key) + } + sort.Strings(out) + return out +} + +func (i *Set) postProcessorsDescription() []string { + out := []string{} + for key := range i.PostProcessors { + out = append(out, key) + } + sort.Strings(out) + return out +} + +func (i *Set) provisionersDescription() []string { + out := []string{} + for key := range i.Provisioners { + out = append(out, key) + } + sort.Strings(out) + return out +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/random/string.go b/vendor/github.com/hashicorp/packer-plugin-sdk/random/string.go new file mode 100644 index 000000000..21c286532 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/random/string.go @@ -0,0 +1,47 @@ +// Package random is a helper for generating random alphanumeric strings. +package random + +import ( + "math/rand" + "os" + "time" +) + +var ( + PossibleNumbers = "0123456789" + PossibleLowerCase = "abcdefghijklmnopqrstuvwxyz" + PossibleUpperCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + PossibleSpecialCharacter = " !\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~" + + PossibleAlphaNum = PossibleNumbers + PossibleLowerCase + PossibleUpperCase + PossibleAlphaNumLower = PossibleNumbers + PossibleLowerCase + PossibleAlphaNumUpper = PossibleNumbers + PossibleUpperCase +) + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano() + int64(os.Getpid()))) + +// Numbers returns a random numeric string of the given length +func Numbers(length int) string { return String(PossibleNumbers, length) } + +// AlphaNum returns a random alphanumeric string of the given length. The +// returned string can contain both uppercase and lowercase letters. +func AlphaNum(length int) string { return String(PossibleAlphaNum, length) } + +// AlphaNumLower returns a random alphanumeric string of the given length. The +// returned string can contain lowercase letters, but not uppercase. +func AlphaNumLower(length int) string { return String(PossibleAlphaNumLower, length) } + +// AlphaNumUpper returns a random alphanumeric string of the given length. The +// returned string can contain uppercase letters, but not lowercase. +func AlphaNumUpper(length int) string { return String(PossibleAlphaNumUpper, length) } + +// String returns a random string of the given length, using only the component +// characters provided in the "chooseFrom" string. 
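
// Usage sketch for this package's helpers (output values are illustrative):
package main

import (
	"fmt"

	"github.com/hashicorp/packer-plugin-sdk/random"
)

func main() {
	fmt.Println(random.AlphaNum(12))     // e.g. "a8Bq02LzXw9K"
	fmt.Println(random.Numbers(6))       // e.g. "048203"
	fmt.Println(random.String("abc", 5)) // five characters drawn from "abc"
}
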
+func String(chooseFrom string, length int) (randomString string) {
+	cflen := len(chooseFrom)
+	bytes := make([]byte, length)
+	for i := range bytes {
+		bytes[i] = chooseFrom[rnd.Intn(cflen)]
+	}
+	return string(bytes)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/retry/retry.go b/vendor/github.com/hashicorp/packer-plugin-sdk/retry/retry.go
new file mode 100644
index 000000000..7b36f3c61
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/retry/retry.go
@@ -0,0 +1,119 @@
+// Package retry provides tooling to retry API calls which are known to be
+// vulnerable to throttling or flakiness due to eventual consistency.
+package retry
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"time"
+)
+
+// Config represents a retry config
+type Config struct {
+	// The operation will be retried until StartTimeout has elapsed. 0 means
+	// forever.
+	StartTimeout time.Duration
+
+	// RetryDelay gives the time elapsed after a failure and before we try
+	// again. Returns 2s by default.
+	RetryDelay func() time.Duration
+
+	// Max number of retries, 0 means infinite
+	Tries int
+
+	// ShouldRetry tells whether error should be retried. Nil defaults to always
+	// true.
+	ShouldRetry func(error) bool
+}
+
+type RetryExhaustedError struct {
+	Err error
+}
+
+func (err *RetryExhaustedError) Error() string {
+	if err == nil || err.Err == nil {
+		return ""
+	}
+	return fmt.Sprintf("retry count exhausted. Last err: %s", err.Err)
+}
+
+// Run will repeatedly retry the provided fn within the constraints set in the
+// retry Config. It will retry until one of the following conditions is met:
+// - The provided context is cancelled.
+// - The Config.StartTimeout time has passed.
+// - The function returns without an error.
+// - The maximum number of tries, Config.Tries, is exceeded.
+// - The function returns with an error that does not satisfy conditions
+//   set in the Config.ShouldRetry function.
+// If the given function (fn) does not return an error, then Run will return
+// nil. Otherwise, Run will return a relevant error.
+func (cfg Config) Run(ctx context.Context, fn func(context.Context) error) error {
+	retryDelay := func() time.Duration { return 2 * time.Second }
+	if cfg.RetryDelay != nil {
+		retryDelay = cfg.RetryDelay
+	}
+	shouldRetry := func(error) bool { return true }
+	if cfg.ShouldRetry != nil {
+		shouldRetry = cfg.ShouldRetry
+	}
+	var startTimeout <-chan time.Time // a nil channel never unblocks!
+	if cfg.StartTimeout != 0 {
+		startTimeout = time.After(cfg.StartTimeout)
+	}
+
+	var err error
+	for try := 0; ; try++ {
+		if cfg.Tries != 0 && try == cfg.Tries {
+			return &RetryExhaustedError{err}
+		}
+		if err = fn(ctx); err == nil {
+			return nil
+		}
+		if !shouldRetry(err) {
+			return err
+		}
+
+		log.Print(fmt.Errorf("Retryable error: %s", err))
+
+		select {
+		case <-ctx.Done():
+			return err
+		case <-startTimeout:
+			return err
+		default:
+			time.Sleep(retryDelay())
+		}
+	}
+}
+
+// Backoff is a self contained backoff time calculator. This struct should be
+// passed around as a copy as it changes its own fields upon any Backoff call.
+// Backoff is not thread safe. For now only a Linear backoff call is
+// implemented and the Exponential call will be implemented when needed.
+type Backoff struct {
+	// Initial time to wait. A Backoff call will change this value.
+	InitialBackoff time.Duration
+	// Maximum time returned.
+	MaxBackoff time.Duration
+	// For a Linear backoff, InitialBackoff will be multiplied by Multiplier
+	// after each call.
+ Multiplier float64 +} + +// Linear Backoff returns a linearly increasing Duration. +// n = n * Multiplier. +// the first value of n is InitialBackoff. n is maxed by MaxBackoff. +func (lb *Backoff) Linear() time.Duration { + wait := lb.InitialBackoff + lb.InitialBackoff = time.Duration(lb.Multiplier * float64(lb.InitialBackoff)) + if lb.MaxBackoff != 0 && lb.InitialBackoff > lb.MaxBackoff { + lb.InitialBackoff = lb.MaxBackoff + } + return wait +} + +// Exponential backoff panics: not implemented, yet. +func (lb *Backoff) Exponential() time.Duration { + panic("not implemented, yet") +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/artifact.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/artifact.go new file mode 100644 index 000000000..dde49c8fe --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/artifact.go @@ -0,0 +1,86 @@ +package rpc + +import ( + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Artifact where the artifact is actually +// available over an RPC connection. +type artifact struct { + commonClient +} + +// ArtifactServer wraps a packersdk.Artifact implementation and makes it +// exportable as part of a Golang RPC server. +type ArtifactServer struct { + artifact packersdk.Artifact +} + +func (a *artifact) BuilderId() (result string) { + a.client.Call(a.endpoint+".BuilderId", new(interface{}), &result) + return +} + +func (a *artifact) Files() (result []string) { + a.client.Call(a.endpoint+".Files", new(interface{}), &result) + return +} + +func (a *artifact) Id() (result string) { + a.client.Call(a.endpoint+".Id", new(interface{}), &result) + return +} + +func (a *artifact) String() (result string) { + a.client.Call(a.endpoint+".String", new(interface{}), &result) + return +} + +func (a *artifact) State(name string) (result interface{}) { + a.client.Call(a.endpoint+".State", name, &result) + return +} + +func (a *artifact) Destroy() error { + var result error + if err := a.client.Call(a.endpoint+".Destroy", new(interface{}), &result); err != nil { + return err + } + + return result +} + +func (s *ArtifactServer) BuilderId(args *interface{}, reply *string) error { + *reply = s.artifact.BuilderId() + return nil +} + +func (s *ArtifactServer) Files(args *interface{}, reply *[]string) error { + *reply = s.artifact.Files() + return nil +} + +func (s *ArtifactServer) Id(args *interface{}, reply *string) error { + *reply = s.artifact.Id() + return nil +} + +func (s *ArtifactServer) String(args *interface{}, reply *string) error { + *reply = s.artifact.String() + return nil +} + +func (s *ArtifactServer) State(name string, reply *interface{}) error { + *reply = s.artifact.State(name) + return nil +} + +func (s *ArtifactServer) Destroy(args *interface{}, reply *error) error { + err := s.artifact.Destroy() + if err != nil { + err = NewBasicError(err) + } + + *reply = err + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/build.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/build.go new file mode 100644 index 000000000..b5eb851ed --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/build.go @@ -0,0 +1,173 @@ +package rpc + +import ( + "context" + "log" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Build where the build is actually executed +// over an RPC connection. 
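
// A usage sketch for the retry package above (errThrottled is a hypothetical
// sentinel error; the timings are chosen for illustration). Linear backoff
// doubles the delay after each attempt, capped at MaxBackoff.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/packer-plugin-sdk/retry"
)

var errThrottled = errors.New("throttled")

func main() {
	attempts := 0
	backoff := retry.Backoff{
		InitialBackoff: 100 * time.Millisecond,
		MaxBackoff:     time.Second,
		Multiplier:     2,
	}

	err := retry.Config{
		Tries:       5,
		RetryDelay:  backoff.Linear,
		ShouldRetry: func(err error) bool { return errors.Is(err, errThrottled) },
	}.Run(context.Background(), func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errThrottled // retried, since ShouldRetry returns true
		}
		return nil
	})

	fmt.Println(attempts, err) // 3 <nil>
}
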
+type build struct { + commonClient +} + +// BuildServer wraps a packersdk.Build implementation and makes it exportable +// as part of a Golang RPC server. +type BuildServer struct { + context context.Context + contextCancel func() + + build packersdk.Build + mux *muxBroker +} + +type BuildPrepareResponse struct { + Warnings []string + Error *BasicError +} + +func (b *build) Name() (result string) { + b.client.Call("Build.Name", new(interface{}), &result) + return +} + +func (b *build) Prepare() ([]string, error) { + var resp BuildPrepareResponse + if cerr := b.client.Call("Build.Prepare", new(interface{}), &resp); cerr != nil { + return nil, cerr + } + var err error = nil + if resp.Error != nil { + err = resp.Error + } + + return resp.Warnings, err +} + +func (b *build) Run(ctx context.Context, ui packersdk.Ui) ([]packersdk.Artifact, error) { + nextId := b.mux.NextId() + server := newServerWithMux(b.mux, nextId) + server.RegisterUi(ui) + go server.Serve() + + done := make(chan interface{}) + defer close(done) + go func() { + select { + case <-ctx.Done(): + log.Printf("Cancelling build after context cancellation %v", ctx.Err()) + if err := b.client.Call("Build.Cancel", new(interface{}), new(interface{})); err != nil { + log.Printf("Error cancelling builder: %s", err) + } + case <-done: + } + }() + + var result []uint32 + if err := b.client.Call("Build.Run", nextId, &result); err != nil { + return nil, err + } + + artifacts := make([]packersdk.Artifact, len(result)) + for i, streamId := range result { + client, err := newClientWithMux(b.mux, streamId) + if err != nil { + return nil, err + } + + artifacts[i] = client.Artifact() + } + + return artifacts, nil +} + +func (b *build) SetDebug(val bool) { + if err := b.client.Call("Build.SetDebug", val, new(interface{})); err != nil { + panic(err) + } +} + +func (b *build) SetForce(val bool) { + if err := b.client.Call("Build.SetForce", val, new(interface{})); err != nil { + panic(err) + } +} + +func (b *build) SetOnError(val string) { + if err := b.client.Call("Build.SetOnError", val, new(interface{})); err != nil { + panic(err) + } +} + +func (b *build) Cancel() { + if err := b.client.Call("Build.Cancel", new(interface{}), new(interface{})); err != nil { + panic(err) + } +} + +func (b *BuildServer) Name(args *interface{}, reply *string) error { + *reply = b.build.Name() + return nil +} + +func (b *BuildServer) Prepare(args *interface{}, resp *BuildPrepareResponse) error { + warnings, err := b.build.Prepare() + *resp = BuildPrepareResponse{ + Warnings: warnings, + Error: NewBasicError(err), + } + return nil +} + +func (b *BuildServer) Run(streamId uint32, reply *[]uint32) error { + if b.context == nil { + b.context, b.contextCancel = context.WithCancel(context.Background()) + } + + client, err := newClientWithMux(b.mux, streamId) + if err != nil { + return NewBasicError(err) + } + defer client.Close() + + artifacts, err := b.build.Run(b.context, client.Ui()) + if err != nil { + return NewBasicError(err) + } + + *reply = make([]uint32, len(artifacts)) + for i, artifact := range artifacts { + streamId := b.mux.NextId() + server := newServerWithMux(b.mux, streamId) + server.RegisterArtifact(artifact) + go server.Serve() + + (*reply)[i] = streamId + } + + return nil +} + +func (b *BuildServer) SetDebug(val *bool, reply *interface{}) error { + b.build.SetDebug(*val) + return nil +} + +func (b *BuildServer) SetForce(val *bool, reply *interface{}) error { + b.build.SetForce(*val) + return nil +} + +func (b *BuildServer) SetOnError(val *string, reply 
*interface{}) error { + b.build.SetOnError(*val) + return nil +} + +func (b *BuildServer) Cancel(args *interface{}, reply *interface{}) error { + if b.contextCancel != nil { + b.contextCancel() + } + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/builder.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/builder.go new file mode 100644 index 000000000..99761d42e --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/builder.go @@ -0,0 +1,137 @@ +package rpc + +import ( + "context" + "log" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Builder where the builder is actually executed +// over an RPC connection. +type builder struct { + commonClient +} + +// BuilderServer wraps a packersdk.Builder implementation and makes it exportable +// as part of a Golang RPC server. +type BuilderServer struct { + context context.Context + contextCancel func() + + commonServer + builder packersdk.Builder +} + +type BuilderPrepareArgs struct { + Configs []interface{} +} + +type BuilderPrepareResponse struct { + GeneratedVars []string + Warnings []string + Error *BasicError +} + +func (b *builder) Prepare(config ...interface{}) ([]string, []string, error) { + config, err := encodeCTYValues(config) + if err != nil { + return nil, nil, err + } + var resp BuilderPrepareResponse + cerr := b.client.Call(b.endpoint+".Prepare", &BuilderPrepareArgs{config}, &resp) + if cerr != nil { + return nil, nil, cerr + } + + if resp.Error != nil { + err = resp.Error + } + + return resp.GeneratedVars, resp.Warnings, err +} + +func (b *builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) { + nextId := b.mux.NextId() + server := newServerWithMux(b.mux, nextId) + server.RegisterHook(hook) + server.RegisterUi(ui) + go server.Serve() + + done := make(chan interface{}) + defer close(done) + go func() { + select { + case <-ctx.Done(): + log.Printf("Cancelling builder after context cancellation %v", ctx.Err()) + if err := b.client.Call(b.endpoint+".Cancel", new(interface{}), new(interface{})); err != nil { + log.Printf("Error cancelling builder: %s", err) + } + case <-done: + } + }() + + var responseId uint32 + + if err := b.client.Call(b.endpoint+".Run", nextId, &responseId); err != nil { + return nil, err + } + + if responseId == 0 { + return nil, nil + } + + client, err := newClientWithMux(b.mux, responseId) + if err != nil { + return nil, err + } + + return client.Artifact(), nil +} + +func (b *BuilderServer) Prepare(args *BuilderPrepareArgs, reply *BuilderPrepareResponse) error { + config, err := decodeCTYValues(args.Configs) + if err != nil { + return err + } + generated, warnings, err := b.builder.Prepare(config...) 
+ *reply = BuilderPrepareResponse{ + GeneratedVars: generated, + Warnings: warnings, + Error: NewBasicError(err), + } + return nil +} + +func (b *BuilderServer) Run(streamId uint32, reply *uint32) error { + client, err := newClientWithMux(b.mux, streamId) + if err != nil { + return NewBasicError(err) + } + defer client.Close() + + if b.context == nil { + b.context, b.contextCancel = context.WithCancel(context.Background()) + } + + artifact, err := b.builder.Run(b.context, client.Ui(), client.Hook()) + if err != nil { + return NewBasicError(err) + } + + *reply = 0 + if artifact != nil { + streamId = b.mux.NextId() + artifactServer := newServerWithMux(b.mux, streamId) + artifactServer.RegisterArtifact(artifact) + go artifactServer.Serve() + *reply = streamId + } + + return nil +} + +func (b *BuilderServer) Cancel(args *interface{}, reply *interface{}) error { + b.contextCancel() + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/client.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/client.go new file mode 100644 index 000000000..ff7d5bb91 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/client.go @@ -0,0 +1,149 @@ +package rpc + +import ( + "io" + "log" + "net/rpc" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/ugorji/go/codec" +) + +// Client is the client end that communicates with a Packer RPC server. +// Establishing a connection is up to the user. The Client can communicate over +// any ReadWriteCloser. In Packer, each "plugin" (builder, provisioner, +// and post-processor) creates and launches a server. The the packer "core" +// creates and uses the client. +type Client struct { + mux *muxBroker + client *rpc.Client + closeMux bool +} + +func NewClient(rwc io.ReadWriteCloser) (*Client, error) { + mux, err := newMuxBrokerClient(rwc) + if err != nil { + return nil, err + } + go mux.Run() + + result, err := newClientWithMux(mux, 0) + if err != nil { + mux.Close() + return nil, err + } + + result.closeMux = true + return result, err +} + +func newClientWithMux(mux *muxBroker, streamId uint32) (*Client, error) { + clientConn, err := mux.Dial(streamId) + if err != nil { + return nil, err + } + + h := &codec.MsgpackHandle{ + RawToString: true, + WriteExt: true, + } + clientCodec := codec.GoRpc.ClientCodec(clientConn, h) + + return &Client{ + mux: mux, + client: rpc.NewClientWithCodec(clientCodec), + closeMux: false, + }, nil +} + +func (c *Client) Close() error { + if err := c.client.Close(); err != nil { + return err + } + + if c.closeMux { + log.Printf("[WARN] Client is closing mux") + return c.mux.Close() + } + + return nil +} + +func (c *Client) Artifact() packersdk.Artifact { + return &artifact{ + commonClient: commonClient{ + endpoint: DefaultArtifactEndpoint, + client: c.client, + }, + } +} + +func (c *Client) Build() packersdk.Build { + return &build{ + commonClient: commonClient{ + endpoint: DefaultBuildEndpoint, + client: c.client, + mux: c.mux, + }, + } +} + +func (c *Client) Builder() packersdk.Builder { + return &builder{ + commonClient: commonClient{ + endpoint: DefaultBuilderEndpoint, + client: c.client, + mux: c.mux, + }, + } +} + +func (c *Client) Communicator() packersdk.Communicator { + return &communicator{ + commonClient: commonClient{ + endpoint: DefaultCommunicatorEndpoint, + client: c.client, + mux: c.mux, + }, + } +} + +func (c *Client) Hook() packersdk.Hook { + return &hook{ + commonClient: commonClient{ + endpoint: DefaultHookEndpoint, + client: c.client, + mux: c.mux, + }, + } +} 
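
// A sketch of how the core's Client pairs with a plugin's Server, under the
// assumption that an in-memory net.Pipe can stand in for the negotiated
// plugin connection (signatures as used elsewhere in this diff: NewServer,
// RegisterProvisioner, Serve). Illustrative, not a drop-in test.
package main

import (
	"fmt"
	"net"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
	packrpc "github.com/hashicorp/packer-plugin-sdk/rpc"
)

func main() {
	serverConn, clientConn := net.Pipe()

	go func() {
		server, err := packrpc.NewServer(serverConn)
		if err != nil {
			panic(err)
		}
		if err := server.RegisterProvisioner(&packersdk.MockProvisioner{}); err != nil {
			panic(err)
		}
		server.Serve()
	}()

	client, err := packrpc.NewClient(clientConn)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	// The returned value satisfies packersdk.Provisioner but proxies every
	// call over the RPC connection to the MockProvisioner above.
	p := client.Provisioner()
	fmt.Println(p.Prepare()) // <nil>
}
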
+ +func (c *Client) PostProcessor() packersdk.PostProcessor { + return &postProcessor{ + commonClient: commonClient{ + endpoint: DefaultPostProcessorEndpoint, + client: c.client, + mux: c.mux, + }, + } +} + +func (c *Client) Provisioner() packersdk.Provisioner { + return &provisioner{ + commonClient: commonClient{ + endpoint: DefaultProvisionerEndpoint, + client: c.client, + mux: c.mux, + }, + } +} + +func (c *Client) Ui() packersdk.Ui { + return &Ui{ + commonClient: commonClient{ + endpoint: DefaultUiEndpoint, + client: c.client, + }, + endpoint: DefaultUiEndpoint, + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/common.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/common.go new file mode 100644 index 000000000..7044ebcaf --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/common.go @@ -0,0 +1,71 @@ +package rpc + +import ( + "bytes" + "encoding/gob" + "fmt" + "net/rpc" + + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// commonClient allows to rpc call funcs that can be defined on the different +// build blocks of packer +type commonClient struct { + // endpoint is usually the type of build block we are connecting to. + // + // eg: Provisioner / PostProcessor / Builder / Artifact / Communicator + endpoint string + client *rpc.Client + mux *muxBroker +} + +type commonServer struct { + mux *muxBroker + selfConfigurable interface { + ConfigSpec() hcldec.ObjectSpec + } +} + +type ConfigSpecResponse struct { + ConfigSpec []byte +} + +func (p *commonClient) ConfigSpec() hcldec.ObjectSpec { + // TODO(azr): the RPC Call can fail but the ConfigSpec signature doesn't + // return an error; should we simply panic ? Logging this for now; will + // decide later. The correct approach would probably be to return an error + // in ConfigSpec but that will break a lot of things. + resp := &ConfigSpecResponse{} + cerr := p.client.Call(p.endpoint+".ConfigSpec", new(interface{}), resp) + if cerr != nil { + err := fmt.Errorf("ConfigSpec failed: %v", cerr) + panic(err.Error()) + } + + res := hcldec.ObjectSpec{} + err := gob.NewDecoder(bytes.NewReader(resp.ConfigSpec)).Decode(&res) + if err != nil { + panic("ici:" + err.Error()) + } + return res +} + +func (s *commonServer) ConfigSpec(_ interface{}, reply *ConfigSpecResponse) error { + spec := s.selfConfigurable.ConfigSpec() + b := bytes.NewBuffer(nil) + err := gob.NewEncoder(b).Encode(spec) + reply.ConfigSpec = b.Bytes() + + return err +} + +func init() { + gob.Register(new(hcldec.AttrSpec)) + gob.Register(new(hcldec.BlockSpec)) + gob.Register(new(hcldec.BlockAttrsSpec)) + gob.Register(new(hcldec.BlockListSpec)) + gob.Register(new(hcldec.BlockObjectSpec)) + gob.Register(new(cty.Value)) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/communicator.go new file mode 100644 index 000000000..02b55eeca --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/communicator.go @@ -0,0 +1,347 @@ +package rpc + +import ( + "context" + "encoding/gob" + "io" + "log" + "net/rpc" + "os" + "sync" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Communicator where the communicator is actually +// executed over an RPC connection. +type communicator struct { + commonClient +} + +// CommunicatorServer wraps a packersdk.Communicator implementation and makes +// it exportable as part of a Golang RPC server. 
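
// The rpc communicator above proxies the packersdk.Communicator contract; an
// editorial sketch of that contract from the caller's side, exercised locally
// with the SDK's MockCommunicator:
package main

import (
	"bytes"
	"fmt"
	"strings"

	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
)

func main() {
	comm := &packersdk.MockCommunicator{DownloadData: "remote contents"}

	if err := comm.Upload("/tmp/hello.txt", strings.NewReader("hi"), nil); err != nil {
		panic(err)
	}
	fmt.Println(comm.UploadPath, comm.UploadData) // /tmp/hello.txt hi

	var buf bytes.Buffer
	if err := comm.Download("/tmp/hello.txt", &buf); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // remote contents
}
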
+type CommunicatorServer struct { + commonServer + c packersdk.Communicator +} + +type CommandFinished struct { + ExitStatus int +} + +type CommunicatorStartArgs struct { + Command string + StdinStreamId uint32 + StdoutStreamId uint32 + StderrStreamId uint32 + ResponseStreamId uint32 +} + +type CommunicatorDownloadArgs struct { + Path string + WriterStreamId uint32 +} + +type CommunicatorUploadArgs struct { + Path string + ReaderStreamId uint32 + FileInfo *fileInfo +} + +type CommunicatorUploadDirArgs struct { + Dst string + Src string + Exclude []string +} + +type CommunicatorDownloadDirArgs struct { + Dst string + Src string + Exclude []string +} + +func Communicator(client *rpc.Client) *communicator { + return &communicator{ + commonClient: commonClient{ + client: client, + endpoint: DefaultCommunicatorEndpoint, + }, + } +} + +func (c *communicator) Start(ctx context.Context, cmd *packersdk.RemoteCmd) (err error) { + var args CommunicatorStartArgs + args.Command = cmd.Command + + var wg sync.WaitGroup + + if cmd.Stdin != nil { + args.StdinStreamId = c.mux.NextId() + go func() { + serveSingleCopy("stdin", c.mux, args.StdinStreamId, nil, cmd.Stdin) + }() + } + + if cmd.Stdout != nil { + wg.Add(1) + args.StdoutStreamId = c.mux.NextId() + go func() { + defer wg.Done() + serveSingleCopy("stdout", c.mux, args.StdoutStreamId, cmd.Stdout, nil) + }() + } + + if cmd.Stderr != nil { + wg.Add(1) + args.StderrStreamId = c.mux.NextId() + go func() { + defer wg.Done() + serveSingleCopy("stderr", c.mux, args.StderrStreamId, cmd.Stderr, nil) + }() + } + + responseStreamId := c.mux.NextId() + args.ResponseStreamId = responseStreamId + + go func() { + conn, err := c.mux.Accept(responseStreamId) + wg.Wait() + if err != nil { + log.Printf("[ERR] Error accepting response stream %d: %s", + responseStreamId, err) + cmd.SetExited(123) + return + } + defer conn.Close() + + var finished CommandFinished + decoder := gob.NewDecoder(conn) + if err := decoder.Decode(&finished); err != nil { + log.Printf("[ERR] Error decoding response stream %d: %s", + responseStreamId, err) + cmd.SetExited(123) + return + } + + log.Printf("[INFO] RPC client: Communicator ended with: %d", finished.ExitStatus) + cmd.SetExited(finished.ExitStatus) + }() + + err = c.client.Call(c.endpoint+".Start", &args, new(interface{})) + return +} + +func (c *communicator) Upload(path string, r io.Reader, fi *os.FileInfo) (err error) { + // Pipe the reader through to the connection + streamId := c.mux.NextId() + go serveSingleCopy("uploadData", c.mux, streamId, nil, r) + + args := CommunicatorUploadArgs{ + Path: path, + ReaderStreamId: streamId, + } + + if fi != nil { + args.FileInfo = NewFileInfo(*fi) + } + + err = c.client.Call(c.endpoint+".Upload", &args, new(interface{})) + return +} + +func (c *communicator) UploadDir(dst string, src string, exclude []string) error { + args := &CommunicatorUploadDirArgs{ + Dst: dst, + Src: src, + Exclude: exclude, + } + + var reply error + err := c.client.Call(c.endpoint+".UploadDir", args, &reply) + if err == nil { + err = reply + } + + return err +} + +func (c *communicator) DownloadDir(src string, dst string, exclude []string) error { + args := &CommunicatorDownloadDirArgs{ + Dst: dst, + Src: src, + Exclude: exclude, + } + + var reply error + err := c.client.Call(c.endpoint+".DownloadDir", args, &reply) + if err == nil { + err = reply + } + + return err +} + +func (c *communicator) Download(path string, w io.Writer) (err error) { + // Serve a single connection and a single copy + streamId := c.mux.NextId() + 
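+	// The copy goroutine below serves the writer end on its own stream;
+	// Download must not return until that copy finishes, or the caller could
+	// observe a partially written destination.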
+	waitServer := make(chan struct{})
+	go func() {
+		serveSingleCopy("downloadWriter", c.mux, streamId, w, nil)
+		close(waitServer)
+	}()
+
+	args := CommunicatorDownloadArgs{
+		Path:           path,
+		WriterStreamId: streamId,
+	}
+
+	// Start sending data to the RPC server
+	err = c.client.Call(c.endpoint+".Download", &args, new(interface{}))
+
+	// Wait for the RPC server to finish receiving the data before we return
+	<-waitServer
+
+	return
+}
+
+func (c *CommunicatorServer) Start(args *CommunicatorStartArgs, reply *interface{}) error {
+	ctx := context.TODO()
+
+	// Build the RemoteCmd on this side so that it all pipes over
+	// to the remote side.
+	var cmd packersdk.RemoteCmd
+	cmd.Command = args.Command
+
+	// Create a channel to signal we're done so that we can close
+	// our stdin/stdout/stderr streams
+	toClose := make([]io.Closer, 0)
+	doneCh := make(chan struct{})
+	go func() {
+		<-doneCh
+		for _, conn := range toClose {
+			defer conn.Close()
+		}
+	}()
+
+	if args.StdinStreamId > 0 {
+		conn, err := c.mux.Dial(args.StdinStreamId)
+		if err != nil {
+			close(doneCh)
+			return NewBasicError(err)
+		}
+
+		toClose = append(toClose, conn)
+		cmd.Stdin = conn
+	}
+
+	if args.StdoutStreamId > 0 {
+		conn, err := c.mux.Dial(args.StdoutStreamId)
+		if err != nil {
+			close(doneCh)
+			return NewBasicError(err)
+		}
+
+		toClose = append(toClose, conn)
+		cmd.Stdout = conn
+	}
+
+	if args.StderrStreamId > 0 {
+		conn, err := c.mux.Dial(args.StderrStreamId)
+		if err != nil {
+			close(doneCh)
+			return NewBasicError(err)
+		}
+
+		toClose = append(toClose, conn)
+		cmd.Stderr = conn
+	}
+
+	// Connect to the response address so we can write our result to it
+	// when ready.
+	responseC, err := c.mux.Dial(args.ResponseStreamId)
+	if err != nil {
+		close(doneCh)
+		return NewBasicError(err)
+	}
+	responseWriter := gob.NewEncoder(responseC)
+
+	// Start the actual command
+	err = c.c.Start(ctx, &cmd)
+	if err != nil {
+		close(doneCh)
+		return NewBasicError(err)
+	}
+
+	// Start a goroutine to spin and wait for the process to actually
+	// exit. When it does, report it back to the caller...
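+	// The exit status travels over the dedicated response stream: this side
+	// gob-encodes a CommandFinished value, and the goroutine the client set
+	// up in its Start decodes it and calls cmd.SetExited.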
+	go func() {
+		defer close(doneCh)
+		defer responseC.Close()
+		cmd.Wait()
+		log.Printf("[INFO] RPC endpoint: Communicator ended with: %d", cmd.ExitStatus())
+		responseWriter.Encode(&CommandFinished{cmd.ExitStatus()})
+	}()
+
+	return nil
+}
+
+func (c *CommunicatorServer) Upload(args *CommunicatorUploadArgs, reply *interface{}) (err error) {
+	readerC, err := c.mux.Dial(args.ReaderStreamId)
+	if err != nil {
+		return
+	}
+	defer readerC.Close()
+
+	var fi *os.FileInfo
+	if args.FileInfo != nil {
+		fi = new(os.FileInfo)
+		*fi = *args.FileInfo
+	}
+	err = c.c.Upload(args.Path, readerC, fi)
+	return
+}
+
+func (c *CommunicatorServer) UploadDir(args *CommunicatorUploadDirArgs, reply *error) error {
+	return c.c.UploadDir(args.Dst, args.Src, args.Exclude)
+}
+
+func (c *CommunicatorServer) DownloadDir(args *CommunicatorDownloadDirArgs, reply *error) error {
+	return c.c.DownloadDir(args.Src, args.Dst, args.Exclude)
+}
+
+func (c *CommunicatorServer) Download(args *CommunicatorDownloadArgs, reply *interface{}) (err error) {
+	writerC, err := c.mux.Dial(args.WriterStreamId)
+	if err != nil {
+		return
+	}
+	defer writerC.Close()
+
+	err = c.c.Download(args.Path, writerC)
+	return
+}
+
+func serveSingleCopy(name string, mux *muxBroker, id uint32, dst io.Writer, src io.Reader) {
+	conn, err := mux.Accept(id)
+	if err != nil {
+		log.Printf("[ERR] '%s' accept error: %s", name, err)
+		return
+	}
+
+	// Be sure to close the connection after we're done copying so
+	// that an EOF will successfully be sent to the remote side
+	defer conn.Close()
+
+	// The connection is the destination/source that is nil
+	if dst == nil {
+		dst = conn
+	} else {
+		src = conn
+	}
+
+	written, err := io.Copy(dst, src)
+	log.Printf("[INFO] %d bytes written for '%s'", written, name)
+	if err != nil {
+		log.Printf("[ERR] '%s' copy error: %s", name, err)
+	}
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/cty_encode.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/cty_encode.go
new file mode 100644
index 000000000..570e0ee13
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/cty_encode.go
@@ -0,0 +1,35 @@
+package rpc
+
+import (
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/json"
+)
+
+// cty.Value does not know how to encode itself through the wire so we
+// transform it to bytes.
+func encodeCTYValues(config []interface{}) ([]interface{}, error) {
+	for i := range config {
+		if v, ok := config[i].(cty.Value); ok {
+			b, err := json.Marshal(v, cty.DynamicPseudoType)
+			if err != nil {
+				return nil, err
+			}
+			config[i] = b
+		}
+	}
+	return config, nil
+}
+
+// decodeCTYValues will try to decode a cty value when it finds a byte slice
+func decodeCTYValues(config []interface{}) ([]interface{}, error) {
+	for i := range config {
+		if b, ok := config[i].([]byte); ok {
+			t, err := json.Unmarshal(b, cty.DynamicPseudoType)
+			if err != nil {
+				return nil, err
+			}
+			config[i] = t
+		}
+	}
+	return config, nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/error.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/error.go
new file mode 100644
index 000000000..c3ab7b1a4
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/error.go
@@ -0,0 +1,21 @@
+package rpc
+
+// BasicError is a type that wraps error types so that they can be messaged
+// across RPC channels. Since "error" is an interface, we can't always
+// gob-encode the underlying structure. This is a valid error interface
+// implementer that we will push across.
+type BasicError struct { + Message string +} + +func NewBasicError(err error) *BasicError { + if err == nil { + return nil + } + + return &BasicError{err.Error()} +} + +func (e *BasicError) Error() string { + return e.Message +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/fileinfo.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/fileinfo.go new file mode 100644 index 000000000..299a93138 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/fileinfo.go @@ -0,0 +1,33 @@ +package rpc + +import ( + "os" + "time" +) + +func NewFileInfo(fi os.FileInfo) *fileInfo { + if fi == nil { + return nil + } + + return &fileInfo{N: fi.Name(), S: fi.Size(), M: fi.Mode(), T: fi.ModTime()} +} + +type fileInfo struct { + N string + S int64 + M os.FileMode + T time.Time +} + +func (fi fileInfo) Name() string { return fi.N } +func (fi fileInfo) Size() int64 { return fi.S } +func (fi fileInfo) Mode() os.FileMode { return fi.M } +func (fi fileInfo) ModTime() time.Time { + if fi.T.IsZero() { + return time.Now() + } + return fi.T +} +func (fi fileInfo) IsDir() bool { return fi.M.IsDir() } +func (fi fileInfo) Sys() interface{} { return nil } diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/hook.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/hook.go new file mode 100644 index 000000000..7971501ff --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/hook.go @@ -0,0 +1,90 @@ +package rpc + +import ( + "context" + "log" + "sync" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Hook where the hook is actually executed +// over an RPC connection. +type hook struct { + commonClient +} + +// HookServer wraps a packersdk.Hook implementation and makes it exportable +// as part of a Golang RPC server. 
+type HookServer struct { + context context.Context + contextCancel func() + + hook packersdk.Hook + lock sync.Mutex + mux *muxBroker +} + +type HookRunArgs struct { + Name string + Data interface{} + StreamId uint32 +} + +func (h *hook) Run(ctx context.Context, name string, ui packersdk.Ui, comm packersdk.Communicator, data interface{}) error { + nextId := h.mux.NextId() + server := newServerWithMux(h.mux, nextId) + server.RegisterCommunicator(comm) + server.RegisterUi(ui) + go server.Serve() + + done := make(chan interface{}) + defer close(done) + go func() { + select { + case <-ctx.Done(): + log.Printf("Cancelling hook after context cancellation %v", ctx.Err()) + if err := h.client.Call(h.endpoint+".Cancel", new(interface{}), new(interface{})); err != nil { + log.Printf("Error cancelling builder: %s", err) + } + case <-done: + } + }() + + args := HookRunArgs{ + Name: name, + Data: data, + StreamId: nextId, + } + + return h.client.Call(h.endpoint+".Run", &args, new(interface{})) +} + +func (h *HookServer) Run(args *HookRunArgs, reply *interface{}) error { + client, err := newClientWithMux(h.mux, args.StreamId) + if err != nil { + return NewBasicError(err) + } + defer client.Close() + + h.lock.Lock() + if h.context == nil { + h.context, h.contextCancel = context.WithCancel(context.Background()) + } + h.lock.Unlock() + if err := h.hook.Run(h.context, args.Name, client.Ui(), client.Communicator(), args.Data); err != nil { + return NewBasicError(err) + } + + *reply = nil + return nil +} + +func (h *HookServer) Cancel(args *interface{}, reply *interface{}) error { + h.lock.Lock() + if h.contextCancel != nil { + h.contextCancel() + } + h.lock.Unlock() + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/init.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/init.go new file mode 100644 index 000000000..eeeb9e0c1 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/init.go @@ -0,0 +1,15 @@ +/* +Package rpc contains the implementation of the remote procedure call code that +the Packer core uses to communicate with packer plugins. As a plugin maintainer, +you are unlikely to need to directly import or use this package, but it +underpins the packer server that all plugins must implement. +*/ +package rpc + +import "encoding/gob" + +func init() { + gob.Register(new(map[string]string)) + gob.Register(make([]interface{}, 0)) + gob.Register(new(BasicError)) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/mux_broker.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/mux_broker.go new file mode 100644 index 000000000..64653b1ed --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/mux_broker.go @@ -0,0 +1,189 @@ +package rpc + +import ( + "encoding/binary" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/hashicorp/yamux" +) + +// MuxBroker is responsible for brokering multiplexed connections by unique ID. +// +// This allows a plugin to request a channel with a specific ID to connect to +// or accept a connection from, and the broker handles the details of +// holding these channels open while they're being negotiated. 
+type muxBroker struct { + nextId uint32 + session *yamux.Session + streams map[uint32]*muxBrokerPending + + sync.Mutex +} + +type muxBrokerPending struct { + ch chan net.Conn + doneCh chan struct{} +} + +func newMuxBroker(s *yamux.Session) *muxBroker { + return &muxBroker{ + session: s, + streams: make(map[uint32]*muxBrokerPending), + } +} + +func newMuxBrokerClient(rwc io.ReadWriteCloser) (*muxBroker, error) { + s, err := yamux.Client(rwc, nil) + if err != nil { + return nil, err + } + + return newMuxBroker(s), nil +} + +func newMuxBrokerServer(rwc io.ReadWriteCloser) (*muxBroker, error) { + s, err := yamux.Server(rwc, nil) + if err != nil { + return nil, err + } + + return newMuxBroker(s), nil +} + +// Accept accepts a connection by ID. +// +// This should not be called multiple times with the same ID at one time. +func (m *muxBroker) Accept(id uint32) (net.Conn, error) { + var c net.Conn + p := m.getStream(id) + select { + case c = <-p.ch: + close(p.doneCh) + case <-time.After(5 * time.Second): + m.Lock() + defer m.Unlock() + delete(m.streams, id) + + return nil, fmt.Errorf("timeout waiting for accept") + } + + // Ack our connection + if err := binary.Write(c, binary.LittleEndian, id); err != nil { + c.Close() + return nil, err + } + + return c, nil +} + +// Close closes the connection and all sub-connections. +func (m *muxBroker) Close() error { + return m.session.Close() +} + +// Dial opens a connection by ID. +func (m *muxBroker) Dial(id uint32) (net.Conn, error) { + // Open the stream + stream, err := m.session.OpenStream() + if err != nil { + return nil, err + } + + // Write the stream ID onto the wire. + if err := binary.Write(stream, binary.LittleEndian, id); err != nil { + stream.Close() + return nil, err + } + + // Read the ack that we connected. Then we're off! + var ack uint32 + if err := binary.Read(stream, binary.LittleEndian, &ack); err != nil { + stream.Close() + return nil, err + } + if ack != id { + stream.Close() + return nil, fmt.Errorf("bad ack: %d (expected %d)", ack, id) + } + + return stream, nil +} + +// NextId returns a unique ID to use next. +func (m *muxBroker) NextId() uint32 { + return atomic.AddUint32(&m.nextId, 1) +} + +// Run starts the brokering and should be executed in a goroutine, since it +// blocks forever, or until the session closes. +func (m *muxBroker) Run() { + for { + stream, err := m.session.AcceptStream() + if err != nil { + // Once we receive an error, just exit + break + } + + // Read the stream ID from the stream + var id uint32 + if err := binary.Read(stream, binary.LittleEndian, &id); err != nil { + stream.Close() + continue + } + + // Initialize the waiter + p := m.getStream(id) + select { + case p.ch <- stream: + default: + } + + // Wait for a timeout + go m.timeoutWait(id, p) + } +} + +func (m *muxBroker) getStream(id uint32) *muxBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.streams[id] + if ok { + return p + } + + m.streams[id] = &muxBrokerPending{ + ch: make(chan net.Conn, 1), + doneCh: make(chan struct{}), + } + return m.streams[id] +} + +func (m *muxBroker) timeoutWait(id uint32, p *muxBrokerPending) { + // Wait for the stream to either be picked up and connected, or + // for a timeout. + timeout := false + select { + case <-p.doneCh: + case <-time.After(5 * time.Second): + timeout = true + } + + m.Lock() + defer m.Unlock() + + // Delete the stream so no one else can grab it + delete(m.streams, id) + + // If we timed out, then check if we have a channel in the buffer, + // and if so, close it. 
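+	// (The pending channel has capacity 1 and Run offers streams with a
+	// non-blocking send, so at most one stray stream can be buffered here.)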
+ if timeout { + s := <-p.ch + s.Close() + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/post_processor.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/post_processor.go new file mode 100644 index 000000000..9c2de87b6 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/post_processor.go @@ -0,0 +1,146 @@ +package rpc + +import ( + "context" + "log" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.PostProcessor where the PostProcessor is actually +// executed over an RPC connection. +type postProcessor struct { + commonClient +} + +// PostProcessorServer wraps a packersdk.PostProcessor implementation and makes it +// exportable as part of a Golang RPC server. +type PostProcessorServer struct { + context context.Context + contextCancel func() + + commonServer + p packersdk.PostProcessor +} + +type PostProcessorConfigureArgs struct { + Configs []interface{} +} + +type PostProcessorProcessResponse struct { + Err *BasicError + Keep bool + ForceOverride bool + StreamId uint32 +} + +func (p *postProcessor) Configure(raw ...interface{}) error { + raw, err := encodeCTYValues(raw) + if err != nil { + return err + } + args := &PostProcessorConfigureArgs{Configs: raw} + return p.client.Call(p.endpoint+".Configure", args, new(interface{})) +} + +func (p *postProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, a packersdk.Artifact) (packersdk.Artifact, bool, bool, error) { + nextId := p.mux.NextId() + server := newServerWithMux(p.mux, nextId) + server.RegisterArtifact(a) + server.RegisterUi(ui) + go server.Serve() + + done := make(chan interface{}) + defer close(done) + + go func() { + select { + case <-ctx.Done(): + log.Printf("Cancelling post-processor after context cancellation %v", ctx.Err()) + if err := p.client.Call(p.endpoint+".Cancel", new(interface{}), new(interface{})); err != nil { + log.Printf("Error cancelling post-processor: %s", err) + } + case <-done: + } + }() + + var response PostProcessorProcessResponse + if err := p.client.Call(p.endpoint+".PostProcess", nextId, &response); err != nil { + return nil, false, false, err + } + + if response.Err != nil { + return nil, false, false, response.Err + } + + if response.StreamId == 0 { + return nil, false, false, nil + } + + client, err := newClientWithMux(p.mux, response.StreamId) + if err != nil { + return nil, false, false, err + } + + return client.Artifact(), response.Keep, response.ForceOverride, nil +} + +func (p *PostProcessorServer) Configure(args *PostProcessorConfigureArgs, reply *interface{}) (err error) { + config, err := decodeCTYValues(args.Configs) + if err != nil { + return err + } + err = p.p.Configure(config...) 
+ return err +} + +func (p *PostProcessorServer) PostProcess(streamId uint32, reply *PostProcessorProcessResponse) error { + client, err := newClientWithMux(p.mux, streamId) + if err != nil { + return NewBasicError(err) + } + + if p.context == nil { + p.context, p.contextCancel = context.WithCancel(context.Background()) + } + + artifact := client.Artifact() + artifactResult, keep, forceOverride, err := p.p.PostProcess(p.context, client.Ui(), artifact) + *reply = PostProcessorProcessResponse{ + Err: NewBasicError(err), + Keep: keep, + ForceOverride: forceOverride, + StreamId: 0, + } + if err != nil { + log.Printf("error: %v", err) + client.Close() + return nil + } + + if artifactResult != artifact { + // Sometimes, the artifact returned by PostProcess is the artifact from + // client.Artifact() and in that case we don't want to close client; + // otherwise the outcome is sort of undetermined. See [GH-9995] for a + // good test file. + defer client.Close() + } + + if artifactResult != nil { + streamId = p.mux.NextId() + reply.StreamId = streamId + server := newServerWithMux(p.mux, streamId) + if err := server.RegisterArtifact(artifactResult); err != nil { + return err + } + go server.Serve() + } + return nil +} + +func (b *PostProcessorServer) Cancel(args *interface{}, reply *interface{}) error { + if b.contextCancel != nil { + b.contextCancel() + } + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/provisioner.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/provisioner.go new file mode 100644 index 000000000..ebf6b640f --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/provisioner.go @@ -0,0 +1,98 @@ +package rpc + +import ( + "context" + "log" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +// An implementation of packersdk.Provisioner where the provisioner is actually +// executed over an RPC connection. +type provisioner struct { + commonClient +} + +// ProvisionerServer wraps a packersdk.Provisioner implementation and makes it +// exportable as part of a Golang RPC server. 
+type ProvisionerServer struct {
+	context       context.Context
+	contextCancel func()
+
+	commonServer
+	p packersdk.Provisioner
+}
+
+type ProvisionerPrepareArgs struct {
+	Configs []interface{}
+}
+
+func (p *provisioner) Prepare(configs ...interface{}) error {
+	configs, err := encodeCTYValues(configs)
+	if err != nil {
+		return err
+	}
+	args := &ProvisionerPrepareArgs{configs}
+	return p.client.Call(p.endpoint+".Prepare", args, new(interface{}))
+}
+
+type ProvisionerProvisionArgs struct {
+	GeneratedData map[string]interface{}
+	StreamID      uint32
+}
+
+func (p *provisioner) Provision(ctx context.Context, ui packersdk.Ui, comm packersdk.Communicator, generatedData map[string]interface{}) error {
+	nextId := p.mux.NextId()
+	server := newServerWithMux(p.mux, nextId)
+	server.RegisterCommunicator(comm)
+	server.RegisterUi(ui)
+	go server.Serve()
+
+	done := make(chan interface{})
+	defer close(done)
+
+	go func() {
+		select {
+		case <-ctx.Done():
+			log.Printf("Cancelling provisioner after context cancellation %v", ctx.Err())
+			if err := p.client.Call(p.endpoint+".Cancel", new(interface{}), new(interface{})); err != nil {
+				log.Printf("Error cancelling provisioner: %s", err)
+			}
+		case <-done:
+		}
+	}()
+
+	args := &ProvisionerProvisionArgs{generatedData, nextId}
+	return p.client.Call(p.endpoint+".Provision", args, new(interface{}))
+}
+
+func (p *ProvisionerServer) Prepare(args *ProvisionerPrepareArgs, reply *interface{}) error {
+	config, err := decodeCTYValues(args.Configs)
+	if err != nil {
+		return err
+	}
+	return p.p.Prepare(config...)
+}
+
+func (p *ProvisionerServer) Provision(args *ProvisionerProvisionArgs, reply *interface{}) error {
+	streamId := args.StreamID
+	client, err := newClientWithMux(p.mux, streamId)
+	if err != nil {
+		return NewBasicError(err)
+	}
+	defer client.Close()
+
+	if p.context == nil {
+		p.context, p.contextCancel = context.WithCancel(context.Background())
+	}
+	if err := p.p.Provision(p.context, client.Ui(), client.Communicator(), args.GeneratedData); err != nil {
+		return NewBasicError(err)
+	}
+
+	return nil
+}
+
+func (p *ProvisionerServer) Cancel(args *interface{}, reply *interface{}) error {
+	p.contextCancel()
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/server.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/server.go
new file mode 100644
index 000000000..3d0ef4f7f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/server.go
@@ -0,0 +1,150 @@
+package rpc
+
+import (
+	"io"
+	"log"
+	"net/rpc"
+
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	"github.com/ugorji/go/codec"
+)
+
+const (
+	DefaultArtifactEndpoint      string = "Artifact"
+	DefaultBuildEndpoint                = "Build"
+	DefaultBuilderEndpoint              = "Builder"
+	DefaultCacheEndpoint                = "Cache"
+	DefaultCommandEndpoint              = "Command"
+	DefaultCommunicatorEndpoint         = "Communicator"
+	DefaultHookEndpoint                 = "Hook"
+	DefaultPostProcessorEndpoint        = "PostProcessor"
+	DefaultProvisionerEndpoint          = "Provisioner"
+	DefaultUiEndpoint                   = "Ui"
+)
+
+// Server represents an RPC server for Packer. This must be paired on
+// the other side with a Client. In Packer, each "plugin" (builder, provisioner,
+// and post-processor) creates and launches a server. The client is created
+// and used by the packer "core".
+type Server struct {
+	mux      *muxBroker
+	streamId uint32
+	server   *rpc.Server
+	closeMux bool
+}
+
+// NewServer returns a new Packer RPC server.
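+// The returned server does nothing until endpoints are registered on it
+// (RegisterBuilder, RegisterUi, ...) and Serve is called.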
+func NewServer(conn io.ReadWriteCloser) (*Server, error) {
+	mux, err := newMuxBrokerServer(conn)
+	if err != nil {
+		return nil, err
+	}
+	result := newServerWithMux(mux, 0)
+	result.closeMux = true
+	go mux.Run()
+	return result, nil
+}
+
+func newServerWithMux(mux *muxBroker, streamId uint32) *Server {
+	return &Server{
+		mux:      mux,
+		streamId: streamId,
+		server:   rpc.NewServer(),
+		closeMux: false,
+	}
+}
+
+func (s *Server) Close() error {
+	if s.closeMux {
+		log.Printf("[WARN] Shutting down mux conn in Server")
+		return s.mux.Close()
+	}
+
+	return nil
+}
+
+func (s *Server) RegisterArtifact(a packersdk.Artifact) error {
+	return s.server.RegisterName(DefaultArtifactEndpoint, &ArtifactServer{
+		artifact: a,
+	})
+}
+
+func (s *Server) RegisterBuild(b packersdk.Build) error {
+	return s.server.RegisterName(DefaultBuildEndpoint, &BuildServer{
+		build: b,
+		mux:   s.mux,
+	})
+}
+
+func (s *Server) RegisterBuilder(b packersdk.Builder) error {
+	return s.server.RegisterName(DefaultBuilderEndpoint, &BuilderServer{
+		commonServer: commonServer{
+			selfConfigurable: b,
+			mux:              s.mux,
+		},
+		builder: b,
+	})
+}
+
+func (s *Server) RegisterCommunicator(c packersdk.Communicator) error {
+	return s.server.RegisterName(DefaultCommunicatorEndpoint, &CommunicatorServer{
+		c: c,
+		commonServer: commonServer{
+			mux: s.mux,
+		},
+	})
+}
+
+func (s *Server) RegisterHook(h packersdk.Hook) error {
+	return s.server.RegisterName(DefaultHookEndpoint, &HookServer{
+		hook: h,
+		mux:  s.mux,
+	})
+}
+
+func (s *Server) RegisterPostProcessor(p packersdk.PostProcessor) error {
+	return s.server.RegisterName(DefaultPostProcessorEndpoint, &PostProcessorServer{
+		commonServer: commonServer{
+			selfConfigurable: p,
+			mux:              s.mux,
+		},
+		p: p,
+	})
+}
+
+func (s *Server) RegisterProvisioner(p packersdk.Provisioner) error {
+	return s.server.RegisterName(DefaultProvisionerEndpoint, &ProvisionerServer{
+		commonServer: commonServer{
+			selfConfigurable: p,
+			mux:              s.mux,
+		},
+		p: p,
+	})
+}
+
+func (s *Server) RegisterUi(ui packersdk.Ui) error {
+	return s.server.RegisterName(DefaultUiEndpoint, &UiServer{
+		ui:       ui,
+		register: s.server.RegisterName,
+	})
+}
+
+// Serve accepts a single connection on the server's stream ID and serves the
+// RPC server over it, blocking until the connection is closed.
+func (s *Server) Serve() {
+	// Accept a connection on stream ID 0, which is always used for
+	// normal client to server connections.
+	stream, err := s.mux.Accept(s.streamId)
+	if err != nil {
+		log.Printf("[ERR] Error retrieving stream for serving: %s", err)
+		return
+	}
+	defer stream.Close()
+
+	h := &codec.MsgpackHandle{
+		RawToString: true,
+		WriteExt:    true,
+	}
+	rpcCodec := codec.GoRpc.ServerCodec(stream, h)
+	s.server.ServeCodec(rpcCodec)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui.go
new file mode 100644
index 000000000..61032413a
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui.go
@@ -0,0 +1,95 @@
+package rpc
+
+import (
+	"log"
+
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+// An implementation of packersdk.Ui where the Ui is actually executed
+// over an RPC connection.
+type Ui struct {
+	commonClient
+	endpoint string
+}
+
+var _ packersdk.Ui = new(Ui)
+
+// UiServer wraps a packersdk.Ui implementation and makes it exportable
+// as part of a Golang RPC server.
+type UiServer struct {
+	ui       packersdk.Ui
+	register func(name string, rcvr interface{}) error
+}
+
+// The arguments sent to Ui.Machine
+type UiMachineArgs struct {
+	Category string
+	Args     []string
+}
+
+func (u *Ui) Ask(query string) (result string, err error) {
+	err = u.client.Call("Ui.Ask", query, &result)
+	return
+}
+
+func (u *Ui) Error(message string) {
+	if err := u.client.Call("Ui.Error", message, new(interface{})); err != nil {
+		log.Printf("Error in Ui.Error RPC call: %s", err)
+	}
+}
+
+func (u *Ui) Machine(t string, args ...string) {
+	rpcArgs := &UiMachineArgs{
+		Category: t,
+		Args:     args,
+	}
+
+	if err := u.client.Call("Ui.Machine", rpcArgs, new(interface{})); err != nil {
+		log.Printf("Error in Ui.Machine RPC call: %s", err)
+	}
+}
+
+func (u *Ui) Message(message string) {
+	if err := u.client.Call("Ui.Message", message, new(interface{})); err != nil {
+		log.Printf("Error in Ui.Message RPC call: %s", err)
+	}
+}
+
+func (u *Ui) Say(message string) {
+	if err := u.client.Call("Ui.Say", message, new(interface{})); err != nil {
+		log.Printf("Error in Ui.Say RPC call: %s", err)
+	}
+}
+
+func (u *UiServer) Ask(query string, reply *string) (err error) {
+	*reply, err = u.ui.Ask(query)
+	return
+}
+
+func (u *UiServer) Error(message *string, reply *interface{}) error {
+	u.ui.Error(*message)
+
+	*reply = nil
+	return nil
+}
+
+func (u *UiServer) Machine(args *UiMachineArgs, reply *interface{}) error {
+	u.ui.Machine(args.Category, args.Args...)
+
+	*reply = nil
+	return nil
+}
+
+func (u *UiServer) Message(message *string, reply *interface{}) error {
+	u.ui.Message(*message)
+	*reply = nil
+	return nil
+}
+
+func (u *UiServer) Say(message *string, reply *interface{}) error {
+	u.ui.Say(*message)
+
+	*reply = nil
+	return nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui_progress_tracking.go b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui_progress_tracking.go
new file mode 100644
index 000000000..92fd5dd59
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/rpc/ui_progress_tracking.go
@@ -0,0 +1,99 @@
+package rpc
+
+import (
+	"io"
+	"log"
+	"net/rpc"
+
+	"github.com/hashicorp/packer-plugin-sdk/random"
+)
+
+// TrackProgress starts a ProgressTrackingClient / ProgressTrackingServer
+// pair that reports the number of bytes read from the stream, so that the
+// operation can be tracked on the terminal side.
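+// Each call registers a fresh "Ui"+trackingID receiver on the server side,
+// so several concurrent transfers can report progress independently.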
+func (u *Ui) TrackProgress(src string, currentSize, totalSize int64, stream io.ReadCloser) io.ReadCloser {
+	pl := &TrackProgressParameters{
+		Src:         src,
+		CurrentSize: currentSize,
+		TotalSize:   totalSize,
+	}
+	var trackingID string
+	if err := u.client.Call("Ui.NewTrackProgress", pl, &trackingID); err != nil {
+		log.Printf("Error in Ui.NewTrackProgress RPC call: %s", err)
+		return stream
+	}
+	cli := &ProgressTrackingClient{
+		id:     trackingID,
+		client: u.client,
+		stream: stream,
+	}
+	return cli
+}
+
+type ProgressTrackingClient struct {
+	id     string
+	client *rpc.Client
+	stream io.ReadCloser
+}
+
+// Read will send len(b) over the wire instead of its content
+func (u *ProgressTrackingClient) Read(b []byte) (read int, err error) {
+	defer func() {
+		if err := u.client.Call("Ui"+u.id+".Add", read, new(interface{})); err != nil {
+			log.Printf("Error in ProgressTrackingClient.Read RPC call: %s", err)
+		}
+	}()
+	return u.stream.Read(b)
+}
+
+func (u *ProgressTrackingClient) Close() error {
+	log.Printf("closing progress tracking client")
+	if err := u.client.Call("Ui"+u.id+".Close", nil, new(interface{})); err != nil {
+		log.Printf("Error in ProgressTrackingClient.Close RPC call: %s", err)
+	}
+	return u.stream.Close()
+}
+
+type TrackProgressParameters struct {
+	Src         string
+	TotalSize   int64
+	CurrentSize int64
+}
+
+func (ui *UiServer) NewTrackProgress(pl *TrackProgressParameters, reply *string) error {
+	// keep identifier as is for now
+	srvr := &ProgressTrackingServer{
+		id: *reply,
+	}
+
+	*reply = pl.Src + random.AlphaNum(6)
+	srvr.stream = ui.ui.TrackProgress(pl.Src, pl.CurrentSize, pl.TotalSize, nopReadCloser{})
+	err := ui.register("Ui"+*reply, srvr)
+	if err != nil {
+		log.Printf("failed to register ProgressTrackingServer at %s: %s", *reply, err)
+		return err
+	}
+	return nil
+}
+
+type ProgressTrackingServer struct {
+	id     string
+	stream io.ReadCloser
+}
+
+func (t *ProgressTrackingServer) Add(size int, _ *interface{}) error {
+	stubBytes := make([]byte, size)
+	t.stream.Read(stubBytes)
+	return nil
+}
+
+func (t *ProgressTrackingServer) Close(_, _ *interface{}) error {
+	t.stream.Close()
+	return nil
+}
+
+type nopReadCloser struct {
+}
+
+func (nopReadCloser) Close() error               { return nil }
+func (nopReadCloser) Read(b []byte) (int, error) { return len(b), nil }
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none/communicator.go
new file mode 100644
index 000000000..d37804961
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none/communicator.go
@@ -0,0 +1,49 @@
+// Package none implements the 'none' communicator. Plugin maintainers should not
+// import this package directly, instead using the tooling in the
+// "packer-plugin-sdk/communicator" module.
+package none
+
+import (
+	"context"
+	"errors"
+	"io"
+	"os"
+
+	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+)
+
+type comm struct {
+	config string
+}
+
+// New creates a null packersdk.Communicator implementation. This takes
+// an already existing configuration.
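+// For example (sketch, caller-side names assumed):
+//
+//	c, _ := none.New("none")
+//	cmd := &packersdk.RemoteCmd{Command: "anything"}
+//	_ = c.Start(context.TODO(), cmd) // "succeeds" immediately with status 0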
+func New(config string) (result *comm, err error) { + // Establish an initial connection and connect + result = &comm{ + config: config, + } + + return +} + +func (c *comm) Start(ctx context.Context, cmd *packersdk.RemoteCmd) (err error) { + cmd.SetExited(0) + return +} + +func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error { + return errors.New("Upload is not implemented when communicator = 'none'") +} + +func (c *comm) UploadDir(dst string, src string, excl []string) error { + return errors.New("UploadDir is not implemented when communicator = 'none'") +} + +func (c *comm) Download(path string, output io.Writer) error { + return errors.New("Download is not implemented when communicator = 'none'") +} + +func (c *comm) DownloadDir(dst string, src string, excl []string) error { + return errors.New("DownloadDir is not implemented when communicator = 'none'") +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/communicator.go new file mode 100644 index 000000000..9b08af181 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/communicator.go @@ -0,0 +1,1025 @@ +// Package ssh implements the SSH communicator. Plugin maintainers should not +// import this package directly, instead using the tooling in the +// "packer-plugin-sdk/communicator" module. +package ssh + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/tmp" + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/agent" +) + +// ErrHandshakeTimeout is returned from New() whenever we're unable to establish +// an ssh connection within a certain timeframe. By default the handshake time- +// out period is 1 minute. You can change it with Config.HandshakeTimeout. +var ErrHandshakeTimeout = fmt.Errorf("Timeout during SSH handshake") + +type comm struct { + client *ssh.Client + config *Config + conn net.Conn + address string +} + +// TunnelDirection is the supported tunnel directions +type TunnelDirection int + +const ( + UnsetTunnel TunnelDirection = iota + RemoteTunnel + LocalTunnel +) + +// TunnelSpec represents a request to map a port on one side of the SSH connection to the other +type TunnelSpec struct { + Direction TunnelDirection + ListenType string + ListenAddr string + ForwardType string + ForwardAddr string +} + +// Config is the structure used to configure the SSH communicator. +type Config struct { + // The configuration of the Go SSH connection + SSHConfig *ssh.ClientConfig + + // Connection returns a new connection. The current connection + // in use will be closed as part of the Close method, or in the + // case an error occurs. + Connection func() (net.Conn, error) + + // Pty, if true, will request a pty from the remote end. + Pty bool + + // DisableAgentForwarding, if true, will not forward the SSH agent. + DisableAgentForwarding bool + + // HandshakeTimeout limits the amount of time we'll wait to handshake before + // saying the connection failed. + HandshakeTimeout time.Duration + + // UseSftp, if true, sftp will be used instead of scp for file transfers + UseSftp bool + + // KeepAliveInterval sets how often we send a channel request to the + // server. A value < 0 disables. 
+ KeepAliveInterval time.Duration + + // Timeout is how long to wait for a read or write to succeed. + Timeout time.Duration + + Tunnels []TunnelSpec +} + +// Creates a new packersdk.Communicator implementation over SSH. This takes +// an already existing TCP connection and SSH configuration. +func New(address string, config *Config) (result *comm, err error) { + // Establish an initial connection and connect + result = &comm{ + config: config, + address: address, + } + + if err = result.reconnect(); err != nil { + result = nil + return + } + + return +} + +func (c *comm) Start(ctx context.Context, cmd *packersdk.RemoteCmd) (err error) { + session, err := c.newSession() + if err != nil { + return + } + + // Setup our session + session.Stdin = cmd.Stdin + session.Stdout = cmd.Stdout + session.Stderr = cmd.Stderr + + if c.config.Pty { + // Request a PTY + termModes := ssh.TerminalModes{ + ssh.ECHO: 0, // do not echo + ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud + ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud + } + + if err = session.RequestPty("xterm", 40, 80, termModes); err != nil { + return + } + } + + log.Printf("[DEBUG] starting remote command: %s", cmd.Command) + err = session.Start(cmd.Command + "\n") + if err != nil { + return + } + + go func() { + if c.config.KeepAliveInterval <= 0 { + return + } + c := time.NewTicker(c.config.KeepAliveInterval) + defer c.Stop() + for range c.C { + _, err := session.SendRequest("keepalive@packer.io", true, nil) + if err != nil { + return + } + } + }() + + // Start a goroutine to wait for the session to end and set the + // exit boolean and status. + go func() { + defer session.Close() + + err := session.Wait() + exitStatus := 0 + if err != nil { + switch err.(type) { + case *ssh.ExitError: + exitStatus = err.(*ssh.ExitError).ExitStatus() + log.Printf("[ERROR] Remote command exited with '%d': %s", exitStatus, cmd.Command) + case *ssh.ExitMissingError: + log.Printf("[ERROR] Remote command exited without exit status or exit signal.") + exitStatus = packersdk.CmdDisconnect + default: + log.Printf("[ERROR] Error occurred waiting for ssh session: %s", err.Error()) + } + } + cmd.SetExited(exitStatus) + }() + return +} + +func (c *comm) Upload(path string, input io.Reader, fi *os.FileInfo) error { + if c.config.UseSftp { + return c.sftpUploadSession(path, input, fi) + } else { + return c.scpUploadSession(path, input, fi) + } +} + +func (c *comm) UploadDir(dst string, src string, excl []string) error { + log.Printf("[DEBUG] Upload dir '%s' to '%s'", src, dst) + if c.config.UseSftp { + return c.sftpUploadDirSession(dst, src, excl) + } else { + return c.scpUploadDirSession(dst, src, excl) + } +} + +func (c *comm) DownloadDir(src string, dst string, excl []string) error { + log.Printf("[DEBUG] Download dir '%s' to '%s'", src, dst) + scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error { + dirStack := []string{dst} + for { + fmt.Fprint(w, "\x00") + + // read file info + fi, err := stdoutR.ReadString('\n') + if err != nil { + return err + } + + if len(fi) < 0 { + return fmt.Errorf("empty response from server") + } + + switch fi[0] { + case '\x01', '\x02': + return fmt.Errorf("%s", fi[1:]) + case 'C', 'D': + break + case 'E': + dirStack = dirStack[:len(dirStack)-1] + if len(dirStack) == 0 { + fmt.Fprint(w, "\x00") + return nil + } + continue + default: + return fmt.Errorf("unexpected server response (%x)", fi[0]) + } + + var mode int64 + var size int64 + var name string + log.Printf("[DEBUG] Download dir str:%s", fi) + n, err := 
fmt.Sscanf(fi[1:], "%o %d %s", &mode, &size, &name) + if err != nil || n != 3 { + return fmt.Errorf("can't parse server response (%s)", fi) + } + if size < 0 { + return fmt.Errorf("negative file size") + } + + log.Printf("[DEBUG] Download dir mode:%0o size:%d name:%s", mode, size, name) + + dst = filepath.Join(dirStack...) + switch fi[0] { + case 'D': + err = os.MkdirAll(filepath.Join(dst, name), os.FileMode(mode)) + if err != nil { + return err + } + dirStack = append(dirStack, name) + continue + case 'C': + fmt.Fprint(w, "\x00") + err = scpDownloadFile(filepath.Join(dst, name), stdoutR, size, os.FileMode(mode)) + if err != nil { + return err + } + } + + if err := checkSCPStatus(stdoutR); err != nil { + return err + } + } + } + return c.scpSession("scp -vrf "+src, scpFunc) +} + +func (c *comm) Download(path string, output io.Writer) error { + if c.config.UseSftp { + return c.sftpDownloadSession(path, output) + } + return c.scpDownloadSession(path, output) +} + +func (c *comm) newSession() (session *ssh.Session, err error) { + log.Println("[DEBUG] Opening new ssh session") + if c.client == nil { + err = errors.New("client not available") + } else { + session, err = c.client.NewSession() + } + + if err != nil { + log.Printf("[ERROR] ssh session open error: '%s', attempting reconnect", err) + if err := c.reconnect(); err != nil { + return nil, err + } + + if c.client == nil { + return nil, errors.New("client not available") + } else { + return c.client.NewSession() + } + } + + return session, nil +} + +func (c *comm) reconnect() (err error) { + if c.conn != nil { + // Ignore errors here because we don't care if it fails + c.conn.Close() + } + + // Set the conn and client to nil since we'll recreate it + c.conn = nil + c.client = nil + + log.Printf("[DEBUG] reconnecting to TCP connection for SSH") + c.conn, err = c.config.Connection() + if err != nil { + // Explicitly set this to the REAL nil. Connection() can return + // a nil implementation of net.Conn which will make the + // "if c.conn == nil" check fail above. Read here for more information + // on this psychotic language feature: + // + // http://golang.org/doc/faq#nil_error + c.conn = nil + + log.Printf("[ERROR] reconnection error: %s", err) + return + } + + if c.config.Timeout > 0 { + c.conn = &timeoutConn{c.conn, c.config.Timeout, c.config.Timeout} + } + + log.Printf("[DEBUG] handshaking with SSH") + + // Default timeout to 1 minute if it wasn't specified (zero value). For + // when you need to handshake from low orbit. + var duration time.Duration + if c.config.HandshakeTimeout == 0 { + duration = 1 * time.Minute + } else { + duration = c.config.HandshakeTimeout + } + + connectionEstablished := make(chan struct{}, 1) + + var sshConn ssh.Conn + var sshChan <-chan ssh.NewChannel + var req <-chan *ssh.Request + + go func() { + sshConn, sshChan, req, err = ssh.NewClientConn(c.conn, c.address, c.config.SSHConfig) + close(connectionEstablished) + }() + + select { + case <-connectionEstablished: + // We don't need to do anything here. We just want select to block until + // we connect or timeout. 
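+		// If the timeout case below fires first, the handshake goroutine is
+		// still running; closing c.conn makes its ssh.NewClientConn call fail
+		// so that goroutine can exit too.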
+ case <-time.After(duration): + if c.conn != nil { + c.conn.Close() + } + if sshConn != nil { + sshConn.Close() + } + return ErrHandshakeTimeout + } + + if err != nil { + return + } + log.Printf("[DEBUG] handshake complete!") + if sshConn != nil { + c.client = ssh.NewClient(sshConn, sshChan, req) + } + c.connectToAgent() + err = c.connectTunnels(sshConn) + if err != nil { + return + } + + return +} + +func (c *comm) connectTunnels(sshConn ssh.Conn) (err error) { + if c.client == nil { + return + } + + if len(c.config.Tunnels) == 0 { + // No Tunnels to configure + return + } + + // Start remote forwards of ports to ourselves. + log.Printf("[DEBUG] Tunnel configuration: %v", c.config.Tunnels) + for _, v := range c.config.Tunnels { + done := make(chan struct{}) + var listener net.Listener + switch v.Direction { + case RemoteTunnel: + // This requests the sshd Host to bind a port and send traffic back to us + listener, err = c.client.Listen(v.ListenType, v.ListenAddr) + if err != nil { + err = fmt.Errorf("Tunnel: Failed to bind remote ('%v'): %s", v, err) + return + } + log.Printf("[INFO] Tunnel: Remote bound on %s forwarding to %s", v.ListenAddr, v.ForwardAddr) + connectFunc := ConnectFunc(v.ForwardType, v.ForwardAddr) + go ProxyServe(listener, done, connectFunc) + // Wait for our sshConn to be shutdown + // FIXME: Is there a better "on-shutdown" we can wait on? + go shutdownProxyTunnel(sshConn, done, listener) + case LocalTunnel: + // This binds locally and sends traffic back to the sshd host + listener, err = net.Listen(v.ListenType, v.ListenAddr) + if err != nil { + err = fmt.Errorf("Tunnel: Failed to bind local ('%v'): %s", v, err) + return + } + log.Printf("[INFO] Tunnel: Local bound on %s forwarding to %s", v.ListenAddr, v.ForwardAddr) + connectFunc := func() (net.Conn, error) { + // This Dial occurs on the SSH server's side + return c.client.Dial(v.ForwardType, v.ForwardAddr) + } + go ProxyServe(listener, done, connectFunc) + // FIXME: Is there a better "on-shutdown" we can wait on? 
+ go shutdownProxyTunnel(sshConn, done, listener) + default: + err = fmt.Errorf("Tunnel: Unknown tunnel direction ('%v'): %v", v, v.Direction) + return + } + } + + return +} + +// shutdownProxyTunnel waits for our sshConn to be shutdown and closes the listeners +func shutdownProxyTunnel(sshConn ssh.Conn, done chan struct{}, listener net.Listener) { + sshConn.Wait() + log.Printf("[INFO] Tunnel: Shutting down listener %v", listener) + done <- struct{}{} + close(done) + listener.Close() +} + +func (c *comm) connectToAgent() { + if c.client == nil { + return + } + + if c.config.DisableAgentForwarding { + log.Printf("[INFO] SSH agent forwarding is disabled.") + return + } + + // open connection to the local agent + socketLocation := os.Getenv("SSH_AUTH_SOCK") + if socketLocation == "" { + log.Printf("[INFO] no local agent socket, will not connect agent") + return + } + agentConn, err := net.Dial("unix", socketLocation) + if err != nil { + log.Printf("[ERROR] could not connect to local agent socket: %s", socketLocation) + return + } + + // create agent and add in auth + forwardingAgent := agent.NewClient(agentConn) + if forwardingAgent == nil { + log.Printf("[ERROR] Could not create agent client") + agentConn.Close() + return + } + + // add callback for forwarding agent to SSH config + // XXX - might want to handle reconnects appending multiple callbacks + auth := ssh.PublicKeysCallback(forwardingAgent.Signers) + c.config.SSHConfig.Auth = append(c.config.SSHConfig.Auth, auth) + agent.ForwardToAgent(c.client, forwardingAgent) + + // Setup a session to request agent forwarding + session, err := c.newSession() + if err != nil { + return + } + defer session.Close() + + err = agent.RequestAgentForwarding(session) + if err != nil { + log.Printf("[ERROR] RequestAgentForwarding: %#v", err) + return + } + + log.Printf("[INFO] agent forwarding enabled") + return +} + +func (c *comm) sftpUploadSession(path string, input io.Reader, fi *os.FileInfo) error { + sftpFunc := func(client *sftp.Client) error { + return c.sftpUploadFile(path, input, client, fi) + } + + return c.sftpSession(sftpFunc) +} + +func (c *comm) sftpUploadFile(path string, input io.Reader, client *sftp.Client, fi *os.FileInfo) error { + log.Printf("[DEBUG] sftp: uploading %s", path) + f, err := client.Create(path) + if err != nil { + return err + } + defer f.Close() + + if _, err = io.Copy(f, input); err != nil { + return err + } + + if fi != nil && (*fi).Mode().IsRegular() { + mode := (*fi).Mode().Perm() + err = client.Chmod(path, mode) + if err != nil { + return err + } + } + + return nil +} + +func (c *comm) sftpUploadDirSession(dst string, src string, excl []string) error { + sftpFunc := func(client *sftp.Client) error { + rootDst := dst + if src[len(src)-1] != '/' { + log.Printf("[DEBUG] No trailing slash, creating the source directory name") + rootDst = filepath.Join(dst, filepath.Base(src)) + } + walkFunc := func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + // Calculate the final destination using the + // base source and root destination + relSrc, err := filepath.Rel(src, path) + if err != nil { + return err + } + finalDst := filepath.Join(rootDst, relSrc) + + // In Windows, Join uses backslashes which we don't want to get + // to the sftp server + finalDst = filepath.ToSlash(finalDst) + + // Skip the creation of the target destination directory since + // it should exist and we might not even own it + if finalDst == dst { + return nil + } + + return c.sftpVisitFile(finalDst, path, info, 
client) + } + + return filepath.Walk(src, walkFunc) + } + + return c.sftpSession(sftpFunc) +} + +func (c *comm) sftpMkdir(path string, client *sftp.Client, fi os.FileInfo) error { + log.Printf("[DEBUG] sftp: creating dir %s", path) + + if err := client.Mkdir(path); err != nil { + // Do not consider it an error if the directory existed + remoteFi, fiErr := client.Lstat(path) + if fiErr != nil || !remoteFi.IsDir() { + return err + } + } + + mode := fi.Mode().Perm() + if err := client.Chmod(path, mode); err != nil { + return err + } + return nil +} + +func (c *comm) sftpVisitFile(dst string, src string, fi os.FileInfo, client *sftp.Client) error { + if !fi.IsDir() { + f, err := os.Open(src) + if err != nil { + return err + } + defer f.Close() + return c.sftpUploadFile(dst, f, client, &fi) + } else { + err := c.sftpMkdir(dst, client, fi) + return err + } +} + +func (c *comm) sftpDownloadSession(path string, output io.Writer) error { + sftpFunc := func(client *sftp.Client) error { + f, err := client.Open(path) + if err != nil { + return err + } + defer f.Close() + + if _, err = io.Copy(output, f); err != nil { + return err + } + + return nil + } + + return c.sftpSession(sftpFunc) +} + +func (c *comm) sftpSession(f func(*sftp.Client) error) error { + client, err := c.newSftpClient() + if err != nil { + return fmt.Errorf("sftpSession error: %s", err.Error()) + } + defer client.Close() + + return f(client) +} + +func (c *comm) newSftpClient() (*sftp.Client, error) { + session, err := c.newSession() + if err != nil { + return nil, err + } + + if err := session.RequestSubsystem("sftp"); err != nil { + return nil, err + } + + pw, err := session.StdinPipe() + if err != nil { + return nil, err + } + pr, err := session.StdoutPipe() + if err != nil { + return nil, err + } + + // Capture stdout so we can return errors to the user + var stdout bytes.Buffer + tee := io.TeeReader(pr, &stdout) + client, err := sftp.NewClientPipe(tee, pw) + if err != nil && stdout.Len() > 0 { + log.Printf("[ERROR] Upload failed: %s", stdout.Bytes()) + } + + return client, err +} + +func (c *comm) scpUploadSession(path string, input io.Reader, fi *os.FileInfo) error { + + // The target directory and file for talking the SCP protocol + target_dir := filepath.Dir(path) + target_file := filepath.Base(path) + + // On windows, filepath.Dir uses backslash separators (ie. "\tmp"). + // This does not work when the target host is unix. 
Switch to forward slash
+	// which works for unix and windows
+	target_dir = filepath.ToSlash(target_dir)
+
+	// Escape spaces in remote directory
+	target_dir = strings.Replace(target_dir, " ", "\\ ", -1)
+
+	scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
+		return scpUploadFile(target_file, input, w, stdoutR, fi)
+	}
+
+	return c.scpSession("scp -vt "+target_dir, scpFunc)
+}
+
+func (c *comm) scpUploadDirSession(dst string, src string, excl []string) error {
+	scpFunc := func(w io.Writer, r *bufio.Reader) error {
+		uploadEntries := func() error {
+			f, err := os.Open(src)
+			if err != nil {
+				return err
+			}
+			defer f.Close()
+
+			entries, err := f.Readdir(-1)
+			if err != nil {
+				return err
+			}
+
+			return scpUploadDir(src, entries, w, r)
+		}
+
+		if src[len(src)-1] != '/' {
+			log.Printf("[DEBUG] No trailing slash, creating the source directory name")
+			fi, err := os.Stat(src)
+			if err != nil {
+				return err
+			}
+			return scpUploadDirProtocol(filepath.Base(src), w, r, uploadEntries, fi)
+		} else {
+			// Trailing slash, so only upload the contents
+			return uploadEntries()
+		}
+	}
+
+	return c.scpSession("scp -rvt "+dst, scpFunc)
+}
+
+func (c *comm) scpDownloadSession(path string, output io.Writer) error {
+	scpFunc := func(w io.Writer, stdoutR *bufio.Reader) error {
+		fmt.Fprint(w, "\x00")
+
+		// read file info
+		fi, err := stdoutR.ReadString('\n')
+		if err != nil {
+			return err
+		}
+
+		if len(fi) == 0 {
+			return fmt.Errorf("empty response from server")
+		}
+
+		switch fi[0] {
+		case '\x01', '\x02':
+			return fmt.Errorf("%s", fi[1:])
+		case 'C':
+		case 'D':
+			return fmt.Errorf("remote file is directory")
+		default:
+			return fmt.Errorf("unexpected server response (%x)", fi[0])
+		}
+
+		var mode string
+		var size int64
+
+		n, err := fmt.Sscanf(fi, "%6s %d ", &mode, &size)
+		if err != nil || n != 2 {
+			return fmt.Errorf("can't parse server response (%s)", fi)
+		}
+		if size < 0 {
+			return fmt.Errorf("negative file size")
+		}
+
+		fmt.Fprint(w, "\x00")
+
+		if _, err := io.CopyN(output, stdoutR, size); err != nil {
+			return err
+		}
+
+		fmt.Fprint(w, "\x00")
+
+		return checkSCPStatus(stdoutR)
+	}
+
+	if !strings.Contains(path, " ") {
+		return c.scpSession("scp -vf "+path, scpFunc)
+	}
+	return c.scpSession("scp -vf "+strconv.Quote(path), scpFunc)
+}
+
+func (c *comm) scpSession(scpCommand string, f func(io.Writer, *bufio.Reader) error) error {
+	session, err := c.newSession()
+	if err != nil {
+		return err
+	}
+	defer session.Close()
+
+	// Get a pipe to stdin so that we can send data down
+	stdinW, err := session.StdinPipe()
+	if err != nil {
+		return err
+	}
+
+	// We only want to close once, so we nil w after we close it,
+	// and only close in the defer if it hasn't been closed already.
+	defer func() {
+		if stdinW != nil {
+			stdinW.Close()
+		}
+	}()
+
+	// Get a pipe to stdout so that we can get responses back
+	stdoutPipe, err := session.StdoutPipe()
+	if err != nil {
+		return err
+	}
+	stdoutR := bufio.NewReader(stdoutPipe)
+
+	// Set stderr to a bytes buffer
+	stderr := new(bytes.Buffer)
+	session.Stderr = stderr
+
+	// Start the sink mode on the other side
+	// TODO(mitchellh): There are probably issues with shell escaping the path
+	log.Println("[DEBUG] Starting remote scp process: ", scpCommand)
+	if err := session.Start(scpCommand); err != nil {
+		return err
+	}
+
+	// Call our callback that executes in the context of SCP. We ignore
+	// EOF errors if they occur because it usually means that SCP prematurely
+	// ended on the other side.
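+	// The callback speaks the scp wire protocol over the session's pipes:
+	// each file is announced with a record like "C0644 <size> <name>", and
+	// every step is acknowledged with a zero status byte (see scpUploadFile
+	// and checkSCPStatus below).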
+	log.Println("[DEBUG] Started SCP session, beginning transfers...")
+	if err := f(stdinW, stdoutR); err != nil && err != io.EOF {
+		return err
+	}
+
+	// Close the stdin, which sends an EOF, and then set w to nil so that
+	// our defer func doesn't close it again since that is unsafe with
+	// the Go SSH package.
+	log.Println("[DEBUG] SCP session complete, closing stdin pipe.")
+	stdinW.Close()
+	stdinW = nil
+
+	// Wait for the SCP connection to close, meaning it has consumed all
+	// our data and has completed. Or has errored.
+	log.Println("[DEBUG] Waiting for SSH session to complete.")
+	err = session.Wait()
+	log.Printf("[DEBUG] scp stderr (length %d): %s", stderr.Len(), stderr.String())
+	if err != nil {
+		if exitErr, ok := err.(*ssh.ExitError); ok {
+			// We have an ExitError, meaning we can just read the
+			// exit status
+			log.Printf("[DEBUG] non-zero exit status: %d, %v", exitErr.ExitStatus(), err)
+			stdoutB, err := ioutil.ReadAll(stdoutR)
+			if err != nil {
+				return err
+			}
+			log.Printf("[DEBUG] scp output: %s", stdoutB)
+
+			// If we exited with status 127, it means SCP isn't available.
+			// Return a more descriptive error for that.
+			if exitErr.ExitStatus() == 127 {
+				return errors.New(
+					"SCP failed to start. This usually means that SCP is not\n" +
+						"properly installed on the remote system.")
+			}
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+// checkSCPStatus checks that a prior command sent to SCP completed
+// successfully. If it did not complete successfully, an error will
+// be returned.
+func checkSCPStatus(r *bufio.Reader) error {
+	code, err := r.ReadByte()
+	if err != nil {
+		return err
+	}
+
+	if code != 0 {
+		// Treat any non-zero (really 1 and 2) as fatal errors
+		message, _, err := r.ReadLine()
+		if err != nil {
+			return fmt.Errorf("Error reading error message: %s", err)
+		}
+
+		return errors.New(string(message))
+	}
+
+	return nil
+}
+
+func scpDownloadFile(dst string, src io.Reader, size int64, mode os.FileMode) error {
+	f, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if _, err := io.CopyN(f, src, size); err != nil {
+		return err
+	}
+	return nil
+}
+
+func scpUploadFile(dst string, src io.Reader, w io.Writer, r *bufio.Reader, fi *os.FileInfo) error {
+	var mode os.FileMode
+	var size int64
+
+	if fi != nil && (*fi).Mode().IsRegular() {
+		mode = (*fi).Mode().Perm()
+		size = (*fi).Size()
+	} else {
+		// Create a temporary file where we can copy the contents of the src
+		// so that we can determine the length, since SCP is length-prefixed.
+		tf, err := tmp.File("packer-upload")
+		if err != nil {
+			return fmt.Errorf("Error creating temporary file for upload: %s", err)
+		}
+		defer os.Remove(tf.Name())
+		defer tf.Close()
+
+		mode = 0644
+
+		log.Println("[DEBUG] Copying input data into temporary file so we can read the length")
+		if _, err := io.Copy(tf, src); err != nil {
+			return fmt.Errorf("Error copying input data into local temporary "+
+				"file. Check that TEMPDIR has enough space. Please see "+
+				"https://www.packer.io/docs/other/environment-variables#tmpdir "+
+				"for more info. Error: %s", err)
+		}
+
+		// Sync the file so that the contents are definitely on disk, then
+		// read the length of it.
+ if err := tf.Sync(); err != nil { + return fmt.Errorf("Error syncing temporary file for upload: %s", err) + } + + // Seek the file to the beginning so we can re-read all of it + if _, err := tf.Seek(0, 0); err != nil { + return fmt.Errorf("Error seeking temporary file for upload: %s", err) + } + + tfi, err := tf.Stat() + if err != nil { + return fmt.Errorf("Error inspecting temporary file for upload: %s", err) + } + + size = tfi.Size() + src = tf + } + + // Start the protocol + perms := fmt.Sprintf("C%04o", mode) + log.Printf("[DEBUG] scp: Uploading %s: perms=%s size=%d", dst, perms, size) + + fmt.Fprintln(w, perms, size, dst) + if err := checkSCPStatus(r); err != nil { + return err + } + + if _, err := io.CopyN(w, src, size); err != nil { + return err + } + + fmt.Fprint(w, "\x00") + return checkSCPStatus(r) +} + +func scpUploadDirProtocol(name string, w io.Writer, r *bufio.Reader, f func() error, fi os.FileInfo) error { + log.Printf("[DEBUG] SCP: starting directory upload: %s", name) + + mode := fi.Mode().Perm() + + perms := fmt.Sprintf("D%04o 0", mode) + + fmt.Fprintln(w, perms, name) + err := checkSCPStatus(r) + if err != nil { + return err + } + + if err := f(); err != nil { + return err + } + + fmt.Fprintln(w, "E") + return nil +} + +func scpUploadDir(root string, fs []os.FileInfo, w io.Writer, r *bufio.Reader) error { + for _, fi := range fs { + realPath := filepath.Join(root, fi.Name()) + + // Track if this is actually a symlink to a directory. If it is + // a symlink to a file we don't do any special behavior because uploading + // a file just works. If it is a directory, we need to know so we + // treat it as such. + isSymlinkToDir := false + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + symPath, err := filepath.EvalSymlinks(realPath) + if err != nil { + return err + } + + symFi, err := os.Lstat(symPath) + if err != nil { + return err + } + + isSymlinkToDir = symFi.IsDir() + } + + if !fi.IsDir() && !isSymlinkToDir { + // It is a regular file (or symlink to a file), just upload it + f, err := os.Open(realPath) + if err != nil { + return err + } + + err = func() error { + defer f.Close() + return scpUploadFile(fi.Name(), f, w, r, &fi) + }() + + if err != nil { + return err + } + + continue + } + + // It is a directory, recursively upload + err := scpUploadDirProtocol(fi.Name(), w, r, func() error { + f, err := os.Open(realPath) + if err != nil { + return err + } + defer f.Close() + + entries, err := f.Readdir(-1) + if err != nil { + return err + } + + return scpUploadDir(realPath, entries, w, r) + }, fi) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connect.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connect.go new file mode 100644 index 000000000..80bf0a9f4 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connect.go @@ -0,0 +1,88 @@ +package ssh + +import ( + "fmt" + "net" + "time" + + "golang.org/x/crypto/ssh" + "golang.org/x/net/proxy" +) + +// ConnectFunc is a convenience method for returning a function +// that just uses net.Dial to communicate with the remote end that +// is suitable for use with the SSH communicator configuration.
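+// +// A minimal sketch of how a caller might use it (the address is hypothetical): +// +//	dial := ConnectFunc("tcp", "203.0.113.10:22") +//	conn, err := dial() +//	if err != nil { +//		// handle the dial error or the 15-second timeout +//	} +//	defer conn.Close()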
+func ConnectFunc(network, addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + c, err := net.DialTimeout(network, addr, 15*time.Second) + if err != nil { + return nil, err + } + + if tcpConn, ok := c.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(5 * time.Second) + } + + return c, nil + } +} + +// ProxyConnectFunc is a convenience method for returning a function +// that connects to a host using a SOCKS5 proxy. +func ProxyConnectFunc(socksProxy string, socksAuth *proxy.Auth, network, addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + // create a socks5 dialer + dialer, err := proxy.SOCKS5("tcp", socksProxy, socksAuth, proxy.Direct) + if err != nil { + return nil, fmt.Errorf("Can't create the SOCKS5 proxy dialer: %s", err) + } + + c, err := dialer.Dial(network, addr) + if err != nil { + return nil, err + } + + return c, nil + } +} + +// BastionConnectFunc is a convenience method for returning a function +// that connects to a host over a bastion connection. +func BastionConnectFunc( + bProto string, + bAddr string, + bConf *ssh.ClientConfig, + proto string, + addr string) func() (net.Conn, error) { + return func() (net.Conn, error) { + // Connect to the bastion + bastion, err := ssh.Dial(bProto, bAddr, bConf) + if err != nil { + return nil, fmt.Errorf("Error connecting to bastion: %s", err) + } + + // Connect through to the end host + conn, err := bastion.Dial(proto, addr) + if err != nil { + bastion.Close() + return nil, err + } + + // Wrap it up so we close both things properly + return &bastionConn{ + Conn: conn, + Bastion: bastion, + }, nil + } +} + +type bastionConn struct { + net.Conn + Bastion *ssh.Client +} + +func (c *bastionConn) Close() error { + c.Conn.Close() + return c.Bastion.Close() +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connection.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connection.go new file mode 100644 index 000000000..c3df04543 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/connection.go @@ -0,0 +1,30 @@ +package ssh + +import ( + "net" + "time" +) + +// timeoutConn wraps a net.Conn, and sets a deadline for every read +// and write operation.
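+// +// Illustrative use (the durations are hypothetical; note that a zero timeout +// would make every deadline expire immediately): +// +//	conn = &timeoutConn{Conn: conn, ReadTimeout: 15 * time.Second, WriteTimeout: 15 * time.Second}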
+type timeoutConn struct { + net.Conn + ReadTimeout time.Duration + WriteTimeout time.Duration +} + +func (c *timeoutConn) Read(b []byte) (int, error) { + err := c.Conn.SetReadDeadline(time.Now().Add(c.ReadTimeout)) + if err != nil { + return 0, err + } + return c.Conn.Read(b) +} + +func (c *timeoutConn) Write(b []byte) (int, error) { + err := c.Conn.SetWriteDeadline(time.Now().Add(c.WriteTimeout)) + if err != nil { + return 0, err + } + return c.Conn.Write(b) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/keyboard_interactive.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/keyboard_interactive.go new file mode 100644 index 000000000..417ab00ae --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/keyboard_interactive.go @@ -0,0 +1,33 @@ +package ssh + +import ( + "io" + "log" + + "golang.org/x/crypto/ssh" + "golang.org/x/crypto/ssh/terminal" +) + +func KeyboardInteractive(c io.ReadWriter) ssh.KeyboardInteractiveChallenge { + t := terminal.NewTerminal(c, "") + return func(user, instruction string, questions []string, echos []bool) ([]string, error) { + if len(questions) == 0 { + return []string{}, nil + } + + log.Printf("[INFO] -- User: %s", user) + log.Printf("[INFO] -- Instructions: %s", instruction) + for i, question := range questions { + log.Printf("[INFO] -- Question %d: %s", i+1, question) + } + answers := make([]string, len(questions)) + for i := range questions { + s, err := t.ReadPassword("") + if err != nil { + return nil, err + } + answers[i] = string(s) + } + return answers, nil + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/password.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/password.go new file mode 100644 index 000000000..774be47f1 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/password.go @@ -0,0 +1,28 @@ +package ssh + +import ( + "log" + + "golang.org/x/crypto/ssh" +) + +// An implementation of ssh.KeyboardInteractiveChallenge that simply sends +// back the password for all questions. The questions are logged. +func PasswordKeyboardInteractive(password string) ssh.KeyboardInteractiveChallenge { + return func(user, instruction string, questions []string, echos []bool) ([]string, error) { + log.Printf("Keyboard interactive challenge: ") + log.Printf("-- User: %s", user) + log.Printf("-- Instructions: %s", instruction) + for i, question := range questions { + log.Printf("-- Question %d: %s", i+1, question) + } + + // Just send the password back for all questions + answers := make([]string, len(questions)) + for i := range answers { + answers[i] = password + } + + return answers, nil + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/tunnel.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/tunnel.go new file mode 100644 index 000000000..c99a1d1d8 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh/tunnel.go @@ -0,0 +1,71 @@ +package ssh + +import ( + "io" + "log" + "net" +) + +// ProxyServe starts accepting connections. +func ProxyServe(l net.Listener, done <-chan struct{}, dialer func() (net.Conn, error)) { + for { + // Accept will return if either the underlying connection is closed or if a connection is made. + // after returning, check to see if done can be received.
If so, then Accept() returned because + // the connection has been closed. + client, err := l.Accept() + select { + case <-done: + log.Printf("[WARN] Tunnel: received Done event: %v", err) + return + default: + if err != nil { + log.Printf("[ERROR] Tunnel: listen.Accept failed: %v", err) + continue + } + log.Printf("[DEBUG] Tunnel: client '%s' accepted", client.RemoteAddr()) + // Proxy bytes from one side to the other + go handleProxyClient(client, dialer) + } + } +} + +// handleProxyClient will open a connection using the dialer, and ensure close events propagate to the brokers +func handleProxyClient(clientConn net.Conn, dialer func() (net.Conn, error)) { + // We have a client connected, open an upstream connection to the destination + upstreamConn, err := dialer() + if err != nil { + log.Printf("[ERROR] Tunnel: failed to open connection to upstream: %v", err) + clientConn.Close() + return + } + + // channels to wait on the close event for each connection + serverClosed := make(chan struct{}, 1) + upstreamClosed := make(chan struct{}, 1) + + go brokerData(clientConn, upstreamConn, upstreamClosed) + go brokerData(upstreamConn, clientConn, serverClosed) + + // Now we wait for the connections to close and notify the other side of the event + select { + case <-upstreamClosed: + clientConn.Close() + <-serverClosed + case <-serverClosed: + upstreamConn.Close() + <-upstreamClosed + } + log.Printf("[DEBUG] Tunnel: client ('%s') proxy closed", clientConn.RemoteAddr()) +} + +// brokerData copies data from src into dst. It also closes dst once src has +// no more bytes to transfer, then signals on srcClosed. +func brokerData(dst net.Conn, src net.Conn, srcClosed chan struct{}) { + _, err := io.Copy(dst, src) + if err != nil { + log.Printf("[ERROR] Tunnel: Copy error: %s", err) + } + if err := dst.Close(); err != nil { + log.Printf("[ERROR] Tunnel: Close error: %s", err) + } + srcClosed <- struct{}{} +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/communicator.go new file mode 100644 index 000000000..11398c994 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/communicator.go @@ -0,0 +1,260 @@ +// Package winrm implements the WinRM communicator. Plugin maintainers should not +// import this package directly, instead using the tooling in the +// "packer-plugin-sdk/communicator" module. +package winrm + +import ( + "context" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + "sync" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/masterzen/winrm" + "github.com/packer-community/winrmcp/winrmcp" +) + +// Communicator represents the WinRM communicator +type Communicator struct { + config *Config + client *winrm.Client + endpoint *winrm.Endpoint +} + +// New creates a new communicator implementation over WinRM.
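+// +// A minimal sketch of constructing one (all field values are hypothetical): +// +//	c, err := New(&Config{ +//		Host:     "203.0.113.20", +//		Port:     5985, +//		Username: "Administrator", +//		Password: "secret", +//		Timeout:  30 * time.Minute, +//	})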
+func New(config *Config) (*Communicator, error) { + endpoint := &winrm.Endpoint{ + Host: config.Host, + Port: config.Port, + HTTPS: config.Https, + Insecure: config.Insecure, + + /* + TODO + HTTPS: connInfo.HTTPS, + Insecure: connInfo.Insecure, + CACert: connInfo.CACert, + */ + } + + // Create the client + params := *winrm.DefaultParameters + + if config.TransportDecorator != nil { + params.TransportDecorator = config.TransportDecorator + } + + params.Timeout = formatDuration(config.Timeout) + client, err := winrm.NewClientWithParameters( + endpoint, config.Username, config.Password, &params) + if err != nil { + return nil, err + } + + // Create the shell to verify the connection + log.Printf("[DEBUG] connecting to remote shell using WinRM") + shell, err := client.CreateShell() + if err != nil { + log.Printf("[ERROR] connection error: %s", err) + return nil, err + } + + if err := shell.Close(); err != nil { + log.Printf("[ERROR] error closing connection: %s", err) + return nil, err + } + + return &Communicator{ + config: config, + client: client, + endpoint: endpoint, + }, nil +} + +// Start implementation of communicator.Communicator interface +func (c *Communicator) Start(ctx context.Context, rc *packersdk.RemoteCmd) error { + shell, err := c.client.CreateShell() + if err != nil { + return err + } + + log.Printf("[INFO] starting remote command: %s", rc.Command) + cmd, err := shell.Execute(rc.Command) + if err != nil { + return err + } + + go runCommand(shell, cmd, rc) + return nil +} + +func runCommand(shell *winrm.Shell, cmd *winrm.Command, rc *packersdk.RemoteCmd) { + defer shell.Close() + var wg sync.WaitGroup + + copyFunc := func(w io.Writer, r io.Reader) { + defer wg.Done() + io.Copy(w, r) + } + + if rc.Stdout != nil && cmd.Stdout != nil { + wg.Add(1) + go copyFunc(rc.Stdout, cmd.Stdout) + } else { + log.Printf("[WARN] Failed to read stdout for command '%s'", rc.Command) + } + + if rc.Stderr != nil && cmd.Stderr != nil { + wg.Add(1) + go copyFunc(rc.Stderr, cmd.Stderr) + } else { + log.Printf("[WARN] Failed to read stderr for command '%s'", rc.Command) + } + + cmd.Wait() + wg.Wait() + + code := cmd.ExitCode() + log.Printf("[INFO] command '%s' exited with code: %d", rc.Command, code) + rc.SetExited(code) +} + +// Upload implementation of communicator.Communicator interface +func (c *Communicator) Upload(path string, input io.Reader, fi *os.FileInfo) error { + wcp, err := c.newCopyClient() + if err != nil { + return fmt.Errorf("Unable to create WinRM client: %s", err) + } + if strings.HasSuffix(path, `\`) { + // path is a directory + if fi != nil { + path += filepath.Base((*fi).Name()) + } else { + return fmt.Errorf("Unable to infer file basename for upload.") + } + } + log.Printf("Uploading file to '%s'", path) + return wcp.Write(path, input) +} + +// UploadDir implementation of communicator.Communicator interface +func (c *Communicator) UploadDir(dst string, src string, exclude []string) error { + if !strings.HasSuffix(src, "/") { + dst = fmt.Sprintf("%s\\%s", dst, filepath.Base(src)) + } + log.Printf("Uploading dir '%s' to '%s'", src, dst) + wcp, err := c.newCopyClient() + if err != nil { + return err + } + return wcp.Copy(src, dst) +} + +func (c *Communicator) Download(src string, dst io.Writer) error { + client, err := c.newWinRMClient() + if err != nil { + return err + } + + encodeScript := `$file=[System.IO.File]::ReadAllBytes("%s"); Write-Output $([System.Convert]::ToBase64String($file))` + + base64DecodePipe := &Base64Pipe{w: dst} + + cmd := 
winrm.Powershell(fmt.Sprintf(encodeScript, src)) + _, err = client.Run(cmd, base64DecodePipe, ioutil.Discard) + + return err +} + +func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error { + return fmt.Errorf("WinRM doesn't support download dir.") +} + +func (c *Communicator) getClientConfig() *winrmcp.Config { + return &winrmcp.Config{ + Auth: winrmcp.Auth{ + User: c.config.Username, + Password: c.config.Password, + }, + Https: c.config.Https, + Insecure: c.config.Insecure, + OperationTimeout: c.config.Timeout, + MaxOperationsPerShell: 15, // lowest common denominator + TransportDecorator: c.config.TransportDecorator, + } +} + +func (c *Communicator) newCopyClient() (*winrmcp.Winrmcp, error) { + addr := fmt.Sprintf("%s:%d", c.endpoint.Host, c.endpoint.Port) + clientConfig := c.getClientConfig() + return winrmcp.New(addr, clientConfig) +} + +func (c *Communicator) newWinRMClient() (*winrm.Client, error) { + conf := c.getClientConfig() + + // Shamelessly borrowed from the winrmcp client to ensure + // that the client is configured using the same defaulting behaviors that + // winrmcp uses even when we aren't using winrmcp. This ensures similar + // behavior between upload, download, and copy functions. We can't use the + // one generated by winrmcp because it isn't exported. + endpoint := &winrm.Endpoint{ + Host: c.endpoint.Host, + Port: c.endpoint.Port, + HTTPS: conf.Https, + Insecure: conf.Insecure, + TLSServerName: conf.TLSServerName, + CACert: conf.CACertBytes, + Timeout: conf.ConnectTimeout, + } + params := winrm.NewParameters( + winrm.DefaultParameters.Timeout, + winrm.DefaultParameters.Locale, + winrm.DefaultParameters.EnvelopeSize, + ) + + params.TransportDecorator = conf.TransportDecorator + params.Timeout = "PT3M" + + client, err := winrm.NewClientWithParameters( + endpoint, conf.Auth.User, conf.Auth.Password, params) + return client, err +} + +type Base64Pipe struct { + w io.Writer // underlying writer (file, buffer) +} + +func (d *Base64Pipe) ReadFrom(r io.Reader) (int64, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return 0, err + } + + var i int + i, err = d.Write(b) + + if err != nil { + return 0, err + } + + return int64(i), err +} + +func (d *Base64Pipe) Write(p []byte) (int, error) { + dst := make([]byte, base64.StdEncoding.DecodedLen(len(p))) + + decodedBytes, err := base64.StdEncoding.Decode(dst, p) + if err != nil { + return 0, err + } + + return d.w.Write(dst[0:decodedBytes]) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/config.go new file mode 100644 index 000000000..728336734 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/config.go @@ -0,0 +1,19 @@ +package winrm + +import ( + "time" + + "github.com/masterzen/winrm" +) + +// Config is used to configure the WinRM connection +type Config struct { + Host string + Port int + Username string + Password string + Timeout time.Duration + Https bool + Insecure bool + TransportDecorator func() winrm.Transporter +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/time.go b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/time.go new file mode 100644 index 000000000..f8fb6fe8d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm/time.go @@ -0,0 +1,32 @@ 
+package winrm + +import ( + "fmt" + "time" +) + +// formatDuration formats the given time.Duration into an ISO8601 +// duration string. +func formatDuration(duration time.Duration) string { + // We don't support negative durations, and a sub-second duration would + // otherwise produce an invalid bare "PT", so both format as zero. + if duration.Seconds() < 1 { + return "PT0S" + } + + h := int(duration.Hours()) + m := int(duration.Minutes()) - (h * 60) + s := int(duration.Seconds()) - (h*3600 + m*60) + + res := "PT" + if h > 0 { + res = fmt.Sprintf("%s%dH", res, h) + } + if m > 0 { + res = fmt.Sprintf("%s%dM", res, m) + } + if s > 0 { + res = fmt.Sprintf("%s%dS", res, s) + } + + return res +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/communicator.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/communicator.go new file mode 100644 index 000000000..4c2cc0c98 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/communicator.go @@ -0,0 +1,73 @@ +package shell_local + +import ( + "context" + "fmt" + "io" + "log" + "os" + "os/exec" + "syscall" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" +) + +type Communicator struct { + ExecuteCommand []string +} + +func (c *Communicator) Start(ctx context.Context, cmd *packersdk.RemoteCmd) error { + if len(c.ExecuteCommand) == 0 { + return fmt.Errorf("Error launching command via shell-local communicator: No ExecuteCommand provided") + } + + // Build the local command to execute + log.Printf("[INFO] (shell-local communicator): Executing local shell command %s", c.ExecuteCommand) + localCmd := exec.CommandContext(ctx, c.ExecuteCommand[0], c.ExecuteCommand[1:]...) + localCmd.Stdin = cmd.Stdin + localCmd.Stdout = cmd.Stdout + localCmd.Stderr = cmd.Stderr + + // Start it. If it doesn't work, then error right away. + if err := localCmd.Start(); err != nil { + return err + } + + // We've started successfully. Start a goroutine to wait for + // it to complete and track exit status. + go func() { + var exitStatus int + err := localCmd.Wait() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitStatus = 1 + + // There is no process-independent way to get the REAL + // exit status so we just try to go deeper.
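+ // e.g. a script that ends with `exit 3` surfaces here as + // status.ExitStatus() == 3 rather than the generic 1 above.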
+ if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + } + } + + cmd.SetExited(exitStatus) + }() + + return nil +} + +func (c *Communicator) Upload(string, io.Reader, *os.FileInfo) error { + return fmt.Errorf("upload not supported") +} + +func (c *Communicator) UploadDir(string, string, []string) error { + return fmt.Errorf("uploadDir not supported") +} + +func (c *Communicator) Download(string, io.Writer) error { + return fmt.Errorf("download not supported") +} + +func (c *Communicator) DownloadDir(string, string, []string) error { + return fmt.Errorf("downloadDir not supported") +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.go new file mode 100644 index 000000000..ea80cab38 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.go @@ -0,0 +1,237 @@ +//go:generate mapstructure-to-hcl2 -type Config + +package shell_local + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/shell" + configHelper "github.com/hashicorp/packer-plugin-sdk/template/config" + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" +) + +type Config struct { + shell.Provisioner `mapstructure:",squash"` + + // ** DEPRECATED: USE INLINE INSTEAD ** + // ** Only Present for backwards compatibility ** + // Command is the command to execute + Command string + + // The command used to execute the script. The '{{ .Script }}' variable + // should be used to specify where the script goes; {{ .Vars }} + // can be used to inject the environment_vars into the environment. + ExecuteCommand []string `mapstructure:"execute_command"` + + // The shebang value used when running inline scripts. + InlineShebang string `mapstructure:"inline_shebang"` + + // An array of runtime OSes (GOOS values) on which this may run. + OnlyOn []string `mapstructure:"only_on"` + + // The file extension to use for the file generated from the inline commands + TempfileExtension string `mapstructure:"tempfile_extension"` + + // End dedupe with postprocessor + UseLinuxPathing bool `mapstructure:"use_linux_pathing"` + + // used to track the data sent to shell-local from the builder + // GeneratedData + + ctx interpolate.Context + generatedData map[string]interface{} +} + +func Decode(config *Config, raws ...interface{}) error { + err := configHelper.Decode(config, &configHelper.DecodeOpts{ + Interpolate: true, + InterpolateContext: &config.ctx, + InterpolateFilter: &interpolate.RenderFilter{ + Exclude: []string{ + "execute_command", + }, + }, + }, raws...)
+ if err != nil { + return fmt.Errorf("Error decoding config: %s", err) + // return fmt.Errorf("Error decoding config: %s, config is %#v, and raws is %#v", err, config, raws) + } + + return nil +} + +func Validate(config *Config) error { + var errs *packersdk.MultiError + + if runtime.GOOS == "windows" { + if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{ + "cmd", + "/V", + "/C", + "{{.Vars}}", + "call", + "{{.Script}}", + } + } + if len(config.TempfileExtension) == 0 { + config.TempfileExtension = ".cmd" + } + } else { + if config.InlineShebang == "" { + config.InlineShebang = "/bin/sh -e" + } + if len(config.ExecuteCommand) == 0 { + config.ExecuteCommand = []string{ + "/bin/sh", + "-c", + "{{.Vars}} {{.Script}}", + } + } + } + + // Clean up input + if config.Inline != nil && len(config.Inline) == 0 { + config.Inline = make([]string, 0) + } + + if config.Scripts == nil { + config.Scripts = make([]string, 0) + } + + if config.Vars == nil { + config.Vars = make([]string, 0) + } + + // Verify that the user has given us a command to run + if config.Command == "" && len(config.Inline) == 0 && + len(config.Scripts) == 0 && config.Script == "" { + errs = packersdk.MultiErrorAppend(errs, + errors.New("Command, Inline, Script and Scripts options cannot all be empty.")) + } + + // Check that user hasn't given us too many commands to run + tooManyOptionsErr := errors.New("You may only specify one of the " + + "following options: Command, Inline, Script or Scripts. Please" + + " consolidate these options in your config.") + + if config.Command != "" { + if len(config.Inline) != 0 || len(config.Scripts) != 0 || config.Script != "" { + errs = packersdk.MultiErrorAppend(errs, tooManyOptionsErr) + } else { + config.Inline = []string{config.Command} + } + } + + if config.Script != "" { + if len(config.Scripts) > 0 || len(config.Inline) > 0 { + errs = packersdk.MultiErrorAppend(errs, tooManyOptionsErr) + } else { + config.Scripts = []string{config.Script} + } + } + + if len(config.Scripts) > 0 && config.Inline != nil { + errs = packersdk.MultiErrorAppend(errs, tooManyOptionsErr) + } + + // Check that all scripts we need to run exist locally + for _, path := range config.Scripts { + if _, err := os.Stat(path); err != nil { + errs = packersdk.MultiErrorAppend(errs, + fmt.Errorf("Bad script '%s': %s", path, err)) + } + } + + // Check for properly formatted Go OS types + supportedSyslist := []string{"darwin", "freebsd", "linux", "openbsd", "solaris", "windows"} + if len(config.OnlyOn) > 0 { + for _, provided_os := range config.OnlyOn { + supported_os := false + for _, go_os := range supportedSyslist { + if provided_os == go_os { + supported_os = true + break + } + } + if !supported_os { + return fmt.Errorf("Invalid OS specified in only_on: '%s'\n"+ + "Supported OS names: %s", provided_os, strings.Join(supportedSyslist, ", ")) + } + } + } + + if config.UseLinuxPathing { + for index, script := range config.Scripts { + scriptAbsPath, err := filepath.Abs(script) + if err != nil { + return fmt.Errorf("Error converting %s to absolute path: %s", script, err.Error()) + } + converted, err := ConvertToLinuxPath(scriptAbsPath) + if err != nil { + return err + } + config.Scripts[index] = converted + } + // Interoperability issues with WSL make creating and running tempfiles + // via golang's os package basically impossible.
+ if len(config.Inline) > 0 { + errs = packersdk.MultiErrorAppend(errs, + fmt.Errorf("Packer is unable to use the Command and Inline "+ + "features with the Windows Linux Subsystem. Please use "+ + "the Script or Scripts options instead")) + } + } + + if config.EnvVarFormat == "" { + if (runtime.GOOS == "windows") && !config.UseLinuxPathing { + config.EnvVarFormat = "set %s=%s && " + } else { + config.EnvVarFormat = "%s='%s' " + } + } + + // drop unnecessary "." in extension; we add this later. + if config.TempfileExtension != "" { + if strings.HasPrefix(config.TempfileExtension, ".") { + config.TempfileExtension = config.TempfileExtension[1:] + } + } + + // Do a check for bad environment variables, such as '=foo', 'foobar' + for _, kv := range config.Vars { + vs := strings.SplitN(kv, "=", 2) + if len(vs) != 2 || vs[0] == "" { + errs = packersdk.MultiErrorAppend(errs, + fmt.Errorf("Environment variable not in format 'key=value': %s", kv)) + } + } + + if errs != nil && len(errs.Errors) > 0 { + return errs + } + + return nil +} + +// C:/path/to/your/file becomes /mnt/c/path/to/your/file +func ConvertToLinuxPath(winAbsPath string) (string, error) { + // get absolute path of script, and morph it into the bash path + winAbsPath = strings.Replace(winAbsPath, "\\", "/", -1) + splitPath := strings.SplitN(winAbsPath, ":/", 2) + if len(splitPath) == 2 { + winBashPath := fmt.Sprintf("/mnt/%s/%s", strings.ToLower(splitPath[0]), splitPath[1]) + return winBashPath, nil + } else { + err := fmt.Errorf("There was an error splitting your absolute path; expected "+ + "to find a drive following the format ':/' but did not: absolute "+ + "path: %s", winAbsPath) + return "", err + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.hcl2spec.go new file mode 100644 index 000000000..33b1265d5 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/config.hcl2spec.go @@ -0,0 +1,69 @@ +// Code generated by "mapstructure-to-hcl2 -type Config"; DO NOT EDIT. + +package shell_local + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// FlatConfig is an auto-generated flat version of Config. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
+type FlatConfig struct { + PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"` + PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"` + PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"` + PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"` + PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"` + PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"` + PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"` + PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"` + Inline []string `cty:"inline" hcl:"inline"` + Script *string `cty:"script" hcl:"script"` + Scripts []string `cty:"scripts" hcl:"scripts"` + ValidExitCodes []int `mapstructure:"valid_exit_codes" cty:"valid_exit_codes" hcl:"valid_exit_codes"` + Vars []string `mapstructure:"environment_vars" cty:"environment_vars" hcl:"environment_vars"` + EnvVarFormat *string `mapstructure:"env_var_format" cty:"env_var_format" hcl:"env_var_format"` + Command *string `cty:"command" hcl:"command"` + ExecuteCommand []string `mapstructure:"execute_command" cty:"execute_command" hcl:"execute_command"` + InlineShebang *string `mapstructure:"inline_shebang" cty:"inline_shebang" hcl:"inline_shebang"` + OnlyOn []string `mapstructure:"only_on" cty:"only_on" hcl:"only_on"` + TempfileExtension *string `mapstructure:"tempfile_extension" cty:"tempfile_extension" hcl:"tempfile_extension"` + UseLinuxPathing *bool `mapstructure:"use_linux_pathing" cty:"use_linux_pathing" hcl:"use_linux_pathing"` +} + +// FlatMapstructure returns a new FlatConfig. +// FlatConfig is an auto-generated flat version of Config. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatConfig) +} + +// HCL2Spec returns the hcl spec of a Config. +// This spec is used by HCL to read the fields of Config. +// The decoded values from this spec will then be applied to a FlatConfig. 
+func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false}, + "packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false}, + "packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false}, + "packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false}, + "packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false}, + "packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false}, + "packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false}, + "packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false}, + "inline": &hcldec.AttrSpec{Name: "inline", Type: cty.List(cty.String), Required: false}, + "script": &hcldec.AttrSpec{Name: "script", Type: cty.String, Required: false}, + "scripts": &hcldec.AttrSpec{Name: "scripts", Type: cty.List(cty.String), Required: false}, + "valid_exit_codes": &hcldec.AttrSpec{Name: "valid_exit_codes", Type: cty.List(cty.Number), Required: false}, + "environment_vars": &hcldec.AttrSpec{Name: "environment_vars", Type: cty.List(cty.String), Required: false}, + "env_var_format": &hcldec.AttrSpec{Name: "env_var_format", Type: cty.String, Required: false}, + "command": &hcldec.AttrSpec{Name: "command", Type: cty.String, Required: false}, + "execute_command": &hcldec.AttrSpec{Name: "execute_command", Type: cty.List(cty.String), Required: false}, + "inline_shebang": &hcldec.AttrSpec{Name: "inline_shebang", Type: cty.String, Required: false}, + "only_on": &hcldec.AttrSpec{Name: "only_on", Type: cty.List(cty.String), Required: false}, + "tempfile_extension": &hcldec.AttrSpec{Name: "tempfile_extension", Type: cty.String, Required: false}, + "use_linux_pathing": &hcldec.AttrSpec{Name: "use_linux_pathing", Type: cty.Bool, Required: false}, + } + return s +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/doc.go new file mode 100644 index 000000000..1c7a21ead --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/doc.go @@ -0,0 +1,14 @@ +/* +Package shell-local is designed to make it easier to shell out locally on the +machine running Packer. The top level tools in this package are probably not +relevant to plugin maintainers, as they are implementation details shared +between the HashiCorp-maintained shell-local provisioner and shell-local +post-processor. + +However, the localexec sub-package can be used in any plugins that need local +shell access, whether that is in a driver for a hypervisor, or a command to a +third party cli tool. Please make sure that any third party tool dependencies +are noted in your plugin's documentation. 
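+ +As an illustration only (the command here is hypothetical), a plugin might +shell out through the localexec helper like this: + +	cmd := exec.Command("qemu-img", "--version") +	if err := localexec.RunAndStream(cmd, ui, nil); err != nil { +		// err carries the tool's non-zero exit status +	}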
+*/ + +package shell_local diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/localexec/run_and_stream.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/localexec/run_and_stream.go new file mode 100644 index 000000000..b4c595309 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/localexec/run_and_stream.go @@ -0,0 +1,113 @@ +package localexec + +import ( + "fmt" + "io" + "log" + "os/exec" + "regexp" + "strings" + "sync" + "syscall" + + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/mitchellh/iochan" +) + +// RunAndStream allows you to run a local command and stream output to the UI. +// This does not require use of a shell-local communicator, so is a nice tool +// for plugins that need to shell out to a local dependency and provide clear +// output to users. +func RunAndStream(cmd *exec.Cmd, ui packersdk.Ui, sensitive []string) error { + stdout_r, stdout_w := io.Pipe() + stderr_r, stderr_w := io.Pipe() + defer stdout_w.Close() + defer stderr_w.Close() + + // Scrub any sensitive values from being printed to Packer ui. + packersdk.LogSecretFilter.Set(sensitive...) + + args := make([]string, len(cmd.Args)-1) + copy(args, cmd.Args[1:]) + + log.Printf("Executing: %s %v", cmd.Path, args) + cmd.Stdout = stdout_w + cmd.Stderr = stderr_w + if err := cmd.Start(); err != nil { + return err + } + + // Create the channels we'll use for data + exitCh := make(chan int, 1) + stdoutCh := iochan.DelimReader(stdout_r, '\n') + stderrCh := iochan.DelimReader(stderr_r, '\n') + + // Start the goroutine to watch for the exit + go func() { + defer stdout_w.Close() + defer stderr_w.Close() + exitStatus := 0 + + err := cmd.Wait() + if exitErr, ok := err.(*exec.ExitError); ok { + exitStatus = 1 + + // There is no process-independent way to get the REAL + // exit status so we just try to go deeper. + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitStatus = status.ExitStatus() + } + } + + exitCh <- exitStatus + }() + + // This waitgroup waits for the streaming to end + var streamWg sync.WaitGroup + streamWg.Add(2) + + streamFunc := func(ch <-chan string) { + defer streamWg.Done() + + for data := range ch { + data = cleanOutputLine(data) + if data != "" { + ui.Message(data) + } + } + } + + // Stream stderr/stdout + go streamFunc(stderrCh) + go streamFunc(stdoutCh) + + // Wait for the process to end and then wait for the streaming to end + exitStatus := <-exitCh + streamWg.Wait() + + if exitStatus != 0 { + return fmt.Errorf("Bad exit status: %d", exitStatus) + } + + return nil +} + +// cleanOutputLine cleans up a line so that stray '\r' characters don't muck up +// the UI output when we're reading from a remote command. +func cleanOutputLine(line string) string { + // Build a regular expression that will get rid of shell codes + re := regexp.MustCompile("(?i)\x1b\\[([0-9]{1,2}(;[0-9]{1,2})?)?[abmk]") + line = re.ReplaceAllString(line, "") + + // Trim surrounding whitespace + line = strings.TrimSpace(line) + + // Trim up to the last carriage return, since any text before it would be + // lost anyways.
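+ // e.g. a progress line received as "copying...  10%\rcopying... 100%" is + // reduced to just "copying... 100%".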
+ idx := strings.LastIndex(line, "\r") + if idx > -1 { + line = line[idx+1:] + } + + return line +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/run.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/run.go new file mode 100644 index 000000000..a82f4ba7d --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell-local/run.go @@ -0,0 +1,220 @@ +package shell_local + +import ( + "bufio" + "context" + "fmt" + "log" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + + "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" + "github.com/hashicorp/packer-plugin-sdk/tmp" +) + +func Run(ctx context.Context, ui packersdk.Ui, config *Config, generatedData map[string]interface{}) (bool, error) { + if generatedData != nil { + config.generatedData = generatedData + } else { + // No fear; probably just in the post-processor, not provisioner. + // Make sure it's not a nil map so we can assign to it later. + config.generatedData = make(map[string]interface{}) + } + config.ctx.Data = generatedData + // Check if shell-local can even execute against this runtime OS + if len(config.OnlyOn) > 0 { + runCommand := false + for _, os := range config.OnlyOn { + if os == runtime.GOOS { + runCommand = true + break + } + } + if !runCommand { + ui.Say("Skipping shell-local due to runtime OS") + log.Printf("[INFO] (shell-local): skipping shell-local due to missing runtime OS") + return true, nil + } + } + + scripts := make([]string, len(config.Scripts)) + if len(config.Scripts) > 0 { + copy(scripts, config.Scripts) + } else if config.Inline != nil { + // If we have an inline script, then turn that into a temporary + // shell script and use that. + tempScriptFileName, err := createInlineScriptFile(config) + if err != nil { + return false, err + } + + // figure out what extension the file should have, and rename it. + if config.TempfileExtension != "" { + os.Rename(tempScriptFileName, fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension)) + tempScriptFileName = fmt.Sprintf("%s.%s", tempScriptFileName, config.TempfileExtension) + } + + scripts = append(scripts, tempScriptFileName) + + defer os.Remove(tempScriptFileName) + } + + // Create environment variables to set before executing the command + flattenedEnvVars, err := createFlattenedEnvVars(config) + if err != nil { + return false, err + } + + for _, script := range scripts { + // use absolute path in case the script is linked with forward slashes + // on windows. + absScript, err := filepath.Abs(script) + if err != nil { + return false, fmt.Errorf( + "Error executing script: %s\n%v\n", + absScript, + err, + ) + } + interpolatedCmds, err := createInterpolatedCommands(config, absScript, flattenedEnvVars) + if err != nil { + return false, err + } + ui.Say(fmt.Sprintf("Running local shell script: %s", script)) + + comm := &Communicator{ + ExecuteCommand: interpolatedCmds, + } + + // The remoteCmd generated here isn't actually run, but it allows us to + // use the same interface for the shell-local communicator as we use for + // the other communicators; ultimately, this command is just used for + // buffers and for reading the final exit status.
+ flattenedCmd := strings.Join(interpolatedCmds, " ") + cmd := &packersdk.RemoteCmd{Command: flattenedCmd} + log.Printf("[INFO] (shell-local): starting local command: %s", flattenedCmd) + if err := cmd.RunWithUi(ctx, comm, ui); err != nil { + return false, fmt.Errorf( + "Error executing script: %s\n\n"+ + "Please see output above for more information.", + absScript) + } + + if err := config.ValidExitCode(cmd.ExitStatus()); err != nil { + return false, err + } + } + + return true, nil +} + +func createInlineScriptFile(config *Config) (string, error) { + tf, err := tmp.File("packer-shell") + if err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + defer tf.Close() + // Write our contents to it + writer := bufio.NewWriter(tf) + if config.InlineShebang != "" { + shebang := fmt.Sprintf("#!%s\n", config.InlineShebang) + log.Printf("[INFO] (shell-local): Prepending inline script with %s", shebang) + writer.WriteString(shebang) + } + + for _, command := range config.Inline { + // interpolate command to check for template variables. + command, err := interpolate.Render(command, &config.ctx) + if err != nil { + return "", err + } + + if _, err := writer.WriteString(command + "\n"); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + } + + if err := writer.Flush(); err != nil { + return "", fmt.Errorf("Error preparing shell script: %s", err) + } + + err = os.Chmod(tf.Name(), 0700) + if err != nil { + log.Printf("[ERROR] (shell-local): error modifying permissions of temp script file: %s", err.Error()) + } + return tf.Name(), nil +} + +// Generates the final command to send to the communicator, using either the +// user-provided ExecuteCommand or defaulting to something that makes sense for +// the host OS +func createInterpolatedCommands(config *Config, script string, flattenedEnvVars string) ([]string, error) { + config.generatedData["Vars"] = flattenedEnvVars + config.generatedData["Script"] = script + config.generatedData["Command"] = script + + config.ctx.Data = config.generatedData + + interpolatedCmds := make([]string, len(config.ExecuteCommand)) + for i, cmd := range config.ExecuteCommand { + interpolatedCmd, err := interpolate.Render(cmd, &config.ctx) + if err != nil { + return nil, fmt.Errorf("Error processing command: %s", err) + } + interpolatedCmds[i] = interpolatedCmd + } + return interpolatedCmds, nil +} + +func createFlattenedEnvVars(config *Config) (string, error) { + flattened := "" + envVars := make(map[string]string) + + // Always available Packer provided env vars + envVars["PACKER_BUILD_NAME"] = config.PackerBuildName + envVars["PACKER_BUILDER_TYPE"] = config.PackerBuilderType + + // expose ip address variables + httpAddr := config.generatedData["PackerHTTPAddr"] + if httpAddr != nil && httpAddr != commonsteps.HttpAddrNotImplemented { + envVars["PACKER_HTTP_ADDR"] = httpAddr.(string) + } + httpIP := config.generatedData["PackerHTTPIP"] + if httpIP != nil && httpIP != commonsteps.HttpIPNotImplemented { + envVars["PACKER_HTTP_IP"] = httpIP.(string) + } + httpPort := config.generatedData["PackerHTTPPort"] + if httpPort != nil && httpPort != commonsteps.HttpPortNotImplemented { + envVars["PACKER_HTTP_PORT"] = httpPort.(string) + } + + // Split vars into key/value components + for _, envVar := range config.Vars { + envVar, err := interpolate.Render(envVar, &config.ctx) + if err != nil { + return "", err + } + // Split vars into key/value components + keyValue := strings.SplitN(envVar, "=", 2) + // Store pair, replacing any 
single quotes in value so they parse + // correctly with required environment variable format + envVars[keyValue[0]] = strings.Replace(keyValue[1], "'", `'"'"'`, -1) + } + + // Create a list of env var keys in sorted order + var keys []string + for k := range envVars { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + flattened += fmt.Sprintf(config.EnvVarFormat, key, envVars[key]) + } + return flattened, nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell/exit_code.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell/exit_code.go new file mode 100644 index 000000000..815e92f88 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell/exit_code.go @@ -0,0 +1,39 @@ +package shell + +import "fmt" + +func (p *Provisioner) ValidExitCode(code int) error { + // Check exit code against allowed codes (likely just 0) + validCodes := p.ValidExitCodes + if len(validCodes) == 0 { + validCodes = []int{0} + } + validExitCode := false + for _, v := range validCodes { + if code == v { + validExitCode = true + break + } + } + if !validExitCode { + return &ErrorInvalidExitCode{ + Code: code, + Allowed: validCodes, + } + } + return nil +} + +type ErrorInvalidExitCode struct { + Code int + Allowed []int +} + +func (e *ErrorInvalidExitCode) Error() string { + if e == nil { + return "" + } + return fmt.Sprintf("Script exited with non-zero exit status: %d. "+ "Allowed exit codes are: %v", + e.Code, e.Allowed) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shell/shell.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shell/shell.go new file mode 100644 index 000000000..180955cbf --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shell/shell.go @@ -0,0 +1,62 @@ +// Package shell defines configuration fields that are common to many different +// kinds of shell. For example, this common configuration is imported by the +// "shell", "shell-local", and "powershell" provisioners. This provides +// consistency in the user experience and prevents provisioner maintainers from +// having to reimplement common useful functions across various environments. +package shell + +import "github.com/hashicorp/packer-plugin-sdk/common" + +// Provisioner contains fields common to all shell provisioners. +// It is provided as a convenience to encourage plugin developers to +// consider implementing these options, which we believe are valuable for all +// shell-type provisioners. It also helps guarantee that option names for +// similar options are the same across the various shell provisioners. +// Validation and defaulting are left to the maintainer because appropriate +// values and defaults will be different depending on which shell is used. +// To use the Provisioner struct, embed it in your shell provisioner's config +// using the `mapstructure:",squash"` struct tag. Examples can be found in the +// HashiCorp-maintained "shell", "shell-local", "windows-shell" and "powershell" +// provisioners. +type Provisioner struct { + common.PackerConfig `mapstructure:",squash"` + + // An inline script to execute. Multiple strings are all executed + // in the context of a single shell. + Inline []string + + // The local path of the shell script to upload and execute. + Script string + + // An array of multiple scripts to run. + Scripts []string + + // Valid Exit Codes - 0 is not always the only valid exit code!
See + // http://www.symantec.com/connect/articles/windows-system-error-codes-exit-codes-description + // for examples such as 3010 - "The requested operation is successful." + ValidExitCodes []int `mapstructure:"valid_exit_codes"` + + // An array of environment variables that will be injected before + // your command(s) are executed. + Vars []string `mapstructure:"environment_vars"` + + // This is used in the template generation to format environment variables + // inside the `ExecuteCommand` template. + EnvVarFormat string `mapstructure:"env_var_format"` +} + +type ProvisionerRemoteSpecific struct { + // If true, the script contains binary data and line endings will not be + // converted from Windows to Unix-style. + Binary bool + + // The remote path where the local shell script will be uploaded to. + // This should be set to a writable file that is in a pre-existing directory. + // This defaults to remote_folder/remote_file + RemotePath string `mapstructure:"remote_path"` + + // The command used to execute the script. The '{{ .Path }}' variable + // should be used to specify where the script goes, {{ .Vars }} + // can be used to inject the environment_vars into the environment. + ExecuteCommand string `mapstructure:"execute_command"` +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/shutdowncommand/config.go b/vendor/github.com/hashicorp/packer-plugin-sdk/shutdowncommand/config.go new file mode 100644 index 000000000..81caca725 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/shutdowncommand/config.go @@ -0,0 +1,43 @@ +//go:generate struct-markdown + +// Package shutdowncommand is a helper module for builder plugin configuration. +package shutdowncommand + +import ( + "time" + + "github.com/hashicorp/packer-plugin-sdk/template/interpolate" +) + +// ShutdownConfig defines implementation details for shutting down a VM once it +// is done being provisioned. +// +// It is provided as a convenience to encourage builder developers to +// consider implementing these options, which we believe are valuable for all +// builders. It also helps guarantee that option names for similar options +// are the same across the various builders. Embed it in your builder config +// using the `mapstructure:",squash"` struct tag. +type ShutdownConfig struct { + // The command to use to gracefully shut down the machine once all + // provisioning is complete. By default this is an empty string, which + // tells Packer to just forcefully shut down the machine. This setting can + // be safely omitted if for example, a shutdown command to gracefully halt + // the machine is configured inside a provisioning script. If one or more + // scripts require a reboot it is suggested to leave this blank (since + // reboots may fail) and instead specify the final shutdown command in your + // last script. + ShutdownCommand string `mapstructure:"shutdown_command" required:"false"` + // The amount of time to wait after executing the shutdown_command for the + // virtual machine to actually shut down. If the machine doesn't shut down + // in this time it is considered an error. By default, the timeout is "5m" + // (five minutes).
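+ // For example, in a template (the value is hypothetical): + // + //   shutdown_timeout = "10m"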
+ ShutdownTimeout time.Duration `mapstructure:"shutdown_timeout" required:"false"` +} + +func (c *ShutdownConfig) Prepare(ctx *interpolate.Context) []error { + if c.ShutdownTimeout == 0 { + c.ShutdownTimeout = 5 * time.Minute + } + + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.go new file mode 100644 index 000000000..398f24dd4 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.go @@ -0,0 +1,142 @@ +//go:generate mapstructure-to-hcl2 -type KeyValue,KeyValues,KeyValueFilter,NameValue,NameValues,NameValueFilter +package config + +import ( + "strconv" +) + +type Trilean uint8 + +const ( + // This will assign unset to 0, which is the default value in interpolation + TriUnset Trilean = iota + TriTrue + TriFalse +) + +func (t Trilean) ToString() string { + if t == TriTrue { + return "TriTrue" + } else if t == TriFalse { + return "TriFalse" + } + return "TriUnset" +} + +func (t Trilean) ToBoolPointer() *bool { + if t == TriTrue { + return boolPointer(true) + } else if t == TriFalse { + return boolPointer(false) + } + return nil +} + +func (t Trilean) True() bool { + return t == TriTrue +} + +func (t Trilean) False() bool { + return t == TriFalse +} + +func TrileanFromString(s string) (Trilean, error) { + if s == "" { + return TriUnset, nil + } + + b, err := strconv.ParseBool(s) + if err != nil { + return TriUnset, err + } else if b { + return TriTrue, nil + } else { + return TriFalse, nil + } +} + +func TrileanFromBool(b bool) Trilean { + if b { + return TriTrue + } + return TriFalse +} + +func boolPointer(b bool) *bool { + return &b +} + +// These are used to convert HCL blocks to key-value pairs +type KeyValue struct { + Key string + Value string +} + +type KeyValues []KeyValue + +func (kvs KeyValues) CopyOn(to *map[string]string) []error { + if len(kvs) == 0 { + return nil + } + if *to == nil { + *to = map[string]string{} + } + for _, kv := range kvs { + (*to)[kv.Key] = kv.Value + } + return nil +} + +type KeyValueFilter struct { + Filters map[string]string + Filter KeyValues +} + +func (kvf *KeyValueFilter) Prepare() []error { + kvf.Filter.CopyOn(&kvf.Filters) + return nil +} + +func (kvf *KeyValueFilter) Empty() bool { + return len(kvf.Filters) == 0 +} + +type NameValue struct { + Name string + Value string +} + +type NameValues []NameValue + +func (nvs NameValues) CopyOn(to *map[string]string) []error { + if len(nvs) == 0 { + return nil + } + if *to == nil { + *to = map[string]string{} + } + for _, kv := range nvs { + (*to)[kv.Name] = kv.Value + } + return nil +} + +type NameValueFilter struct { + Filters map[string]string + Filter NameValues +} + +func (nvf *NameValueFilter) Prepare() []error { + nvf.Filter.CopyOn(&nvf.Filters) + return nil +} + +func (nvf *NameValueFilter) Empty() bool { + return len(nvf.Filters) == 0 +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.hcl2spec.go new file mode 100644 index 000000000..d438b16f0 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/custom_types.hcl2spec.go @@ -0,0 +1,108 @@ +// Code generated by "mapstructure-to-hcl2 -type KeyValue,KeyValues,KeyValueFilter,NameValue,NameValues,NameValueFilter"; DO NOT EDIT. 
+ +package config + +import ( + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/zclconf/go-cty/cty" +) + +// FlatKeyValue is an auto-generated flat version of KeyValue. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatKeyValue struct { + Key *string `cty:"key" hcl:"key"` + Value *string `cty:"value" hcl:"value"` +} + +// FlatMapstructure returns a new FlatKeyValue. +// FlatKeyValue is an auto-generated flat version of KeyValue. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*KeyValue) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatKeyValue) +} + +// HCL2Spec returns the hcl spec of a KeyValue. +// This spec is used by HCL to read the fields of KeyValue. +// The decoded values from this spec will then be applied to a FlatKeyValue. +func (*FlatKeyValue) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "key": &hcldec.AttrSpec{Name: "key", Type: cty.String, Required: false}, + "value": &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false}, + } + return s +} + +// FlatKeyValueFilter is an auto-generated flat version of KeyValueFilter. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatKeyValueFilter struct { + Filters map[string]string `cty:"filters" hcl:"filters"` + Filter []FlatKeyValue `cty:"filter" hcl:"filter"` +} + +// FlatMapstructure returns a new FlatKeyValueFilter. +// FlatKeyValueFilter is an auto-generated flat version of KeyValueFilter. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*KeyValueFilter) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatKeyValueFilter) +} + +// HCL2Spec returns the hcl spec of a KeyValueFilter. +// This spec is used by HCL to read the fields of KeyValueFilter. +// The decoded values from this spec will then be applied to a FlatKeyValueFilter. +func (*FlatKeyValueFilter) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false}, + "filter": &hcldec.BlockListSpec{TypeName: "filter", Nested: hcldec.ObjectSpec((*FlatKeyValue)(nil).HCL2Spec())}, + } + return s +} + +// FlatNameValue is an auto-generated flat version of NameValue. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatNameValue struct { + Name *string `cty:"name" hcl:"name"` + Value *string `cty:"value" hcl:"value"` +} + +// FlatMapstructure returns a new FlatNameValue. +// FlatNameValue is an auto-generated flat version of NameValue. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*NameValue) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatNameValue) +} + +// HCL2Spec returns the hcl spec of a NameValue. +// This spec is used by HCL to read the fields of NameValue. +// The decoded values from this spec will then be applied to a FlatNameValue. +func (*FlatNameValue) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false}, + "value": &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false}, + } + return s +} + +// FlatNameValueFilter is an auto-generated flat version of NameValueFilter. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. 
+type FlatNameValueFilter struct {
+	Filters map[string]string `cty:"filters" hcl:"filters"`
+	Filter  []FlatNameValue   `cty:"filter" hcl:"filter"`
+}
+
+// FlatMapstructure returns a new FlatNameValueFilter.
+// FlatNameValueFilter is an auto-generated flat version of NameValueFilter.
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
+func (*NameValueFilter) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
+	return new(FlatNameValueFilter)
+}
+
+// HCL2Spec returns the hcl spec of a NameValueFilter.
+// This spec is used by HCL to read the fields of NameValueFilter.
+// The decoded values from this spec will then be applied to a FlatNameValueFilter.
+func (*FlatNameValueFilter) HCL2Spec() map[string]hcldec.Spec {
+	s := map[string]hcldec.Spec{
+		"filters": &hcldec.AttrSpec{Name: "filters", Type: cty.Map(cty.String), Required: false},
+		"filter":  &hcldec.BlockListSpec{TypeName: "filter", Nested: hcldec.ObjectSpec((*FlatNameValue)(nil).HCL2Spec())},
+	}
+	return s
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/decode.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/decode.go
new file mode 100644
index 000000000..96b5a553a
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/decode.go
@@ -0,0 +1,329 @@
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/hashicorp/go-multierror"
+	"github.com/hashicorp/hcl/v2/hcldec"
+	"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+	"github.com/mitchellh/mapstructure"
+	"github.com/ryanuber/go-glob"
+	"github.com/zclconf/go-cty/cty"
+	"github.com/zclconf/go-cty/cty/gocty"
+	ctyjson "github.com/zclconf/go-cty/cty/json"
+)
+
+// DecodeOpts are the options for decoding configuration.
+type DecodeOpts struct {
+	// Metadata, if non-nil, will be set to the metadata post-decode
+	Metadata *mapstructure.Metadata
+
+	// Interpolate, if true, will automatically interpolate the
+	// configuration with the given InterpolateContext. User variables
+	// will be automatically detected and added in-place to the given
+	// context.
+	Interpolate        bool
+	InterpolateContext *interpolate.Context
+	InterpolateFilter  *interpolate.RenderFilter
+
+	// PluginType is the BuilderID, etc. of the plugin -- it is used to
+	// determine whether to tell the user to "fix" their template if an
+	// unknown option is a deprecated one for this plugin type.
+	PluginType string
+
+	DecodeHooks []mapstructure.DecodeHookFunc
+}
+
+var DefaultDecodeHookFuncs = []mapstructure.DecodeHookFunc{
+	uint8ToStringHook,
+	stringToTrilean,
+	mapstructure.StringToSliceHookFunc(","),
+	mapstructure.StringToTimeDurationHookFunc(),
+}
+
+// Decode decodes the configuration into the target and optionally
+// automatically interpolates all the configuration as it goes.
+func Decode(target interface{}, config *DecodeOpts, raws ...interface{}) error {
+	// Loop over raws once to get cty values from HCL, if present.
+	for i, raw := range raws {
+		// check for cty values and transform them to JSON then to a
+		// map[string]interface{} so that mapstructure can do its thing.
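+		// Non-cty values (plain JSON or map configuration) are skipped here
+		// and handled by the normal mapstructure decode below.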
+ cval, ok := raw.(cty.Value) + if !ok { + continue + } + type flatConfigurer interface { + FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } + } + ctarget := target.(flatConfigurer) + flatCfg := ctarget.FlatMapstructure() + err := gocty.FromCtyValue(cval, flatCfg) + if err != nil { + switch err := err.(type) { + case cty.PathError: + return fmt.Errorf("%v: %v", err, err.Path) + } + return err + } + b, err := ctyjson.SimpleJSONValue{Value: cval}.MarshalJSON() + if err != nil { + return err + } + var raw map[string]interface{} + if err := json.Unmarshal(b, &raw); err != nil { + return err + } + raws[i] = raw + { + // reset target to zero. + // In HCL2, we need to prepare provisioners/post-processors after a + // builder has started in order to have build values correctly + // extrapolated. Packer plugins have never been prepared twice in + // the past and some of them set fields during their Validation + // steps; which end up in an invalid provisioner/post-processor, + // like in [GH-9596]. This ensures Packer plugin will be reset + // right before we Prepare them. + p := reflect.ValueOf(target).Elem() + p.Set(reflect.Zero(p.Type())) + } + } + + // Now perform the normal decode. + + if config == nil { + config = &DecodeOpts{Interpolate: true} + } + + // Detect user variables from the raws and merge them into our context + ctxData, raws := DetectContextData(raws...) + + // Interpolate first + if config.Interpolate { + ctx, err := DetectContext(raws...) + if err != nil { + return err + } + if config.InterpolateContext == nil { + config.InterpolateContext = ctx + } else { + config.InterpolateContext.BuildName = ctx.BuildName + config.InterpolateContext.BuildType = ctx.BuildType + config.InterpolateContext.CorePackerVersionString = ctx.CorePackerVersionString + config.InterpolateContext.TemplatePath = ctx.TemplatePath + config.InterpolateContext.UserVariables = ctx.UserVariables + if config.InterpolateContext.Data == nil { + config.InterpolateContext.Data = ctxData + } + } + ctx = config.InterpolateContext + + // Render everything + for i, raw := range raws { + m, err := interpolate.RenderMap(raw, ctx, config.InterpolateFilter) + if err != nil { + return err + } + + raws[i] = m + } + } + + decodeHookFuncs := DefaultDecodeHookFuncs + if len(config.DecodeHooks) != 0 { + decodeHookFuncs = config.DecodeHooks + } + + // Build our decoder + var md mapstructure.Metadata + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: target, + Metadata: &md, + WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc(decodeHookFuncs...), + }) + if err != nil { + return err + } + + // In practice, raws is two interfaces: one containing all the packer config + // vars, and one containing the raw json configuration for a single + // plugin. + for _, raw := range raws { + if err := decoder.Decode(raw); err != nil { + return err + } + } + + // If we have unused keys, it is an error + if len(md.Unused) > 0 { + var err error + sort.Strings(md.Unused) + for _, unused := range md.Unused { + if unused == "type" || strings.HasPrefix(unused, "packer_") { + continue + } + + // Check for whether the key is handled in a packer fix + // call. + fixable := false + + // check whether the deprecation option can be fixed using packer fix. 
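+			// e.g. a PluginType containing "amazon" would match the
+			// "*amazon*" key in DeprecatedOptions.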
+			if config.PluginType != "" {
+				for k, deprecatedOptions := range DeprecatedOptions {
+					// the deprecated options keys are globbable, for
+					// example "amazon*" for all amazon builders, or * for
+					// all builders
+					if glob.Glob(k, config.PluginType) {
+						for _, deprecatedOption := range deprecatedOptions {
+							if unused == deprecatedOption {
+								fixable = true
+								break
+							}
+						}
+					}
+					if fixable {
+						break
+					}
+				}
+			}
+
+			unusedErr := fmt.Errorf("unknown configuration key: %q",
+				unused)
+
+			if fixable {
+				unusedErr = fmt.Errorf("Deprecated configuration key: '%s'."+
+					" Please call `packer fix` against your template to "+
+					"update your template to be compatible with the current "+
+					"version of Packer. Visit "+
+					"https://www.packer.io/docs/commands/fix/ for more detail.",
+					unused)
+			}
+
+			err = multierror.Append(err, unusedErr)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	// Set the metadata if it is set
+	if config.Metadata != nil {
+		*config.Metadata = md
+	}
+
+	return nil
+}
+
+func DetectContextData(raws ...interface{}) (map[interface{}]interface{}, []interface{}) {
+	// In provisioners, the last value pulled from raws is the placeholder data
+	// for build-specific variables. Pull these out to add to interpolation
+	// context.
+	if len(raws) == 0 {
+		return nil, raws
+	}
+
+	// Internally, our tests may cause this to be read as a map[string]string
+	placeholderData := raws[len(raws)-1]
+	if pd, ok := placeholderData.(map[string]string); ok {
+		if uuid, ok := pd["PackerRunUUID"]; ok {
+			if strings.Contains(uuid, "Build_PackerRunUUID.") {
+				cast := make(map[interface{}]interface{})
+				for k, v := range pd {
+					cast[k] = v
+				}
+				raws = raws[:len(raws)-1]
+				return cast, raws
+			}
+		}
+	}
+
+	// but with normal interface conversion across the rpc, it'll look like a
+	// map[interface]interface, not a map[string]string
+	if pd, ok := placeholderData.(map[interface{}]interface{}); ok {
+		if uuid, ok := pd["PackerRunUUID"]; ok {
+			if strings.Contains(uuid.(string), "Build_PackerRunUUID.") {
+				raws = raws[:len(raws)-1]
+				return pd, raws
+			}
+		}
+	}
+
+	return nil, raws
+}
+
+// DetectContext builds a base interpolate.Context, automatically
+// detecting things like user variables from the raw configuration params.
+func DetectContext(raws ...interface{}) (*interpolate.Context, error) {
+	var s struct {
+		BuildName               string            `mapstructure:"packer_build_name"`
+		BuildType               string            `mapstructure:"packer_builder_type"`
+		CorePackerVersionString string            `mapstructure:"packer_core_version"`
+		TemplatePath            string            `mapstructure:"packer_template_path"`
+		Vars                    map[string]string `mapstructure:"packer_user_variables"`
+		SensitiveVars           []string          `mapstructure:"packer_sensitive_variables"`
+	}
+
+	for _, r := range raws {
+		if err := mapstructure.Decode(r, &s); err != nil {
+			log.Printf("Error detecting context: %s", err)
+			return nil, err
+		}
+	}
+
+	return &interpolate.Context{
+		BuildName:               s.BuildName,
+		BuildType:               s.BuildType,
+		CorePackerVersionString: s.CorePackerVersionString,
+		TemplatePath:            s.TemplatePath,
+		UserVariables:           s.Vars,
+		SensitiveVariables:      s.SensitiveVars,
+	}, nil
+}
+
+func uint8ToStringHook(f reflect.Kind, t reflect.Kind, v interface{}) (interface{}, error) {
+	// We need to convert []uint8 to string. We have to do this
+	// because internally Packer uses MsgPack for RPC and the MsgPack
+	// codec turns strings into []uint8
+	if f == reflect.Slice && t == reflect.String {
+		dataVal := reflect.ValueOf(v)
+		dataType := dataVal.Type()
+		elemKind := dataType.Elem().Kind()
+		if elemKind == reflect.Uint8 {
+			v = string(dataVal.Interface().([]uint8))
+		}
+	}
+
+	return v, nil
+}
+
+func stringToTrilean(f reflect.Type, t reflect.Type, v interface{}) (interface{}, error) {
+	// We have a custom data type, config, which we read from a string and
+	// then cast to a *bool. Why? So that we can appropriately read "unset"
+	// *bool values in order to intelligently default, even when the values are
+	// being set by a template variable.
+
+	testTril, _ := TrileanFromString("")
+	if t == reflect.TypeOf(testTril) {
+		// From value is string
+		if f == reflect.TypeOf("") {
+			tril, err := TrileanFromString(v.(string))
+			if err != nil {
+				return v, fmt.Errorf("Error parsing bool from given var: %s", err)
+			}
+			return tril, nil
+		} else {
+			// From value is boolean
+			if f == reflect.TypeOf(true) {
+				tril := TrileanFromBool(v.(bool))
+				return tril, nil
+			}
+		}
+
+	}
+	return v, nil
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/deprecated_options.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/deprecated_options.go
new file mode 100644
index 000000000..bb53c7d01
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/config/deprecated_options.go
@@ -0,0 +1,21 @@
+// DeprecatedOptions lists deprecated option names per globbable plugin type.
+
+package config
+
+var DeprecatedOptions = map[string][]string{
+	"*":                 []string{"iso_md5", "ssh_key_path", "ssh_disable_agent", "ssh_host_port_min", "ssh_host_port_max", "ssh_skip_nat_mapping", "ssh_wait_timeout", "iso_checksum_url", "iso_checksum_type"},
+	"*amazon*":          []string{"shutdown_behaviour", "enhanced_networking", "ssh_private_ip", "temporary_security_group_source_cidr", "clean_ami_name", "spot_price_auto_product"},
+	"Azure*":            []string{"clean_image_name", "exlude_from_latest"},
+	"MSOpenTech.hyperv": []string{"vhd_temp_path", "clone_from_vmxc_path", "cpu", "ram_size"},
+	"ansible":           []string{"galaxycommand"},
+	"hashicorp.scaleway": []string{"access_key"},
+	"jetbrains.vsphere":  []string{"network_card", "network", "networkCard", "disk_size", "disk_thin_provisioned", "disk_eagerly_scrub"},
+	"mitchellh.virtualbox": []string{"guest_additions_attach"},
+	"packer.docker":        []string{"login_email"},
+	"packer.googlecompute": []string{"clean_image_name"},
+	"packer.parallels":     []string{"headless", "parallels_tools_host_path", "guest_os_distribution"},
+	"packer.post-processor.docker-import": []string{"login_email"},
+	"packer.post-processor.docker-tag":    []string{"tag"},
+	"packer.post-processor.manifest":      []string{"filename"},
+	"transcend.qemu": []string{"ssh_host_port_max", "ssh_host_port_min"},
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/doc.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/doc.go
new file mode 100644
index 000000000..355460e5b
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/doc.go
@@ -0,0 +1,44 @@
+/*
+Package template helps plugins parse the Packer template into Go structures.
+
+This package should be imported and used by all plugins. It implements the
+Go template engine that Packer documents on its website, along with input
+validation, custom type decoding, and template variable interpolation.
+
+A simple usage example that defines a config and then unpacks a user-provided
+JSON template into the provided config:
+
+	import (
+		// ...
+		"github.com/hashicorp/packer-plugin-sdk/template/config"
+		"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
+	)
+
+	type Config struct {
+		Field1 string `mapstructure:"field_1"`
+		Field2 bool   `mapstructure:"field_2"`
+		Field3 bool   `mapstructure:"field_3"`
+
+		ctx interpolate.Context
+	}
+
+	type Provisioner struct {
+		config Config
+	}
+
+	func (p *Provisioner) Prepare(raws ...interface{}) error {
+		err := config.Decode(&p.config, &config.DecodeOpts{
+			Interpolate:        true,
+			InterpolateContext: &p.config.ctx,
+		}, raws...)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+More implementation details for plugins can be found in the
+[extending packer](https://www.packer.io/docs/extending) section of the website.
+*/
+package template
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/funcs.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/funcs.go
new file mode 100644
index 000000000..6a340d1c2
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/funcs.go
@@ -0,0 +1,115 @@
+package template
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+	"sync"
+
+	consulapi "github.com/hashicorp/consul/api"
+	awssmapi "github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager"
+	vaultapi "github.com/hashicorp/vault/api"
+)
+
+// DeprecatedTemplateFunc wraps a template func to warn users that it's
+// deprecated. The deprecation warning is logged only once.
+func DeprecatedTemplateFunc(funcName, useInstead string, deprecated func(string) string) func(string) string {
+	once := sync.Once{}
+	return func(in string) string {
+		once.Do(func() {
+			log.Printf("[WARN]: the `%s` template func is deprecated, please use %s instead",
+				funcName, useInstead)
+		})
+		return deprecated(in)
+	}
+}
+
+// Vault retrieves a secret from a HashiCorp Vault KV store.
+// It assumes the necessary environment variables are set.
+func Vault(path string, key string) (string, error) {
+
+	if token := os.Getenv("VAULT_TOKEN"); token == "" {
+		return "", errors.New("Must set VAULT_TOKEN env var in order to use vault template function")
+	}
+
+	vaultConfig := vaultapi.DefaultConfig()
+	cli, err := vaultapi.NewClient(vaultConfig)
+	if err != nil {
+		return "", fmt.Errorf("Error getting Vault client: %s", err)
+	}
+	secret, err := cli.Logical().Read(path)
+	if err != nil {
+		return "", fmt.Errorf("Error reading vault secret: %s", err)
+	}
+	if secret == nil {
+		return "", errors.New("Vault Secret does not exist at the given path")
+	}
+
+	data, ok := secret.Data["data"]
+	if !ok {
+		// maybe this is a v1, not v2, KV store
+		value, ok := secret.Data[key]
+		if ok {
+			return value.(string), nil
+		}
+
+		// neither v1 nor v2 produced a valid value
+		return "", fmt.Errorf("Vault data was empty at the given path. Warnings: %s", strings.Join(secret.Warnings, "; "))
+	}
+
+	if val, ok := data.(map[string]interface{})[key]; ok {
+		return val.(string), nil
+	}
+	return "", errors.New("Vault path does not contain the requested key")
+}
+
+// Consul retrieves a value from a HashiCorp Consul KV store.
+// It assumes the necessary environment variables are set.
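+// (Typically CONSUL_HTTP_ADDR and, when ACLs are enabled, CONSUL_HTTP_TOKEN;
+// both are read by consulapi.DefaultConfig.)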
+func Consul(k string) (string, error) {
+	consulConfig := consulapi.DefaultConfig()
+	client, err := consulapi.NewClient(consulConfig)
+	if err != nil {
+		return "", fmt.Errorf("error getting consul client: %s", err)
+	}
+
+	q := &consulapi.QueryOptions{}
+	kv, _, err := client.KV().Get(k, q)
+	if err != nil {
+		return "", fmt.Errorf("error reading consul key: %s", err)
+	}
+	if kv == nil {
+		return "", fmt.Errorf("key does not exist at the given path: %s", k)
+	}
+
+	value := string(kv.Value)
+	if value == "" {
+		return "", fmt.Errorf("value is empty at path %s", k)
+	}
+
+	return value, nil
+}
+
+// GetAWSSecret retrieves a value from AWS Secrets Manager.
+// It assumes that credentials are properly set in the AWS SDK's credential
+// chain.
+func GetAWSSecret(name, key string) (string, error) {
+	// Check if at least 1 parameter has been used
+	if len(name) == 0 {
+		return "", errors.New("At least one secret name must be provided")
+	}
+	// client uses the AWS SDK CredentialChain method. So, credentials can
+	// be loaded from the credential file, environment variables, or IAM
+	// roles.
+	client := awssmapi.New(
+		&awssmapi.AWSConfig{},
+	)
+
+	spec := &awssmapi.SecretSpec{
+		Name: name,
+		Key:  key,
+	}
+
+	return client.GetSecret(spec)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go
new file mode 100644
index 000000000..67ec3b90b
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go
@@ -0,0 +1,108 @@
+// Package secretsmanager provides methods to get data from
+// AWS Secrets Manager
+package secretsmanager
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/secretsmanager"
+	"github.com/aws/aws-sdk-go/service/secretsmanager/secretsmanageriface"
+)
+
+// Client represents an AWS Secrets Manager client
+type Client struct {
+	config *AWSConfig
+	api    secretsmanageriface.SecretsManagerAPI
+}
+
+// New creates an AWS Secrets Manager client
+func New(config *AWSConfig) *Client {
+	c := &Client{
+		config: config,
+	}
+
+	s := c.newSession(config)
+	c.api = secretsmanager.New(s)
+	return c
+}
+
+func (c *Client) newSession(config *AWSConfig) *session.Session {
+	// Initialize config with error verbosity
+	sessConfig := aws.NewConfig().WithCredentialsChainVerboseErrors(true)
+
+	if config.Region != "" {
+		sessConfig = sessConfig.WithRegion(config.Region)
+	}
+
+	opts := session.Options{
+		SharedConfigState: session.SharedConfigEnable,
+		Config:            *sessConfig,
+	}
+
+	return session.Must(session.NewSessionWithOptions(opts))
+}
+
+// GetSecret returns an AWS Secrets Manager secret
+// in plain text for a given secret name
+func (c *Client) GetSecret(spec *SecretSpec) (string, error) {
+	params := &secretsmanager.GetSecretValueInput{
+		SecretId:     aws.String(spec.Name),
+		VersionStage: aws.String("AWSCURRENT"),
+	}
+
+	resp, err := c.api.GetSecretValue(params)
+	if err != nil {
+		return "", err
+	}
+
+	if resp.SecretString == nil {
+		return "", errors.New("Secret is not string")
+	}
+
+	secret := SecretString{
+		Name:         *resp.Name,
+		SecretString: *resp.SecretString,
+	}
+	value, err := getSecretValue(&secret, spec)
+	if err != nil {
+		return "", err
+	}
+
+	return value, nil
+}
+
+func getSecretValue(s *SecretString, spec *SecretSpec) (string, error) {
+	var secretValue map[string]string
+	blob := []byte(s.SecretString)
+
+	// For plaintext secrets, just return the value
+	if !json.Valid(blob) {
+		return s.SecretString, nil
+	}
+
+	err := json.Unmarshal(blob, &secretValue)
+	if err != nil {
+		return "", err
+	}
+
+	// If key is not set and the secret has multiple keys, return an error
+	if spec.Key == "" && len(secretValue) > 1 {
+		return "", errors.New("Secret has multiple values and no key was set")
+	}
+
+	if spec.Key == "" {
+		for _, v := range secretValue {
+			return v, nil
+		}
+	}
+
+	if v, ok := secretValue[spec.Key]; ok {
+		return v, nil
+	}
+
+	return "", fmt.Errorf("No secret found for key %q", spec.Key)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/types.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/types.go
new file mode 100644
index 000000000..6cd1dc46e
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/types.go
@@ -0,0 +1,22 @@
+package secretsmanager
+
+// AWSConfig stores the configuration used to initialize the
+// secrets manager client.
+type AWSConfig struct {
+	Region string
+}
+
+// SecretSpec represents the spec of the secret to be searched for.
+// If the Key field is not set, the package returns the first
+// key stored in the secret.
+type SecretSpec struct {
+	Name string
+	Key  string
+}
+
+// SecretString is a concrete representation
+// of an AWS Secrets Manager secret string
+type SecretString struct {
+	Name         string
+	SecretString string
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/funcs.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/funcs.go
new file mode 100644
index 000000000..f2878742c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/funcs.go
@@ -0,0 +1,312 @@
+package interpolate
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
+	commontpl "github.com/hashicorp/packer-plugin-sdk/template"
+	"github.com/hashicorp/packer-plugin-sdk/uuid"
+	strftime "github.com/jehiah/go-strftime"
+)
+
+// InitTime is the UTC time when this package was initialized. It is
+// used as the timestamp for all configuration templates so that they
+// match for a single build.
+var InitTime time.Time
+
+func init() {
+	InitTime = time.Now().UTC()
+}
+
+// FuncGens maps interpolation function names to their implementations or generators.
+var FuncGens = map[string]interface{}{
+	"build_name":         funcGenBuildName,
+	"build_type":         funcGenBuildType,
+	"env":                funcGenEnv,
+	"isotime":            funcGenIsotime,
+	"strftime":           funcGenStrftime,
+	"pwd":                funcGenPwd,
+	"split":              funcGenSplitter,
+	"template_dir":       funcGenTemplateDir,
+	"timestamp":          funcGenTimestamp,
+	"uuid":               funcGenUuid,
+	"user":               funcGenUser,
+	"packer_version":     funcGenPackerVersion,
+	"consul_key":         funcGenConsul,
+	"vault":              funcGenVault,
+	"sed":                funcGenSed,
+	"build":              funcGenBuild,
+	"aws_secretsmanager": funcGenAwsSecrets,
+
+	"replace":     replace,
+	"replace_all": replace_all,
+
+	"upper": strings.ToUpper,
+	"lower": strings.ToLower,
+}
+
+var ErrVariableNotSetString = "Error: variable not set:"
+
+// FuncGenerator is a function that given a context generates a template
+// function for the template.
+type FuncGenerator func(*Context) interface{}
+
+// Funcs returns the functions that can be used for interpolation given
+// a context.
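+// Functions supplied via ctx.Funcs override same-named defaults from FuncGens.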
+func Funcs(ctx *Context) template.FuncMap {
+	result := make(map[string]interface{})
+	for k, v := range FuncGens {
+		switch v := v.(type) {
+		case func(*Context) interface{}:
+			result[k] = v(ctx)
+		default:
+			result[k] = v
+		}
+	}
+	if ctx != nil {
+		for k, v := range ctx.Funcs {
+			result[k] = v
+		}
+	}
+
+	return template.FuncMap(result)
+}
+
+func funcGenSplitter(ctx *Context) interface{} {
+	return func(k string, s string, i int) (string, error) {
+		// k is the string to split, s the separator, i the index to return.
+		split := strings.Split(k, s)
+		if len(split) <= i {
+			return "", fmt.Errorf("the substring %d was unavailable using the separator value, %s, only %d values were found", i, s, len(split))
+		}
+		return split[i], nil
+	}
+}
+
+func funcGenBuildName(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.BuildName == "" {
+			return "", errors.New("build_name not available")
+		}
+
+		return ctx.BuildName, nil
+	}
+}
+
+func funcGenBuildType(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.BuildType == "" {
+			return "", errors.New("build_type not available")
+		}
+
+		return ctx.BuildType, nil
+	}
+}
+
+func funcGenEnv(ctx *Context) interface{} {
+	return func(k string) (string, error) {
+		if !ctx.EnableEnv {
+			// The error message doesn't have to be that detailed since
+			// semantic checks should catch this.
+			return "", errors.New("env vars are not allowed here")
+		}
+
+		return os.Getenv(k), nil
+	}
+}
+
+func funcGenIsotime(ctx *Context) interface{} {
+	return func(format ...string) (string, error) {
+		if len(format) == 0 {
+			return InitTime.Format(time.RFC3339), nil
+		}
+
+		if len(format) > 1 {
+			return "", fmt.Errorf("too many values, 1 needed: %v", format)
+		}
+
+		return InitTime.Format(format[0]), nil
+	}
+}
+
+func funcGenStrftime(ctx *Context) interface{} {
+	return func(format string) string {
+		return strftime.Format(format, InitTime)
+	}
+}
+
+func funcGenPwd(ctx *Context) interface{} {
+	return func() (string, error) {
+		return os.Getwd()
+	}
+}
+
+func funcGenTemplateDir(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.TemplatePath == "" {
+			return "", errors.New("template path not available")
+		}
+
+		path, err := filepath.Abs(filepath.Dir(ctx.TemplatePath))
+		if err != nil {
+			return "", err
+		}
+
+		return path, nil
+	}
+}
+
+func passthroughOrInterpolate(data map[interface{}]interface{}, s string) (string, error) {
+	if heldPlace, ok := data[s]; ok {
+		if hp, ok := heldPlace.(string); ok {
+			// If we're in the first interpolation pass, the goal is to
+			// make sure that we pass the value through.
+			// TODO match against an actual string constant
+			if strings.Contains(hp, packerbuilderdata.PlaceholderMsg) {
+				return fmt.Sprintf("{{.%s}}", s), nil
+			} else {
+				return hp, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("loaded data, but couldn't find %s in it", s)
+}
+
+func funcGenBuild(ctx *Context) interface{} {
+	// Depending on where the context data is coming from, it could take a few
+	// different map types. The following switch standardizes the map types
+	// so we can act on them correctly.
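+	// For example, "{{ build `Host` }}" in a provisioner configuration is
+	// resolved here (assuming the builder exposes a "Host" build value).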
+	return func(s string) (string, error) {
+		switch data := ctx.Data.(type) {
+		case map[interface{}]interface{}:
+			return passthroughOrInterpolate(data, s)
+		case map[string]interface{}:
+			// convert to a map[interface{}]interface{} so we can use same
+			// parsing on it
+			passed := make(map[interface{}]interface{}, len(data))
+			for k, v := range data {
+				passed[k] = v
+			}
+			return passthroughOrInterpolate(passed, s)
+		case map[string]string:
+			// convert to a map[interface{}]interface{} so we can use same
+			// parsing on it
+			passed := make(map[interface{}]interface{}, len(data))
+			for k, v := range data {
+				passed[k] = v
+			}
+			return passthroughOrInterpolate(passed, s)
+		default:
+			return "", fmt.Errorf("Error validating build variable: the given "+
+				"variable %s will not be passed into your plugin.", s)
+		}
+	}
+}
+
+func funcGenTimestamp(ctx *Context) interface{} {
+	return func() string {
+		return strconv.FormatInt(InitTime.Unix(), 10)
+	}
+}
+
+func funcGenUser(ctx *Context) interface{} {
+	return func(k string) (string, error) {
+		if ctx == nil || ctx.UserVariables == nil {
+			return "", errors.New("user variables not available")
+		}
+
+		val, ok := ctx.UserVariables[k]
+		if ctx.EnableEnv {
+			// error and retry if we're interpolating UserVariables. But if
+			// we're elsewhere in the template, just return the empty string.
+			if !ok {
+				return "", fmt.Errorf("%s %s", ErrVariableNotSetString, k)
+			}
+		}
+		return val, nil
+	}
+}
+
+func funcGenUuid(ctx *Context) interface{} {
+	return func() string {
+		return uuid.TimeOrderedUUID()
+	}
+}
+
+func funcGenPackerVersion(ctx *Context) interface{} {
+	return func() (string, error) {
+		if ctx == nil || ctx.CorePackerVersionString == "" {
+			return "", errors.New("packer_version not available")
+		}
+
+		return ctx.CorePackerVersionString, nil
+	}
+}
+
+func funcGenConsul(ctx *Context) interface{} {
+	return func(key string) (string, error) {
+		if !ctx.EnableEnv {
+			// The error message doesn't have to be that detailed since
+			// semantic checks should catch this.
+			return "", errors.New("consul_key is not allowed here")
+		}
+
+		return commontpl.Consul(key)
+	}
+}
+
+func funcGenVault(ctx *Context) interface{} {
+	return func(path string, key string) (string, error) {
+		// Only allow interpolation from Vault when env vars are being read.
+		if !ctx.EnableEnv {
+			// The error message doesn't have to be that detailed since
+			// semantic checks should catch this.
+			return "", errors.New("Vault vars are only allowed in the variables section")
+		}
+
+		return commontpl.Vault(path, key)
+	}
+}
+
+func funcGenAwsSecrets(ctx *Context) interface{} {
+	return func(secret ...string) (string, error) {
+		if !ctx.EnableEnv {
+			// The error message doesn't have to be that detailed since
+			// semantic checks should catch this.
+			return "", errors.New("AWS Secrets Manager is only allowed in the variables section")
+		}
+		switch len(secret) {
+		case 0:
+			return "", errors.New("secret name must be provided")
+		case 1:
+			return commontpl.GetAWSSecret(secret[0], "")
+		case 2:
+			return commontpl.GetAWSSecret(secret[0], secret[1])
+		default:
+			return "", errors.New("only a secret name and an optional secret key can be provided")
+		}
+	}
+}
+
+func funcGenSed(ctx *Context) interface{} {
+	return func(expression string, inputString string) (string, error) {
+		return "", errors.New("template function `sed` is deprecated; " +
+			"use `replace` or `replace_all` instead. " +
+			"Documentation: https://www.packer.io/docs/templates/engine")
+	}
+}
+
+func replace_all(old, new, src string) string {
+	return strings.ReplaceAll(src, old, new)
+}
+
+func replace(old, new string, n int, src string) string {
+	return strings.Replace(src, old, new, n)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/i.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/i.go
new file mode 100644
index 000000000..8ad4e50df
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/i.go
@@ -0,0 +1,133 @@
+package interpolate
+
+import (
+	"bytes"
+	"regexp"
+	"strings"
+	"text/template"
+
+	"github.com/google/uuid"
+)
+
+// Context is the context that an interpolation is done in. This defines
+// things such as available variables.
+type Context struct {
+	// Data is the data for the template that is available
+	Data interface{}
+
+	// Funcs are extra functions available in the template
+	Funcs map[string]interface{}
+
+	// UserVariables is the mapping of user variables that the
+	// "user" function reads from.
+	UserVariables map[string]string
+
+	// SensitiveVariables is a list of variables to sanitize.
+	SensitiveVariables []string
+
+	// EnableEnv enables the env function
+	EnableEnv bool
+
+	// All the fields below are used for built-in functions.
+	//
+	// BuildName and BuildType are the name and type, respectively,
+	// of the builder being used.
+	//
+	// TemplatePath is the path to the template that this is being
+	// rendered within.
+	BuildName               string
+	BuildType               string
+	CorePackerVersionString string
+	TemplatePath            string
+}
+
+// NewContext returns an initialized empty context.
+func NewContext() *Context {
+	return &Context{}
+}
+
+// RenderOnce is shorthand for constructing an I and calling Render one time.
+func RenderOnce(v string, ctx *Context) (string, error) {
+	return (&I{Value: v}).Render(ctx)
+}
+
+// Render is shorthand for constructing an I and calling Render until all variables are rendered.
+func Render(v string, ctx *Context) (rendered string, err error) {
+	// Keep interpolating until all variables are done.
+	// Sometimes a variable can be nested inside another one.
+	for {
+		rendered, err = (&I{Value: v}).Render(ctx)
+		if err != nil || rendered == v {
+			break
+		}
+		v = rendered
+	}
+	return
+}
+
+// RenderRegex is like Render, but uses a regex to exclude variables that
+// are not supposed to be interpolated yet.
+func RenderRegex(v string, ctx *Context, regex string) (string, error) {
+	re := regexp.MustCompile(regex)
+	matches := re.FindAllStringSubmatch(v, -1)
+
+	// Replace variables to be excluded with a unique UUID
+	excluded := make(map[string]string)
+	for _, value := range matches {
+		id := uuid.New().String()
+		excluded[id] = value[0]
+		v = strings.ReplaceAll(v, value[0], id)
+	}
+
+	rendered, err := (&I{Value: v}).Render(ctx)
+	if err != nil {
+		return rendered, err
+	}
+
+	// Swap the previously excluded values back in for their UUID placeholders
+	for id, value := range excluded {
+		rendered = strings.ReplaceAll(rendered, id, value)
+	}
+
+	return rendered, nil
+}
+
+// Validate is shorthand for constructing an I and calling Validate.
+func Validate(v string, ctx *Context) error {
+	return (&I{Value: v}).Validate(ctx)
+}
+
+// I stands for "interpolation" and is the main interpolation struct
+// in order to render values.
+type I struct {
+	Value string
+}
+
+// Render renders the interpolation with the given context.
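+// For example, rendering "{{user `name`}}" with UserVariables set to
+// map[string]string{"name": "foo"} yields "foo".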
+func (i *I) Render(ictx *Context) (string, error) { + tpl, err := i.template(ictx) + if err != nil { + return "", err + } + + var result bytes.Buffer + var data interface{} + if ictx != nil { + data = ictx.Data + } + if err := tpl.Execute(&result, data); err != nil { + return "", err + } + + return result.String(), nil +} + +// Validate validates that the template is syntactically valid. +func (i *I) Validate(ctx *Context) error { + _, err := i.template(ctx) + return err +} + +func (i *I) template(ctx *Context) (*template.Template, error) { + return template.New("root").Funcs(Funcs(ctx)).Parse(i.Value) +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/parse.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/parse.go new file mode 100644 index 000000000..b18079510 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/parse.go @@ -0,0 +1,42 @@ +package interpolate + +import ( + "fmt" + "text/template" + "text/template/parse" +) + +// functionsCalled returns a map (to be used as a set) of the functions +// that are called from the given text template. +func functionsCalled(t *template.Template) map[string]struct{} { + result := make(map[string]struct{}) + functionsCalledWalk(t.Tree.Root, result) + return result +} + +func functionsCalledWalk(raw parse.Node, r map[string]struct{}) { + switch node := raw.(type) { + case *parse.ActionNode: + functionsCalledWalk(node.Pipe, r) + case *parse.CommandNode: + if in, ok := node.Args[0].(*parse.IdentifierNode); ok { + r[in.Ident] = struct{}{} + } + + for _, n := range node.Args[1:] { + functionsCalledWalk(n, r) + } + case *parse.ListNode: + for _, n := range node.Nodes { + functionsCalledWalk(n, r) + } + case *parse.PipeNode: + for _, n := range node.Cmds { + functionsCalledWalk(n, r) + } + case *parse.StringNode, *parse.TextNode: + // Ignore + default: + panic(fmt.Sprintf("unknown type: %T", node)) + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/render.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/render.go new file mode 100644 index 000000000..7893e8745 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/render.go @@ -0,0 +1,282 @@ +package interpolate + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "github.com/mitchellh/mapstructure" + "github.com/mitchellh/reflectwalk" +) + +// RenderFilter is an option for filtering what gets rendered and +// doesn't within an interface. +type RenderFilter struct { + Include []string + Exclude []string + + once sync.Once + excludeSet map[string]struct{} + includeSet map[string]struct{} +} + +// RenderMap renders all the strings in the given interface. The +// interface must decode into a map[string]interface{}, but is left +// as an interface{} type to ease backwards compatibility with the way +// arguments are passed around in Packer. 
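+// Keys excluded by the RenderFilter are still validated; they are just not
+// rendered.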
+func RenderMap(v interface{}, ctx *Context, f *RenderFilter) (map[string]interface{}, error) {
+	// First decode it into the map
+	var m map[string]interface{}
+	if err := mapstructure.Decode(v, &m); err != nil {
+		return nil, err
+	}
+
+	// Now go through each value and render it
+	for k, raw := range m {
+		// Always validate every field
+		if err := ValidateInterface(raw, ctx); err != nil {
+			return nil, fmt.Errorf("invalid '%s': %s", k, err)
+		}
+
+		if !f.include(k) {
+			continue
+		}
+
+		raw, err := RenderInterface(raw, ctx)
+		if err != nil {
+			return nil, fmt.Errorf("render '%s': %s", k, err)
+		}
+
+		m[k] = raw
+	}
+
+	return m, nil
+}
+
+// RenderInterface renders any value and returns the resulting value.
+func RenderInterface(v interface{}, ctx *Context) (interface{}, error) {
+	f := func(v string) (string, error) {
+		return RenderOnce(v, ctx)
+	}
+
+	walker := &renderWalker{
+		F:       f,
+		Replace: true,
+	}
+	err := reflectwalk.Walk(v, walker)
+	if err != nil {
+		return nil, err
+	}
+
+	if walker.Top != nil {
+		v = walker.Top
+	}
+	return v, nil
+}
+
+// ValidateInterface validates any template strings within the value without
+// rendering or modifying it.
+func ValidateInterface(v interface{}, ctx *Context) error {
+	f := func(v string) (string, error) {
+		return v, Validate(v, ctx)
+	}
+
+	walker := &renderWalker{
+		F:       f,
+		Replace: false,
+	}
+	err := reflectwalk.Walk(v, walker)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// include checks whether a key should be included.
+func (f *RenderFilter) include(k string) bool {
+	if f == nil {
+		return true
+	}
+
+	k = strings.ToLower(k)
+
+	f.once.Do(f.init)
+	if len(f.includeSet) > 0 {
+		_, ok := f.includeSet[k]
+		return ok
+	}
+	if len(f.excludeSet) > 0 {
+		_, ok := f.excludeSet[k]
+		return !ok
+	}
+
+	return true
+}
+
+func (f *RenderFilter) init() {
+	f.includeSet = make(map[string]struct{})
+	for _, v := range f.Include {
+		f.includeSet[strings.ToLower(v)] = struct{}{}
+	}
+
+	f.excludeSet = make(map[string]struct{})
+	for _, v := range f.Exclude {
+		f.excludeSet[strings.ToLower(v)] = struct{}{}
+	}
+}
+
+// renderWalker implements interfaces for the reflectwalk package
+// (github.com/mitchellh/reflectwalk) that can be used to automatically
+// execute a callback for an interpolation.
+type renderWalker struct {
+	// F is the function to call for every interpolation. It can be nil.
+	//
+	// If Replace is true, then the return value of F will be used to
+	// replace the interpolation.
+	F       renderWalkerFunc
+	Replace bool
+
+	// ContextF is an advanced version of F that also receives the
+	// location of where it is in the structure. This lets you do
+	// context-aware validation.
+	ContextF renderWalkerContextFunc
+
+	// Top is the top value of the walk. This might get replaced if the
+	// top value needs to be modified. It is valid to read after any walk.
+	// If it is nil, it means the top wasn't replaced.
+	Top interface{}
+
+	key        []string
+	lastValue  reflect.Value
+	loc        reflectwalk.Location
+	cs         []reflect.Value
+	csKey      []reflect.Value
+	csData     interface{}
+	sliceIndex int
+}
+
+// renderWalkerFunc is the callback called by interpolationWalk.
+// It is called with any interpolation found. It should return a value
+// to replace the interpolation with, along with any errors.
+//
+// If Replace is set to false in renderWalker, then the replace
+// value can be anything as it will have no effect.
+type renderWalkerFunc func(string) (string, error)
+
+// renderWalkerContextFunc is called by interpolationWalk if
+// ContextF is set.
This receives both the interpolation and the location +// where the interpolation is. +// +// This callback can be used to validate the location of the interpolation +// within the configuration. +type renderWalkerContextFunc func(reflectwalk.Location, string) + +func (w *renderWalker) Enter(loc reflectwalk.Location) error { + w.loc = loc + return nil +} + +func (w *renderWalker) Exit(loc reflectwalk.Location) error { + w.loc = reflectwalk.None + + switch loc { + case reflectwalk.Map: + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.MapValue: + w.key = w.key[:len(w.key)-1] + w.csKey = w.csKey[:len(w.csKey)-1] + case reflectwalk.Slice: + // Split any values that need to be split + w.cs = w.cs[:len(w.cs)-1] + case reflectwalk.SliceElem: + w.csKey = w.csKey[:len(w.csKey)-1] + } + + return nil +} + +func (w *renderWalker) Map(m reflect.Value) error { + w.cs = append(w.cs, m) + return nil +} + +func (w *renderWalker) MapElem(m, k, v reflect.Value) error { + w.csData = k + w.csKey = append(w.csKey, k) + w.key = append(w.key, k.String()) + w.lastValue = v + return nil +} + +func (w *renderWalker) Slice(s reflect.Value) error { + w.cs = append(w.cs, s) + return nil +} + +func (w *renderWalker) SliceElem(i int, elem reflect.Value) error { + w.csKey = append(w.csKey, reflect.ValueOf(i)) + w.sliceIndex = i + return nil +} + +func (w *renderWalker) Primitive(v reflect.Value) error { + setV := v + + // We only care about strings + if v.Kind() == reflect.Interface { + setV = v + v = v.Elem() + } + if v.Kind() != reflect.String { + return nil + } + + strV := v.String() + if w.ContextF != nil { + w.ContextF(w.loc, strV) + } + + if w.F == nil { + return nil + } + + replaceVal, err := w.F(strV) + if err != nil { + return fmt.Errorf( + "%s in:\n\n%s", + err, v.String()) + } + + if w.Replace { + resultVal := reflect.ValueOf(replaceVal) + switch w.loc { + case reflectwalk.MapKey: + m := w.cs[len(w.cs)-1] + + // Delete the old value + var zero reflect.Value + m.SetMapIndex(w.csData.(reflect.Value), zero) + + // Set the new key with the existing value + m.SetMapIndex(resultVal, w.lastValue) + + // Set the key to be the new key + w.csData = resultVal + case reflectwalk.MapValue: + // If we're in a map, then the only way to set a map value is + // to set it directly. + m := w.cs[len(w.cs)-1] + mk := w.csData.(reflect.Value) + m.SetMapIndex(mk, resultVal) + case reflectwalk.WalkLoc: + // At the root element, we can't write that, so we just save it + w.Top = resultVal.Interface() + default: + // Otherwise, we should be addressable + setV.Set(resultVal) + } + } + + return nil +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/parse.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/parse.go new file mode 100644 index 000000000..5d1f04cc1 --- /dev/null +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/parse.go @@ -0,0 +1,578 @@ +package template + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "sort" + "strings" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/packer-plugin-sdk/tmp" + "github.com/mitchellh/mapstructure" +) + +// rawTemplate is the direct JSON document format of the template file. +// This is what is decoded directly from the file, and then it is turned +// into a Template object thereafter. 
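+// (Parse decodes the template JSON into this struct and then calls
+// Template() to validate and restructure it into a Template.)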
+type rawTemplate struct { + MinVersion string `mapstructure:"min_packer_version" json:"min_packer_version,omitempty"` + Description string `json:"description,omitempty"` + + Builders []interface{} `mapstructure:"builders" json:"builders,omitempty"` + Comments []map[string]string `json:"comments,omitempty"` + Push map[string]interface{} `json:"push,omitempty"` + PostProcessors []interface{} `mapstructure:"post-processors" json:"post-processors,omitempty"` + Provisioners []interface{} `json:"provisioners,omitempty"` + CleanupProvisioner interface{} `mapstructure:"error-cleanup-provisioner" json:"error-cleanup-provisioner,omitempty"` + Variables map[string]interface{} `json:"variables,omitempty"` + SensitiveVariables []string `mapstructure:"sensitive-variables" json:"sensitive-variables,omitempty"` + + RawContents []byte `json:"-"` +} + +// MarshalJSON conducts the necessary flattening of the rawTemplate struct +// to provide valid Packer template JSON +func (r *rawTemplate) MarshalJSON() ([]byte, error) { + // Avoid recursion + type rawTemplate_ rawTemplate + out, _ := json.Marshal(rawTemplate_(*r)) + + var m map[string]json.RawMessage + _ = json.Unmarshal(out, &m) + + // Flatten Comments + delete(m, "comments") + for _, comment := range r.Comments { + for k, v := range comment { + out, _ = json.Marshal(v) + m[k] = out + } + } + + return json.Marshal(m) +} + +func (r *rawTemplate) decodeProvisioner(raw interface{}) (Provisioner, error) { + var p Provisioner + if err := r.weakDecoder(&p, nil).Decode(raw); err != nil { + return p, fmt.Errorf("Error decoding provisioner: %s", err) + + } + + // Type is required before any richer validation + if p.Type == "" { + return p, fmt.Errorf("Provisioner missing 'type'") + } + + // Set the raw configuration and delete any special keys + p.Config = raw.(map[string]interface{}) + + delete(p.Config, "except") + delete(p.Config, "only") + delete(p.Config, "override") + delete(p.Config, "pause_before") + delete(p.Config, "max_retries") + delete(p.Config, "type") + delete(p.Config, "timeout") + + if len(p.Config) == 0 { + p.Config = nil + } + return p, nil +} + +// Template returns the actual Template object built from this raw +// structure. 
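+// Any validation errors are accumulated with multierror and returned together.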
+func (r *rawTemplate) Template() (*Template, error) { + var result Template + var errs error + + // Copy some literals + result.Description = r.Description + result.MinVersion = r.MinVersion + result.RawContents = r.RawContents + + // Gather the comments + if len(r.Comments) > 0 { + result.Comments = make(map[string]string, len(r.Comments)) + + for _, c := range r.Comments { + for k, v := range c { + result.Comments[k] = v + } + } + } + + // Gather the variables + if len(r.Variables) > 0 { + result.Variables = make(map[string]*Variable, len(r.Variables)) + } + + for k, rawV := range r.Variables { + var v Variable + v.Key = k + + // Variable is required if the value is exactly nil + v.Required = rawV == nil + + // Weak decode the default if we have one + if err := r.decoder(&v.Default, nil).Decode(rawV); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "variable %s: %s", k, err)) + continue + } + + for _, sVar := range r.SensitiveVariables { + if sVar == k { + result.SensitiveVariables = append(result.SensitiveVariables, &v) + } + } + + result.Variables[k] = &v + } + + // Let's start by gathering all the builders + if len(r.Builders) > 0 { + result.Builders = make(map[string]*Builder, len(r.Builders)) + } + for i, rawB := range r.Builders { + var b Builder + if err := mapstructure.WeakDecode(rawB, &b); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: %s", i+1, err)) + continue + } + + // Set the raw configuration and delete any special keys + b.Config = rawB.(map[string]interface{}) + + delete(b.Config, "name") + delete(b.Config, "type") + + if len(b.Config) == 0 { + b.Config = nil + } + + // If there is no type set, it is an error + if b.Type == "" { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: missing 'type'", i+1)) + continue + } + + // The name defaults to the type if it isn't set + if b.Name == "" { + b.Name = b.Type + } + + // If this builder already exists, it is an error + if _, ok := result.Builders[b.Name]; ok { + errs = multierror.Append(errs, fmt.Errorf( + "builder %d: builder with name '%s' already exists", + i+1, b.Name)) + continue + } + + // Append the builders + result.Builders[b.Name] = &b + } + + // Gather all the post-processors + if len(r.PostProcessors) > 0 { + result.PostProcessors = make([][]*PostProcessor, 0, len(r.PostProcessors)) + } + for i, v := range r.PostProcessors { + // Parse the configurations. We need to do this because post-processors + // can take three different formats. 
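+		// (a plain type string, a single map, or an array of either; see
+		// parsePostProcessor below)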
+ configs, err := r.parsePostProcessor(i, v) + if err != nil { + errs = multierror.Append(errs, err) + continue + } + + // Parse the PostProcessors out of the configs + pps := make([]*PostProcessor, 0, len(configs)) + for j, c := range configs { + var pp PostProcessor + if err := r.decoder(&pp, nil).Decode(c); err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "post-processor %d.%d: %s", i+1, j+1, err)) + continue + } + + // Type is required + if pp.Type == "" { + errs = multierror.Append(errs, fmt.Errorf( + "post-processor %d.%d: type is required", i+1, j+1)) + continue + } + + // Set the raw configuration and delete any special keys + pp.Config = c + + // The name defaults to the type if it isn't set + if pp.Name == "" { + pp.Name = pp.Type + } + + delete(pp.Config, "except") + delete(pp.Config, "only") + delete(pp.Config, "keep_input_artifact") + delete(pp.Config, "type") + delete(pp.Config, "name") + + if len(pp.Config) == 0 { + pp.Config = nil + } + + pps = append(pps, &pp) + } + + result.PostProcessors = append(result.PostProcessors, pps) + } + + // Gather all the provisioners + if len(r.Provisioners) > 0 { + result.Provisioners = make([]*Provisioner, 0, len(r.Provisioners)) + } + for i, v := range r.Provisioners { + p, err := r.decodeProvisioner(v) + if err != nil { + errs = multierror.Append(errs, fmt.Errorf( + "provisioner %d: %s", i+1, err)) + continue + } + + result.Provisioners = append(result.Provisioners, &p) + } + + // Gather the error-cleanup-provisioner + if r.CleanupProvisioner != nil { + p, err := r.decodeProvisioner(r.CleanupProvisioner) + if err != nil { + errs = multierror.Append(errs, + fmt.Errorf("On Error Cleanup Provisioner error: %s", err)) + } + + result.CleanupProvisioner = &p + } + + // If we have errors, return those with a nil result + if errs != nil { + return nil, errs + } + + return &result, nil +} + +func (r *rawTemplate) decoder( + result interface{}, + md *mapstructure.Metadata) *mapstructure.Decoder { + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Metadata: md, + Result: result, + }) + if err != nil { + // This really shouldn't happen since we have firm control over + // all the arguments and they're all unit tested. So we use a + // panic here to note this would definitely be a bug. + panic(err) + } + return d +} + +func (r *rawTemplate) weakDecoder( + result interface{}, + md *mapstructure.Metadata) *mapstructure.Decoder { + d, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + WeaklyTypedInput: true, + DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + Metadata: md, + Result: result, + }) + if err != nil { + // This really shouldn't happen since we have firm control over + // all the arguments and they're all unit tested. So we use a + // panic here to note this would definitely be a bug. 
+ panic(err) + } + return d +} + +func (r *rawTemplate) parsePostProcessor( + i int, raw interface{}) ([]map[string]interface{}, error) { + switch v := raw.(type) { + case string: + return []map[string]interface{}{ + {"type": v}, + }, nil + case map[string]interface{}: + return []map[string]interface{}{v}, nil + case []interface{}: + var err error + result := make([]map[string]interface{}, len(v)) + for j, innerRaw := range v { + switch innerV := innerRaw.(type) { + case string: + result[j] = map[string]interface{}{"type": innerV} + case map[string]interface{}: + result[j] = innerV + case []interface{}: + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: sequence not allowed to be nested in a sequence", + i+1, j+1)) + default: + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: unknown format", + i+1, j+1)) + } + } + + if err != nil { + return nil, err + } + + return result, nil + default: + return nil, fmt.Errorf("post-processor %d: bad format", i+1) + } +} + +// Parse takes the given io.Reader and parses a Template object out of it. +func Parse(r io.Reader) (*Template, error) { + // First, decode the object into an interface{} and search for duplicate fields. + // We do this instead of the rawTemplate directly because we'd rather use mapstructure to + // decode since it has richer errors. + var raw interface{} + buf, err := jsonUnmarshal(r, &raw) + if err != nil { + return nil, err + } + + // Create our decoder + var md mapstructure.Metadata + var rawTpl rawTemplate + rawTpl.RawContents = buf.Bytes() + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Metadata: &md, + Result: &rawTpl, + }) + if err != nil { + return nil, err + } + + // Do the actual decode into our structure + if err := decoder.Decode(raw); err != nil { + return nil, err + } + + // Build an error if there are unused root level keys + if len(md.Unused) > 0 { + sort.Strings(md.Unused) + + unusedMap, ok := raw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("Failed to convert unused root level keys to map") + } + + for _, unused := range md.Unused { + if unused[0] == '_' { + commentVal, ok := unusedMap[unused].(string) + if !ok { + return nil, fmt.Errorf("Failed to cast root level comment value in comment \"%s\" to string.", unused) + } + + comment := map[string]string{ + unused: commentVal, + } + + rawTpl.Comments = append(rawTpl.Comments, comment) + continue + } + + err = multierror.Append(err, fmt.Errorf( + "Unknown root level key in template: '%s'", unused)) + } + } + if err != nil { + return nil, err + } + + // Return the template parsed from the raw structure + return rawTpl.Template() +} + +func jsonUnmarshal(r io.Reader, raw *interface{}) (bytes.Buffer, error) { + // Create a buffer to copy what we read + var buf bytes.Buffer + if _, err := buf.ReadFrom(r); err != nil { + return buf, err + } + + // Decode the object into an interface{} + if err := json.Unmarshal(buf.Bytes(), raw); err != nil { + return buf, err + } + + // If Json is valid, check for duplicate fields to avoid silent unwanted override + jsonDecoder := json.NewDecoder(strings.NewReader(buf.String())) + if err := checkForDuplicateFields(jsonDecoder); err != nil { + return buf, err + } + + return buf, nil +} + +func checkForDuplicateFields(d *json.Decoder) error { + // Get next token from JSON + t, err := d.Token() + if err != nil { + return err + } + + delim, ok := t.(json.Delim) + // Do nothing if it's not a delimiter + if !ok { + return nil + } + + // Check for duplicates inside of a 
delimiter {} or [] + switch delim { + case '{': + keys := make(map[string]bool) + for d.More() { + // Get attribute key + t, err := d.Token() + if err != nil { + return err + } + key := t.(string) + + // Check for duplicates + if keys[key] { + return fmt.Errorf("template has duplicate field: %s", key) + } + keys[key] = true + + // Check value to find duplicates in nested blocks + if err := checkForDuplicateFields(d); err != nil { + return err + } + } + case '[': + for d.More() { + if err := checkForDuplicateFields(d); err != nil { + return err + } + } + } + + // consume closing delimiter } or ] + if _, err := d.Token(); err != nil { + return err + } + + return nil +} + +// ParseFile is the same as Parse but is a helper to automatically open +// a file for parsing. +func ParseFile(path string) (*Template, error) { + var f *os.File + var err error + if path == "-" { + // Create a temp file for stdin in case of errors + f, err = tmp.File("parse") + if err != nil { + return nil, err + } + defer os.Remove(f.Name()) + defer f.Close() + if _, err = io.Copy(f, os.Stdin); err != nil { + return nil, err + } + if _, err = f.Seek(0, io.SeekStart); err != nil { + return nil, err + } + } else { + f, err = os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + } + tpl, err := Parse(f) + if err != nil { + syntaxErr, ok := err.(*json.SyntaxError) + if !ok { + return nil, err + } + // Rewind the file and get a better error + if _, err := f.Seek(0, io.SeekStart); err != nil { + return nil, err + } + // Grab the error location, and return a string to point to offending syntax error + line, col, highlight := highlightPosition(f, syntaxErr.Offset) + err = fmt.Errorf("Error parsing JSON: %s\nAt line %d, column %d (offset %d):\n%s", err, line, col, syntaxErr.Offset, highlight) + return nil, err + } + + if !filepath.IsAbs(path) { + path, err = filepath.Abs(path) + if err != nil { + return nil, err + } + } + + tpl.Path = path + return tpl, nil +} + +// Takes a file and the location in bytes of a parse error +// from json.SyntaxError.Offset and returns the line, column, +// and pretty-printed context around the error with an arrow indicating the exact +// position of the syntax error. 
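+// For example, an error on line 3 prints lines 2 and 3, with the "^" placed
+// under the offending column.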
+func highlightPosition(f *os.File, pos int64) (line, col int, highlight string) {
+	// Modified version of the function in Camlistore by Brad Fitzpatrick
+	// https://github.com/camlistore/camlistore/blob/4b5403dd5310cf6e1ae8feb8533fd59262701ebc/vendor/go4.org/errorutil/highlight.go
+	line = 1
+	// New io.Reader for the file
+	br := bufio.NewReader(f)
+	// Initialize lines
+	lastLine := ""
+	thisLine := new(bytes.Buffer)
+	// Loop through the template to find the line and column of pos
+	for n := int64(0); n < pos; n++ {
+		// Read a byte from the io.Reader
+		b, err := br.ReadByte()
+		if err != nil {
+			break
+		}
+		// At the end of a line, save the line as the previous line in case
+		// the next line holds the offending position
+		if b == '\n' {
+			lastLine = thisLine.String()
+			thisLine.Reset()
+			line++
+			col = 1
+		} else {
+			// Otherwise, accumulate bytes into the current line until the
+			// error position is reached
+			col++
+			thisLine.WriteByte(b)
+		}
+	}
+
+	// Populate the highlight string to place a '^' char at the offending column
+	if line > 1 {
+		highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine)
+	}
+
+	highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String())
+	highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5))
+	return
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.go
new file mode 100644
index 000000000..decd2d23c
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.go
@@ -0,0 +1,323 @@
+//go:generate mapstructure-to-hcl2 -type Provisioner
+
+package template
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	multierror "github.com/hashicorp/go-multierror"
+)
+
+// Template represents the parsed template that is used to configure
+// Packer builds.
+type Template struct {
+	// Path is the path to the template. This will be blank if Parse is
+	// used, but will be automatically populated by ParseFile.
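+	// When ParseFile populates it, the path is made absolute first.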
+ Path string + + Description string + MinVersion string + + Comments map[string]string + Variables map[string]*Variable + SensitiveVariables []*Variable + Builders map[string]*Builder + Provisioners []*Provisioner + CleanupProvisioner *Provisioner + PostProcessors [][]*PostProcessor + + // RawContents is just the raw data for this template + RawContents []byte +} + +// Raw converts a Template struct back into the raw Packer template structure +func (t *Template) Raw() (*rawTemplate, error) { + var out rawTemplate + + out.MinVersion = t.MinVersion + out.Description = t.Description + + for k, v := range t.Comments { + out.Comments = append(out.Comments, map[string]string{k: v}) + } + + for _, b := range t.Builders { + out.Builders = append(out.Builders, b) + } + + for _, p := range t.Provisioners { + out.Provisioners = append(out.Provisioners, p) + } + + for _, pp := range t.PostProcessors { + out.PostProcessors = append(out.PostProcessors, pp) + } + + for _, v := range t.SensitiveVariables { + out.SensitiveVariables = append(out.SensitiveVariables, v.Key) + } + + for k, v := range t.Variables { + if out.Variables == nil { + out.Variables = make(map[string]interface{}) + } + + out.Variables[k] = v + } + + return &out, nil +} + +// Builder represents a builder configured in the template +type Builder struct { + Name string `json:"name,omitempty"` + Type string `json:"type"` + Config map[string]interface{} `json:"config,omitempty"` +} + +// MarshalJSON conducts the necessary flattening of the Builder struct +// to provide valid Packer template JSON +func (b *Builder) MarshalJSON() ([]byte, error) { + // Avoid recursion + type Builder_ Builder + out, _ := json.Marshal(Builder_(*b)) + + var m map[string]json.RawMessage + _ = json.Unmarshal(out, &m) + + // Flatten Config + delete(m, "config") + for k, v := range b.Config { + out, _ = json.Marshal(v) + m[k] = out + } + + return json.Marshal(m) +} + +// PostProcessor represents a post-processor within the template. +type PostProcessor struct { + OnlyExcept `mapstructure:",squash" json:",omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type"` + KeepInputArtifact *bool `mapstructure:"keep_input_artifact" json:"keep_input_artifact,omitempty"` + Config map[string]interface{} `json:"config,omitempty"` +} + +// MarshalJSON conducts the necessary flattening of the PostProcessor struct +// to provide valid Packer template JSON +func (p *PostProcessor) MarshalJSON() ([]byte, error) { + // Early exit for simple definitions + if len(p.Config) == 0 && len(p.OnlyExcept.Only) == 0 && len(p.OnlyExcept.Except) == 0 && p.KeepInputArtifact == nil { + return json.Marshal(p.Type) + } + + // Avoid recursion + type PostProcessor_ PostProcessor + out, _ := json.Marshal(PostProcessor_(*p)) + + var m map[string]json.RawMessage + _ = json.Unmarshal(out, &m) + + // Flatten Config + delete(m, "config") + for k, v := range p.Config { + out, _ = json.Marshal(v) + m[k] = out + } + + return json.Marshal(m) +} + +// Provisioner represents a provisioner within the template. 
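+//
+// During parsing, keys that don't correspond to a struct field end up in
+// Config, and MarshalJSON (below) flattens Config back into the top-level
+// JSON object, so a (hypothetical) template fragment like:
+//
+//	{
+//	  "type": "shell",
+//	  "inline": ["echo hello"]
+//	}
+//
+// round-trips with Type "shell" and Config {"inline": ["echo hello"]}.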
+type Provisioner struct { + OnlyExcept `mapstructure:",squash" json:",omitempty"` + + Type string `json:"type"` + Config map[string]interface{} `json:"config,omitempty"` + Override map[string]interface{} `json:"override,omitempty"` + PauseBefore time.Duration `mapstructure:"pause_before" json:"pause_before,omitempty"` + MaxRetries string `mapstructure:"max_retries" json:"max_retries,omitempty"` + Timeout time.Duration `mapstructure:"timeout" json:"timeout,omitempty"` +} + +// MarshalJSON conducts the necessary flattening of the Provisioner struct +// to provide valid Packer template JSON +func (p *Provisioner) MarshalJSON() ([]byte, error) { + // Avoid recursion + type Provisioner_ Provisioner + out, _ := json.Marshal(Provisioner_(*p)) + + var m map[string]json.RawMessage + _ = json.Unmarshal(out, &m) + + // Flatten Config + delete(m, "config") + for k, v := range p.Config { + out, _ = json.Marshal(v) + m[k] = out + } + + return json.Marshal(m) +} + +// Push represents the configuration for pushing the template to Atlas. +type Push struct { + Name string + Address string + BaseDir string `mapstructure:"base_dir"` + Include []string + Exclude []string + Token string + VCS bool +} + +// Variable represents a variable within the template +type Variable struct { + Key string + Default string + Required bool +} + +func (v *Variable) MarshalJSON() ([]byte, error) { + if v.Required { + // We use a nil pointer to coax Go into marshalling it as a JSON null + var ret *string + return json.Marshal(ret) + } + + return json.Marshal(v.Default) +} + +// OnlyExcept is a struct that is meant to be embedded that contains the +// logic required for "only" and "except" meta-parameters. +type OnlyExcept struct { + Only []string `json:"only,omitempty"` + Except []string `json:"except,omitempty"` +} + +//------------------------------------------------------------------- +// Functions +//------------------------------------------------------------------- + +// Validate does some basic validation of the template on top of the +// validation that occurs while parsing. If possible, we try to defer +// validation to here. The validation errors that occur during parsing +// are the minimal necessary to make sure parsing builds a reasonable +// Template structure. +func (t *Template) Validate() error { + var err error + + // At least one builder must be defined + if len(t.Builders) == 0 { + err = multierror.Append(err, errors.New( + "at least one builder must be defined")) + } + + // Verify that the provisioner overrides target builders that exist + for i, p := range t.Provisioners { + // Validate only/except + if verr := p.OnlyExcept.Validate(t); verr != nil { + for _, e := range multierror.Append(verr).Errors { + err = multierror.Append(err, fmt.Errorf( + "provisioner %d: %s", i+1, e)) + } + } + + // Validate overrides + for name := range p.Override { + if _, ok := t.Builders[name]; !ok { + err = multierror.Append(err, fmt.Errorf( + "provisioner %d: override '%s' doesn't exist", + i+1, name)) + } + } + } + + // Verify post-processors + for i, chain := range t.PostProcessors { + for j, p := range chain { + // Validate only/except + if verr := p.OnlyExcept.Validate(t); verr != nil { + for _, e := range multierror.Append(verr).Errors { + err = multierror.Append(err, fmt.Errorf( + "post-processor %d.%d: %s", i+1, j+1, e)) + } + } + } + } + + return err +} + +// Skip says whether or not to skip the build with the given name. 
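+//
+// For example, with Only = []string{"a"}, Skip("a") is false and Skip("b")
+// is true; with Except = []string{"a"}, Skip("a") is true and Skip("b") is
+// false. With neither set, Skip always returns false.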
+func (o *OnlyExcept) Skip(n string) bool {
+	if len(o.Only) > 0 {
+		for _, v := range o.Only {
+			if v == n {
+				return false
+			}
+		}
+
+		return true
+	}
+
+	if len(o.Except) > 0 {
+		for _, v := range o.Except {
+			if v == n {
+				return true
+			}
+		}
+
+		return false
+	}
+
+	return false
+}
+
+// Validate validates that the OnlyExcept settings are correct for a template.
+func (o *OnlyExcept) Validate(t *Template) error {
+	if len(o.Only) > 0 && len(o.Except) > 0 {
+		return errors.New("only one of 'only' or 'except' may be specified")
+	}
+
+	var err error
+	for _, n := range o.Only {
+		if _, ok := t.Builders[n]; !ok {
+			err = multierror.Append(err, fmt.Errorf(
+				"'only' specified builder '%s' not found", n))
+		}
+	}
+	for _, n := range o.Except {
+		if _, ok := t.Builders[n]; !ok {
+			err = multierror.Append(err, fmt.Errorf(
+				"'except' specified builder '%s' not found", n))
+		}
+	}
+
+	return err
+}
+
+//-------------------------------------------------------------------
+// GoStringer
+//-------------------------------------------------------------------
+
+func (b *Builder) GoString() string {
+	return fmt.Sprintf("*%#v", *b)
+}
+
+func (p *Provisioner) GoString() string {
+	return fmt.Sprintf("*%#v", *p)
+}
+
+func (p *PostProcessor) GoString() string {
+	return fmt.Sprintf("*%#v", *p)
+}
+
+func (v *Variable) GoString() string {
+	return fmt.Sprintf("*%#v", *v)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.hcl2spec.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.hcl2spec.go
new file mode 100644
index 000000000..0eb054099
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/template.hcl2spec.go
@@ -0,0 +1,45 @@
+// Code generated by "mapstructure-to-hcl2 -type Provisioner"; DO NOT EDIT.
+
+package template
+
+import (
+	"github.com/hashicorp/hcl/v2/hcldec"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// FlatProvisioner is an auto-generated flat version of Provisioner.
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
+type FlatProvisioner struct {
+	Only        []string               `json:"only,omitempty" cty:"only" hcl:"only"`
+	Except      []string               `json:"except,omitempty" cty:"except" hcl:"except"`
+	Type        *string                `json:"type" cty:"type" hcl:"type"`
+	Config      map[string]interface{} `json:"config,omitempty" cty:"config" hcl:"config"`
+	Override    map[string]interface{} `json:"override,omitempty" cty:"override" hcl:"override"`
+	PauseBefore *string                `mapstructure:"pause_before" json:"pause_before,omitempty" cty:"pause_before" hcl:"pause_before"`
+	MaxRetries  *string                `mapstructure:"max_retries" json:"max_retries,omitempty" cty:"max_retries" hcl:"max_retries"`
+	Timeout     *string                `mapstructure:"timeout" json:"timeout,omitempty" cty:"timeout" hcl:"timeout"`
+}
+
+// FlatMapstructure returns a new FlatProvisioner.
+// FlatProvisioner is an auto-generated flat version of Provisioner.
+// Where the contents of fields with a `mapstructure:,squash` tag are bubbled up.
+func (*Provisioner) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
+	return new(FlatProvisioner)
+}
+
+// HCL2Spec returns the hcl spec of a Provisioner.
+// This spec is used by HCL to read the fields of Provisioner.
+// The decoded values from this spec will then be applied to a FlatProvisioner.
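+//
+// A minimal decode sketch (assuming an hcl.Body named body, e.g. obtained
+// from hclparse):
+//
+//	spec := hcldec.ObjectSpec(new(FlatProvisioner).HCL2Spec())
+//	val, diags := hcldec.Decode(body, spec, nil)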
+func (*FlatProvisioner) HCL2Spec() map[string]hcldec.Spec {
+	s := map[string]hcldec.Spec{
+		"only":         &hcldec.AttrSpec{Name: "only", Type: cty.List(cty.String), Required: false},
+		"except":       &hcldec.AttrSpec{Name: "except", Type: cty.List(cty.String), Required: false},
+		"type":         &hcldec.AttrSpec{Name: "type", Type: cty.String, Required: false},
+		"config":       &hcldec.AttrSpec{Name: "config", Type: cty.Map(cty.String), Required: false},
+		"override":     &hcldec.AttrSpec{Name: "override", Type: cty.Map(cty.String), Required: false},
+		"pause_before": &hcldec.AttrSpec{Name: "pause_before", Type: cty.String, Required: false},
+		"max_retries":  &hcldec.AttrSpec{Name: "max_retries", Type: cty.String, Required: false},
+		"timeout":      &hcldec.AttrSpec{Name: "timeout", Type: cty.String, Required: false},
+	}
+	return s
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/tmp/tmp.go b/vendor/github.com/hashicorp/packer-plugin-sdk/tmp/tmp.go
new file mode 100644
index 000000000..147673b9f
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/tmp/tmp.go
@@ -0,0 +1,46 @@
+// Package tmp provides temporary directory and file helpers.
+//
+// tmp stores temporary items in the system's
+// temporary directory unless a corresponding
+// environment variable is set (see os.TempDir).
+//
+// On Unix systems, it uses $TMPDIR if non-empty, else /tmp.
+// On Windows, it uses GetTempPath, returning the first non-empty
+// value from %TMP%, %TEMP%, %USERPROFILE%, or the Windows directory.
+// On Plan 9, it returns /tmp.
+//
+// The directory is neither guaranteed to exist nor have accessible
+// permissions.
+package tmp
+
+import (
+	"io/ioutil"
+	"os"
+)
+
+var tmpDir = os.TempDir()
+
+// Dir creates a new temporary directory in the system temporary
+// directory with a name beginning with prefix and returns the path
+// of the new directory.
+// Multiple programs calling Dir simultaneously
+// will not choose the same directory.
+// It is the caller's responsibility
+// to remove the directory when no longer needed.
+func Dir(prefix string) (string, error) {
+	return ioutil.TempDir(tmpDir, prefix)
+}
+
+// File creates a new temporary file in the system temporary
+// directory, opens the file for reading and writing, and
+// returns the resulting *os.File.
+// The filename is generated by taking pattern and adding a random
+// string to the end. If pattern includes a "*", the random string
+// replaces the last "*".
+// Multiple programs calling File simultaneously
+// will not choose the same file. The caller can use f.Name()
+// to find the pathname of the file. It is the caller's responsibility
+// to remove the file when no longer needed.
+func File(pattern string) (*os.File, error) {
+	return ioutil.TempFile(tmpDir, pattern)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/useragent/useragent.go b/vendor/github.com/hashicorp/packer-plugin-sdk/useragent/useragent.go
new file mode 100644
index 000000000..bbb72f47b
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/useragent/useragent.go
@@ -0,0 +1,28 @@
+// Package useragent creates a user agent for builders to use when calling out
+// to cloud APIs or other addresses.
+package useragent
+
+import (
+	"fmt"
+	"runtime"
+)
+
+var (
+	// projectURL is the project URL.
+	projectURL = "https://www.packer.io/"
+
+	// rt is the runtime version - a variable so tests can override it.
+	rt = runtime.Version()
+
+	// goos is the OS - a variable so tests can override it.
+	goos = runtime.GOOS
+
+	// goarch is the architecture - a variable so tests can override it.
+	goarch = runtime.GOARCH
+)
+
+// String returns the consistent user-agent string for Packer.
+func String(packerVersion string) string {
+	return fmt.Sprintf("Packer/%s (+%s; %s; %s/%s)",
+		packerVersion, projectURL, rt, goos, goarch)
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/uuid/uuid.go b/vendor/github.com/hashicorp/packer-plugin-sdk/uuid/uuid.go
new file mode 100644
index 000000000..bf5d922ab
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/uuid/uuid.go
@@ -0,0 +1,25 @@
+// Package uuid provides helper functions for creating time-ordered UUIDs.
+package uuid
+
+import (
+	"crypto/rand"
+	"fmt"
+	"time"
+)
+
+// TimeOrderedUUID generates a time-ordered UUID. The top 32 bits are a
+// timestamp; the bottom 96 bits are random.
+func TimeOrderedUUID() string {
+	unix := uint32(time.Now().UTC().Unix())
+
+	b := make([]byte, 12)
+	n, err := rand.Read(b)
+	if n != len(b) {
+		err = fmt.Errorf("Not enough entropy available")
+	}
+	if err != nil {
+		panic(err)
+	}
+	return fmt.Sprintf("%08x-%04x-%04x-%04x-%04x%08x",
+		unix, b[0:2], b[2:4], b[4:6], b[6:8], b[8:])
+}
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go
new file mode 100644
index 000000000..345530638
--- /dev/null
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go
@@ -0,0 +1,81 @@
+// Package version helps plugin creators set and track the plugin version using
+// the same convenience functions used by the Packer core.
+package version
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/hashicorp/go-version"
+)
+
+// GitCommit is the git commit that was compiled. This will be filled in by
+// the compiler.
+var GitCommit string
+
+// InitializePluginVersion initializes the SemVer and returns a version var.
+// If the provided "version" string is not valid, the call to version.Must
+// will panic. Therefore, this function should always be called in a package
+// init() function to make sure that plugins are following proper semantic
+// versioning and to make sure that plugins which aren't following proper
+// semantic versioning crash immediately rather than later.
+func InitializePluginVersion(vers, versionPrerelease string) *PluginVersion {
+	pv := PluginVersion{
+		version:           vers,
+		versionPrerelease: versionPrerelease,
+	}
+	// This call initializes the SemVer to make sure that if Packer crashes due
+	// to an invalid SemVer it's at the very beginning of the Packer run.
+	pv.semVer = version.Must(version.NewVersion(vers))
+	return &pv
+}
+
+type PluginVersion struct {
+	// The main version number that is being run at the moment.
+	version string
+	// A pre-release marker for the version. If this is "" (empty string)
+	// then it means that it is a final release. Otherwise, this is a pre-release
+	// such as "dev" (in development), "beta", "rc1", etc.
+	versionPrerelease string
+	// The Semantic Version of the plugin. Used for version constraint comparisons
+	semVer *version.Version
+}
+
+func (p *PluginVersion) FormattedVersion() string {
+	var versionString bytes.Buffer
+	fmt.Fprintf(&versionString, "%s", p.version)
+	if p.versionPrerelease != "" {
+		fmt.Fprintf(&versionString, "-%s", p.versionPrerelease)
+
+		if GitCommit != "" {
+			fmt.Fprintf(&versionString, " (%s)", GitCommit)
+		}
+	}
+
+	return versionString.String()
+}
+
+func (p *PluginVersion) SemVer() *version.Version {
+	if p.semVer == nil {
+		// Lazily parse the SemVer if it was not set at construction time.
+		// Parsing with version.Must also verifies that our version is a
+		// proper semantic version, which should always be the case.
+		p.semVer = version.Must(version.NewVersion(p.version))
+	}
+	return p.semVer
+}
+
+func (p *PluginVersion) GetVersion() string {
+	return p.version
+}
+
+func (p *PluginVersion) GetVersionPrerelease() string {
+	return p.versionPrerelease
+}
+
+// String returns the complete version string, including prerelease
+func (p *PluginVersion) String() string {
+	if p.versionPrerelease != "" {
+		return fmt.Sprintf("%s-%s", p.version, p.versionPrerelease)
+	}
+	return p.version
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fa24c695d..059f67b9a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -84,10 +84,6 @@ github.com/aliyun/alibaba-cloud-sdk-go/services/ecs
 github.com/aliyun/alibaba-cloud-sdk-go/services/ram
 # github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20170113022742-e6dbea820a9f
 github.com/aliyun/aliyun-oss-go-sdk/oss
-# github.com/antchfx/xpath v0.0.0-20170728053731-b5c552e1acbd
-github.com/antchfx/xpath
-# github.com/antchfx/xquery v0.0.0-20170730121040-eb8c3c172607
-github.com/antchfx/xquery/xml
 # github.com/antihax/optional v1.0.0
 github.com/antihax/optional
 # github.com/apparentlymart/go-cidr v1.0.1
@@ -189,8 +185,6 @@ github.com/digitalocean/godo
 github.com/dimchansky/utfbom
 # github.com/dylanmei/iso8601 v0.1.0
 github.com/dylanmei/iso8601
-# github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
-github.com/dylanmei/winrmtest
 # github.com/exoscale/egoscale v0.18.1
 github.com/exoscale/egoscale
 # github.com/fatih/camelcase v1.0.0
@@ -361,6 +355,45 @@ github.com/hashicorp/hcl/v2/hclparse
 github.com/hashicorp/hcl/v2/hclsyntax
 github.com/hashicorp/hcl/v2/hclwrite
 github.com/hashicorp/hcl/v2/json
+# github.com/hashicorp/packer-plugin-sdk v0.0.4
+github.com/hashicorp/packer-plugin-sdk/acctest
+github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc
+github.com/hashicorp/packer-plugin-sdk/acctest/testutils
+github.com/hashicorp/packer-plugin-sdk/adapter
+github.com/hashicorp/packer-plugin-sdk/bootcommand
+github.com/hashicorp/packer-plugin-sdk/chroot
+github.com/hashicorp/packer-plugin-sdk/common
+github.com/hashicorp/packer-plugin-sdk/communicator
+github.com/hashicorp/packer-plugin-sdk/communicator/ssh
+github.com/hashicorp/packer-plugin-sdk/communicator/sshkey
+github.com/hashicorp/packer-plugin-sdk/filelock
+github.com/hashicorp/packer-plugin-sdk/guestexec
+github.com/hashicorp/packer-plugin-sdk/json
+github.com/hashicorp/packer-plugin-sdk/multistep
+github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps
+github.com/hashicorp/packer-plugin-sdk/net
+github.com/hashicorp/packer-plugin-sdk/packer
+github.com/hashicorp/packer-plugin-sdk/packerbuilderdata
+github.com/hashicorp/packer-plugin-sdk/pathing
+github.com/hashicorp/packer-plugin-sdk/plugin
+github.com/hashicorp/packer-plugin-sdk/random
+github.com/hashicorp/packer-plugin-sdk/retry
+github.com/hashicorp/packer-plugin-sdk/rpc
+github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none
+github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh
+github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm
+github.com/hashicorp/packer-plugin-sdk/shell
+github.com/hashicorp/packer-plugin-sdk/shell-local
+github.com/hashicorp/packer-plugin-sdk/shell-local/localexec
+github.com/hashicorp/packer-plugin-sdk/shutdowncommand
+github.com/hashicorp/packer-plugin-sdk/template
+github.com/hashicorp/packer-plugin-sdk/template/config +github.com/hashicorp/packer-plugin-sdk/template/interpolate +github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager +github.com/hashicorp/packer-plugin-sdk/tmp +github.com/hashicorp/packer-plugin-sdk/useragent +github.com/hashicorp/packer-plugin-sdk/uuid +github.com/hashicorp/packer-plugin-sdk/version # github.com/hashicorp/serf v0.9.2 github.com/hashicorp/serf/coordinate # github.com/hashicorp/vault/api v1.0.4