update vendoring
This commit is contained in:
parent
82eb22c8bb
commit
37652b05eb
28
go.mod
28
go.mod
|
@ -34,7 +34,7 @@ require (
|
|||
github.com/digitalocean/go-qemu v0.0.0-20181112162955-dd7bb9c771b8
|
||||
github.com/digitalocean/godo v1.11.1
|
||||
github.com/dylanmei/iso8601 v0.1.0 // indirect
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08
|
||||
github.com/dylanmei/winrmtest v0.0.0-20170819153634-c2fbb09e6c08 // indirect
|
||||
github.com/exoscale/egoscale v0.18.1
|
||||
github.com/fatih/camelcase v1.0.0
|
||||
github.com/fatih/structtag v1.0.0
|
||||
|
@ -42,11 +42,11 @@ require (
|
|||
github.com/go-ole/go-ole v1.2.4 // indirect
|
||||
github.com/go-resty/resty/v2 v2.3.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/gofrs/flock v0.7.3
|
||||
github.com/gofrs/flock v0.7.3 // indirect
|
||||
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
|
||||
github.com/google/go-cmp v0.5.2
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 // indirect
|
||||
github.com/google/uuid v1.1.1
|
||||
github.com/gophercloud/gophercloud v0.12.0
|
||||
github.com/gophercloud/utils v0.0.0-20200508015959-b0167b94122c
|
||||
|
@ -54,13 +54,13 @@ require (
|
|||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
|
||||
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
|
||||
github.com/hashicorp/aws-sdk-go-base v0.6.0
|
||||
github.com/hashicorp/consul/api v1.4.0
|
||||
github.com/hashicorp/consul/api v1.4.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.0.0
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
|
||||
github.com/hashicorp/go-cleanhttp v0.5.1
|
||||
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
|
||||
github.com/hashicorp/go-getter/gcs/v2 v2.0.0-20200604122502-a6995fa1edad
|
||||
github.com/hashicorp/go-getter/s3/v2 v2.0.0-20200604122502-a6995fa1edad
|
||||
github.com/hashicorp/go-getter/gcs/v2 v2.0.0-20200604122502-a6995fa1edad // indirect
|
||||
github.com/hashicorp/go-getter/s3/v2 v2.0.0-20200604122502-a6995fa1edad // indirect
|
||||
github.com/hashicorp/go-getter/v2 v2.0.0-20200604122502-a6995fa1edad
|
||||
github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
|
||||
github.com/hashicorp/go-msgpack v0.5.5 // indirect
|
||||
|
@ -73,11 +73,11 @@ require (
|
|||
github.com/hashicorp/packer-plugin-sdk v0.0.4
|
||||
github.com/hashicorp/serf v0.9.2 // indirect
|
||||
github.com/hashicorp/vault/api v1.0.4
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect
|
||||
github.com/hetznercloud/hcloud-go v1.15.1
|
||||
github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4
|
||||
github.com/jdcloud-api/jdcloud-sdk-go v1.9.1-0.20190605102154-3d81a50ca961
|
||||
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869
|
||||
github.com/jehiah/go-strftime v0.0.0-20171201141054-1d33003b3869 // indirect
|
||||
github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62
|
||||
github.com/json-iterator/go v1.1.6 // indirect
|
||||
github.com/jtolds/gls v4.2.1+incompatible // indirect
|
||||
|
@ -91,12 +91,12 @@ require (
|
|||
github.com/masterzen/winrm v0.0.0-20200615185753-c42b5136ff88
|
||||
github.com/mattn/go-tty v0.0.0-20191112051231-74040eebce08
|
||||
github.com/mitchellh/cli v1.1.0
|
||||
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7
|
||||
github.com/mitchellh/go-fs v0.0.0-20180402234041-7b48fa161ea7 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/mitchellh/go-testing-interface v1.0.3 // indirect
|
||||
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed
|
||||
github.com/mitchellh/gox v1.0.1 // indirect
|
||||
github.com/mitchellh/iochan v1.0.0
|
||||
github.com/mitchellh/iochan v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.2.3
|
||||
github.com/mitchellh/panicwrap v1.0.0
|
||||
github.com/mitchellh/prefixedio v0.0.0-20151214002211-6e6954073784
|
||||
|
@ -107,13 +107,13 @@ require (
|
|||
github.com/olekukonko/tablewriter v0.0.0-20180105111133-96aac992fc8b
|
||||
github.com/oracle/oci-go-sdk v18.0.0+incompatible
|
||||
github.com/outscale/osc-sdk-go/osc v0.0.0-20200722135656-d654809d0699
|
||||
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a
|
||||
github.com/packer-community/winrmcp v0.0.0-20180921204643-0fd363d6159a // indirect
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca
|
||||
github.com/pkg/sftp v0.0.0-20160118190721-e84cc8c755ca // indirect
|
||||
github.com/posener/complete v1.2.3
|
||||
github.com/profitbricks/profitbricks-sdk-go v4.0.2+incompatible
|
||||
github.com/ryanuber/go-glob v1.0.0
|
||||
github.com/ryanuber/go-glob v1.0.0 // indirect
|
||||
github.com/satori/go.uuid v1.2.0 // indirect
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7
|
||||
github.com/shirou/gopsutil v2.18.12+incompatible
|
||||
|
@ -125,7 +125,7 @@ require (
|
|||
github.com/tencentcloud/tencentcloud-sdk-go v3.0.222+incompatible
|
||||
github.com/ucloud/ucloud-sdk-go v0.16.3
|
||||
github.com/ufilesdk-dev/ufile-gosdk v0.0.0-20190830075812-b4dbc4ef43a6
|
||||
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1
|
||||
github.com/ugorji/go v0.0.0-20151218193438-646ae4a518c1 // indirect
|
||||
github.com/ulikunitz/xz v0.5.5
|
||||
github.com/vmware/govmomi v0.23.1
|
||||
github.com/xanzy/go-cloudstack v0.0.0-20190526095453-42f262b63ed0
|
||||
|
|
|
@ -1,32 +0,0 @@
|
|||
# vscode
|
||||
.vscode
|
||||
debug
|
||||
*.test
|
||||
|
||||
./build
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
|
@ -1,12 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.6
|
||||
- 1.7
|
||||
- 1.8
|
||||
|
||||
install:
|
||||
- go get github.com/mattn/goveralls
|
||||
|
||||
script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci
|
|
@ -1,17 +0,0 @@
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,119 +0,0 @@
|
|||
XPath
|
||||
====
|
||||
[![GoDoc](https://godoc.org/github.com/antchfx/xpath?status.svg)](https://godoc.org/github.com/antchfx/xpath)
|
||||
[![Coverage Status](https://coveralls.io/repos/github/antchfx/xpath/badge.svg?branch=master)](https://coveralls.io/github/antchfx/xpath?branch=master)
|
||||
[![Build Status](https://travis-ci.org/antchfx/xpath.svg?branch=master)](https://travis-ci.org/antchfx/xpath)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/antchfx/xpath)](https://goreportcard.com/report/github.com/antchfx/xpath)
|
||||
|
||||
XPath is Go package provides selecting nodes from XML, HTML or other documents using XPath expression.
|
||||
|
||||
[XQuery](https://github.com/antchfx/xquery) : lets you extract data from HTML/XML documents using XPath package.
|
||||
|
||||
### Features
|
||||
|
||||
#### The basic XPath patterns.
|
||||
|
||||
> The basic XPath patterns cover 90% of the cases that most stylesheets will need.
|
||||
|
||||
- `node` : Selects all child elements with nodeName of node.
|
||||
|
||||
- `*` : Selects all child elements.
|
||||
|
||||
- `@attr` : Selects the attribute attr.
|
||||
|
||||
- `@*` : Selects all attributes.
|
||||
|
||||
- `node()` : Matches an org.w3c.dom.Node.
|
||||
|
||||
- `text()` : Matches a org.w3c.dom.Text node.
|
||||
|
||||
- `comment()` : Matches a comment.
|
||||
|
||||
- `.` : Selects the current node.
|
||||
|
||||
- `..` : Selects the parent of current node.
|
||||
|
||||
- `/` : Selects the document node.
|
||||
|
||||
- `a[expr]` : Select only those nodes matching a which also satisfy the expression expr.
|
||||
|
||||
- `a[n]` : Selects the nth matching node matching a When a filter's expression is a number, XPath selects based on position.
|
||||
|
||||
- `a/b` : For each node matching a, add the nodes matching b to the result.
|
||||
|
||||
- `a//b` : For each node matching a, add the descendant nodes matching b to the result.
|
||||
|
||||
- `//b` : Returns elements in the entire document matching b.
|
||||
|
||||
- `a|b` : All nodes matching a or b.
|
||||
|
||||
#### Node Axes
|
||||
|
||||
- `child::*` : The child axis selects children of the current node.
|
||||
|
||||
- `descendant::*` : The descendant axis selects descendants of the current node. It is equivalent to '//'.
|
||||
|
||||
- `descendant-or-self::*` : Selects descendants including the current node.
|
||||
|
||||
- `attribute::*` : Selects attributes of the current element. It is equivalent to @*
|
||||
|
||||
- `following-sibling::*` : Selects nodes after the current node.
|
||||
|
||||
- `preceding-sibling::*` : Selects nodes before the current node.
|
||||
|
||||
- `following::*` : Selects the first matching node following in document order, excluding descendants.
|
||||
|
||||
- `preceding::*` : Selects the first matching node preceding in document order, excluding ancestors.
|
||||
|
||||
- `parent::*` : Selects the parent if it matches. The '..' pattern from the core is equivalent to 'parent::node()'.
|
||||
|
||||
- `ancestor::*` : Selects matching ancestors.
|
||||
|
||||
- `ancestor-or-self::*` : Selects ancestors including the current node.
|
||||
|
||||
- `self::*` : Selects the current node. '.' is equivalent to 'self::node()'.
|
||||
|
||||
#### Expressions
|
||||
|
||||
The gxpath supported three types: number, boolean, string.
|
||||
|
||||
- `path` : Selects nodes based on the path.
|
||||
|
||||
- `a = b` : Standard comparisons.
|
||||
|
||||
* a = b True if a equals b.
|
||||
* a != b True if a is not equal to b.
|
||||
* a < b True if a is less than b.
|
||||
* a <= b True if a is less than or equal to b.
|
||||
* a > b True if a is greater than b.
|
||||
* a >= b True if a is greater than or equal to b.
|
||||
|
||||
- `a + b` : Arithmetic expressions.
|
||||
|
||||
* `- a` Unary minus
|
||||
* a + b Add
|
||||
* a - b Substract
|
||||
* a * b Multiply
|
||||
* a div b Divide
|
||||
* a mod b Floating point mod, like Java.
|
||||
|
||||
- `(expr)` : Parenthesized expressions.
|
||||
|
||||
- `fun(arg1, ..., argn)` : Function calls.
|
||||
|
||||
* position()
|
||||
* last()
|
||||
* count( node-set )
|
||||
* name()
|
||||
* starts-with( string, string )
|
||||
* normalize-space( string )
|
||||
* substring( string , start [, length] )
|
||||
* not( expression )
|
||||
* string-length( [string] )
|
||||
* contains( string, string )
|
||||
* sum( node-set )
|
||||
* concat( string1 , string2 [, stringn]* )
|
||||
|
||||
- `a or b` : Boolean or.
|
||||
|
||||
- `a and b` : Boolean and.
|
|
@ -1,359 +0,0 @@
|
|||
package xpath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type flag int
|
||||
|
||||
const (
|
||||
noneFlag flag = iota
|
||||
filterFlag
|
||||
)
|
||||
|
||||
// builder provides building an XPath expressions.
|
||||
type builder struct {
|
||||
depth int
|
||||
flag flag
|
||||
firstInput query
|
||||
}
|
||||
|
||||
// axisPredicate creates a predicate to predicating for this axis node.
|
||||
func axisPredicate(root *axisNode) func(NodeNavigator) bool {
|
||||
// get current axix node type.
|
||||
typ := ElementNode
|
||||
if root.AxeType == "attribute" {
|
||||
typ = AttributeNode
|
||||
} else {
|
||||
switch root.Prop {
|
||||
case "comment":
|
||||
typ = CommentNode
|
||||
case "text":
|
||||
typ = TextNode
|
||||
// case "processing-instruction":
|
||||
// typ = ProcessingInstructionNode
|
||||
case "node":
|
||||
typ = ElementNode
|
||||
}
|
||||
}
|
||||
predicate := func(n NodeNavigator) bool {
|
||||
if typ == n.NodeType() || typ == TextNode {
|
||||
if root.LocalName == "" || (root.LocalName == n.LocalName() && root.Prefix == n.Prefix()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
return predicate
|
||||
}
|
||||
|
||||
// processAxisNode processes a query for the XPath axis node.
|
||||
func (b *builder) processAxisNode(root *axisNode) (query, error) {
|
||||
var (
|
||||
err error
|
||||
qyInput query
|
||||
qyOutput query
|
||||
predicate = axisPredicate(root)
|
||||
)
|
||||
|
||||
if root.Input == nil {
|
||||
qyInput = &contextQuery{}
|
||||
} else {
|
||||
if b.flag&filterFlag == 0 {
|
||||
if root.AxeType == "child" && (root.Input.Type() == nodeAxis) {
|
||||
if input := root.Input.(*axisNode); input.AxeType == "descendant-or-self" {
|
||||
var qyGrandInput query
|
||||
if input.Input != nil {
|
||||
qyGrandInput, _ = b.processNode(input.Input)
|
||||
} else {
|
||||
qyGrandInput = &contextQuery{}
|
||||
}
|
||||
qyOutput = &descendantQuery{Input: qyGrandInput, Predicate: predicate, Self: true}
|
||||
return qyOutput, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
qyInput, err = b.processNode(root.Input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
switch root.AxeType {
|
||||
case "ancestor":
|
||||
qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate}
|
||||
case "ancestor-or-self":
|
||||
qyOutput = &ancestorQuery{Input: qyInput, Predicate: predicate, Self: true}
|
||||
case "attribute":
|
||||
qyOutput = &attributeQuery{Input: qyInput, Predicate: predicate}
|
||||
case "child":
|
||||
filter := func(n NodeNavigator) bool {
|
||||
v := predicate(n)
|
||||
switch root.Prop {
|
||||
case "text":
|
||||
v = v && n.NodeType() == TextNode
|
||||
case "node":
|
||||
v = v && (n.NodeType() == ElementNode || n.NodeType() == TextNode)
|
||||
case "comment":
|
||||
v = v && n.NodeType() == CommentNode
|
||||
}
|
||||
return v
|
||||
}
|
||||
qyOutput = &childQuery{Input: qyInput, Predicate: filter}
|
||||
case "descendant":
|
||||
qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate}
|
||||
case "descendant-or-self":
|
||||
qyOutput = &descendantQuery{Input: qyInput, Predicate: predicate, Self: true}
|
||||
case "following":
|
||||
qyOutput = &followingQuery{Input: qyInput, Predicate: predicate}
|
||||
case "following-sibling":
|
||||
qyOutput = &followingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
|
||||
case "parent":
|
||||
qyOutput = &parentQuery{Input: qyInput, Predicate: predicate}
|
||||
case "preceding":
|
||||
qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate}
|
||||
case "preceding-sibling":
|
||||
qyOutput = &precedingQuery{Input: qyInput, Predicate: predicate, Sibling: true}
|
||||
case "self":
|
||||
qyOutput = &selfQuery{Input: qyInput, Predicate: predicate}
|
||||
case "namespace":
|
||||
// haha,what will you do someting??
|
||||
default:
|
||||
err = fmt.Errorf("unknown axe type: %s", root.AxeType)
|
||||
return nil, err
|
||||
}
|
||||
return qyOutput, nil
|
||||
}
|
||||
|
||||
// processFilterNode builds query for the XPath filter predicate.
|
||||
func (b *builder) processFilterNode(root *filterNode) (query, error) {
|
||||
b.flag |= filterFlag
|
||||
|
||||
qyInput, err := b.processNode(root.Input)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyCond, err := b.processNode(root.Condition)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput := &filterQuery{Input: qyInput, Predicate: qyCond}
|
||||
return qyOutput, nil
|
||||
}
|
||||
|
||||
// processFunctionNode processes query for the XPath function node.
|
||||
func (b *builder) processFunctionNode(root *functionNode) (query, error) {
|
||||
var qyOutput query
|
||||
switch root.FuncName {
|
||||
case "starts-with":
|
||||
arg1, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
arg2, err := b.processNode(root.Args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: startwithFunc(arg1, arg2)}
|
||||
case "contains":
|
||||
arg1, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
arg2, err := b.processNode(root.Args[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: containsFunc(arg1, arg2)}
|
||||
case "substring":
|
||||
//substring( string , start [, length] )
|
||||
if len(root.Args) < 2 {
|
||||
return nil, errors.New("xpath: substring function must have at least two parameter")
|
||||
}
|
||||
var (
|
||||
arg1, arg2, arg3 query
|
||||
err error
|
||||
)
|
||||
if arg1, err = b.processNode(root.Args[0]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if arg2, err = b.processNode(root.Args[1]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(root.Args) == 3 {
|
||||
if arg3, err = b.processNode(root.Args[2]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: substringFunc(arg1, arg2, arg3)}
|
||||
case "string-length":
|
||||
// string-length( [string] )
|
||||
if len(root.Args) < 1 {
|
||||
return nil, errors.New("xpath: string-length function must have at least one parameter")
|
||||
}
|
||||
arg1, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: stringLengthFunc(arg1)}
|
||||
case "normalize-space":
|
||||
if len(root.Args) == 0 {
|
||||
return nil, errors.New("xpath: normalize-space function must have at least one parameter")
|
||||
}
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: normalizespaceFunc}
|
||||
case "not":
|
||||
if len(root.Args) == 0 {
|
||||
return nil, errors.New("xpath: not function must have at least one parameter")
|
||||
}
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: notFunc}
|
||||
case "name":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: nameFunc}
|
||||
case "last":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: lastFunc}
|
||||
case "position":
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: positionFunc}
|
||||
case "count":
|
||||
//if b.firstInput == nil {
|
||||
// return nil, errors.New("xpath: expression must evaluate to node-set")
|
||||
//}
|
||||
if len(root.Args) == 0 {
|
||||
return nil, fmt.Errorf("xpath: count(node-sets) function must with have parameters node-sets")
|
||||
}
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: countFunc}
|
||||
case "sum":
|
||||
if len(root.Args) == 0 {
|
||||
return nil, fmt.Errorf("xpath: sum(node-sets) function must with have parameters node-sets")
|
||||
}
|
||||
argQuery, err := b.processNode(root.Args[0])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
qyOutput = &functionQuery{Input: argQuery, Func: sumFunc}
|
||||
case "concat":
|
||||
if len(root.Args) < 2 {
|
||||
return nil, fmt.Errorf("xpath: concat() must have at least two arguments")
|
||||
}
|
||||
var args []query
|
||||
for _, v := range root.Args {
|
||||
q, err := b.processNode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
args = append(args, q)
|
||||
}
|
||||
qyOutput = &functionQuery{Input: b.firstInput, Func: concatFunc(args...)}
|
||||
default:
|
||||
return nil, fmt.Errorf("not yet support this function %s()", root.FuncName)
|
||||
}
|
||||
return qyOutput, nil
|
||||
}
|
||||
|
||||
func (b *builder) processOperatorNode(root *operatorNode) (query, error) {
|
||||
left, err := b.processNode(root.Left)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
right, err := b.processNode(root.Right)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var qyOutput query
|
||||
switch root.Op {
|
||||
case "+", "-", "div", "mod": // Numeric operator
|
||||
var exprFunc func(interface{}, interface{}) interface{}
|
||||
switch root.Op {
|
||||
case "+":
|
||||
exprFunc = plusFunc
|
||||
case "-":
|
||||
exprFunc = minusFunc
|
||||
case "div":
|
||||
exprFunc = divFunc
|
||||
case "mod":
|
||||
exprFunc = modFunc
|
||||
}
|
||||
qyOutput = &numericQuery{Left: left, Right: right, Do: exprFunc}
|
||||
case "=", ">", ">=", "<", "<=", "!=":
|
||||
var exprFunc func(iterator, interface{}, interface{}) interface{}
|
||||
switch root.Op {
|
||||
case "=":
|
||||
exprFunc = eqFunc
|
||||
case ">":
|
||||
exprFunc = gtFunc
|
||||
case ">=":
|
||||
exprFunc = geFunc
|
||||
case "<":
|
||||
exprFunc = ltFunc
|
||||
case "<=":
|
||||
exprFunc = leFunc
|
||||
case "!=":
|
||||
exprFunc = neFunc
|
||||
}
|
||||
qyOutput = &logicalQuery{Left: left, Right: right, Do: exprFunc}
|
||||
case "or", "and", "|":
|
||||
isOr := false
|
||||
if root.Op == "or" || root.Op == "|" {
|
||||
isOr = true
|
||||
}
|
||||
qyOutput = &booleanQuery{Left: left, Right: right, IsOr: isOr}
|
||||
}
|
||||
return qyOutput, nil
|
||||
}
|
||||
|
||||
func (b *builder) processNode(root node) (q query, err error) {
|
||||
if b.depth = b.depth + 1; b.depth > 1024 {
|
||||
err = errors.New("the xpath expressions is too complex")
|
||||
return
|
||||
}
|
||||
|
||||
switch root.Type() {
|
||||
case nodeConstantOperand:
|
||||
n := root.(*operandNode)
|
||||
q = &constantQuery{Val: n.Val}
|
||||
case nodeRoot:
|
||||
q = &contextQuery{Root: true}
|
||||
case nodeAxis:
|
||||
q, err = b.processAxisNode(root.(*axisNode))
|
||||
b.firstInput = q
|
||||
case nodeFilter:
|
||||
q, err = b.processFilterNode(root.(*filterNode))
|
||||
case nodeFunction:
|
||||
q, err = b.processFunctionNode(root.(*functionNode))
|
||||
case nodeOperator:
|
||||
q, err = b.processOperatorNode(root.(*operatorNode))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// build builds a specified XPath expressions expr.
|
||||
func build(expr string) (q query, err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
switch x := e.(type) {
|
||||
case string:
|
||||
err = errors.New(x)
|
||||
case error:
|
||||
err = x
|
||||
default:
|
||||
err = errors.New("unknown panic")
|
||||
}
|
||||
}
|
||||
}()
|
||||
root := parse(expr)
|
||||
b := &builder{}
|
||||
return b.processNode(root)
|
||||
}
|
|
@ -1,254 +0,0 @@
|
|||
package xpath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The XPath function list.
|
||||
|
||||
func predicate(q query) func(NodeNavigator) bool {
|
||||
type Predicater interface {
|
||||
Test(NodeNavigator) bool
|
||||
}
|
||||
if p, ok := q.(Predicater); ok {
|
||||
return p.Test
|
||||
}
|
||||
return func(NodeNavigator) bool { return true }
|
||||
}
|
||||
|
||||
// positionFunc is a XPath Node Set functions position().
|
||||
func positionFunc(q query, t iterator) interface{} {
|
||||
var (
|
||||
count = 1
|
||||
node = t.Current()
|
||||
)
|
||||
test := predicate(q)
|
||||
for node.MoveToPrevious() {
|
||||
if test(node) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return float64(count)
|
||||
}
|
||||
|
||||
// lastFunc is a XPath Node Set functions last().
|
||||
func lastFunc(q query, t iterator) interface{} {
|
||||
var (
|
||||
count = 0
|
||||
node = t.Current()
|
||||
)
|
||||
node.MoveToFirst()
|
||||
test := predicate(q)
|
||||
for {
|
||||
if test(node) {
|
||||
count++
|
||||
}
|
||||
if !node.MoveToNext() {
|
||||
break
|
||||
}
|
||||
}
|
||||
return float64(count)
|
||||
}
|
||||
|
||||
// countFunc is a XPath Node Set functions count(node-set).
|
||||
func countFunc(q query, t iterator) interface{} {
|
||||
var count = 0
|
||||
test := predicate(q)
|
||||
switch typ := q.Evaluate(t).(type) {
|
||||
case query:
|
||||
for node := typ.Select(t); node != nil; node = typ.Select(t) {
|
||||
if test(node) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
}
|
||||
return float64(count)
|
||||
}
|
||||
|
||||
// sumFunc is a XPath Node Set functions sum(node-set).
|
||||
func sumFunc(q query, t iterator) interface{} {
|
||||
var sum float64
|
||||
switch typ := q.Evaluate(t).(type) {
|
||||
case query:
|
||||
for node := typ.Select(t); node != nil; node = typ.Select(t) {
|
||||
if v, err := strconv.ParseFloat(node.Value(), 64); err == nil {
|
||||
sum += v
|
||||
}
|
||||
}
|
||||
case float64:
|
||||
sum = typ
|
||||
case string:
|
||||
if v, err := strconv.ParseFloat(typ, 64); err != nil {
|
||||
sum = v
|
||||
}
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
// nameFunc is a XPath functions name([node-set]).
|
||||
func nameFunc(q query, t iterator) interface{} {
|
||||
return t.Current().LocalName()
|
||||
}
|
||||
|
||||
// startwithFunc is a XPath functions starts-with(string, string).
|
||||
func startwithFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var (
|
||||
m, n string
|
||||
ok bool
|
||||
)
|
||||
switch typ := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
m = typ
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
default:
|
||||
panic(errors.New("starts-with() function argument type must be string"))
|
||||
}
|
||||
n, ok = arg2.Evaluate(t).(string)
|
||||
if !ok {
|
||||
panic(errors.New("starts-with() function argument type must be string"))
|
||||
}
|
||||
return strings.HasPrefix(m, n)
|
||||
}
|
||||
}
|
||||
|
||||
// containsFunc is a XPath functions contains(string or @attr, string).
|
||||
func containsFunc(arg1, arg2 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var (
|
||||
m, n string
|
||||
ok bool
|
||||
)
|
||||
|
||||
switch typ := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
m = typ
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
default:
|
||||
panic(errors.New("contains() function argument type must be string"))
|
||||
}
|
||||
|
||||
n, ok = arg2.Evaluate(t).(string)
|
||||
if !ok {
|
||||
panic(errors.New("contains() function argument type must be string"))
|
||||
}
|
||||
|
||||
return strings.Contains(m, n)
|
||||
}
|
||||
}
|
||||
|
||||
// normalizespaceFunc is XPath functions normalize-space(string?)
|
||||
func normalizespaceFunc(q query, t iterator) interface{} {
|
||||
var m string
|
||||
switch typ := q.Evaluate(t).(type) {
|
||||
case string:
|
||||
m = typ
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
}
|
||||
return strings.TrimSpace(m)
|
||||
}
|
||||
|
||||
// substringFunc is XPath functions substring function returns a part of a given string.
|
||||
func substringFunc(arg1, arg2, arg3 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var m string
|
||||
switch typ := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
m = typ
|
||||
case query:
|
||||
node := typ.Select(t)
|
||||
if node == nil {
|
||||
return false
|
||||
}
|
||||
m = node.Value()
|
||||
}
|
||||
|
||||
var start, length float64
|
||||
var ok bool
|
||||
|
||||
if start, ok = arg2.Evaluate(t).(float64); !ok {
|
||||
panic(errors.New("substring() function first argument type must be int"))
|
||||
}
|
||||
if arg3 != nil {
|
||||
if length, ok = arg3.Evaluate(t).(float64); !ok {
|
||||
panic(errors.New("substring() function second argument type must be int"))
|
||||
}
|
||||
}
|
||||
if (len(m) - int(start)) < int(length) {
|
||||
panic(errors.New("substring() function start and length argument out of range"))
|
||||
}
|
||||
if length > 0 {
|
||||
return m[int(start):int(length+start)]
|
||||
}
|
||||
return m[int(start):]
|
||||
}
|
||||
}
|
||||
|
||||
// stringLengthFunc is XPATH string-length( [string] ) function that returns a number
|
||||
// equal to the number of characters in a given string.
|
||||
func stringLengthFunc(arg1 query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
switch v := arg1.Evaluate(t).(type) {
|
||||
case string:
|
||||
return float64(len(v))
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
return float64(len(node.Value()))
|
||||
}
|
||||
return float64(0)
|
||||
}
|
||||
}
|
||||
|
||||
// notFunc is XPATH functions not(expression) function operation.
|
||||
func notFunc(q query, t iterator) interface{} {
|
||||
switch v := q.Evaluate(t).(type) {
|
||||
case bool:
|
||||
return !v
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
return node == nil
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// concatFunc is the concat function concatenates two or more
|
||||
// strings and returns the resulting string.
|
||||
// concat( string1 , string2 [, stringn]* )
|
||||
func concatFunc(args ...query) func(query, iterator) interface{} {
|
||||
return func(q query, t iterator) interface{} {
|
||||
var a []string
|
||||
for _, v := range args {
|
||||
switch v := v.Evaluate(t).(type) {
|
||||
case string:
|
||||
a = append(a, v)
|
||||
case query:
|
||||
node := v.Select(t)
|
||||
if node != nil {
|
||||
a = append(a, node.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
return strings.Join(a, "")
|
||||
}
|
||||
}
|
|
@ -1,295 +0,0 @@
|
|||
package xpath
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// The XPath number operator function list.
|
||||
|
||||
// valueType is a return value type.
|
||||
type valueType int
|
||||
|
||||
const (
|
||||
booleanType valueType = iota
|
||||
numberType
|
||||
stringType
|
||||
nodeSetType
|
||||
)
|
||||
|
||||
func getValueType(i interface{}) valueType {
|
||||
v := reflect.ValueOf(i)
|
||||
switch v.Kind() {
|
||||
case reflect.Float64:
|
||||
return numberType
|
||||
case reflect.String:
|
||||
return stringType
|
||||
case reflect.Bool:
|
||||
return booleanType
|
||||
default:
|
||||
if _, ok := i.(query); ok {
|
||||
return nodeSetType
|
||||
}
|
||||
}
|
||||
panic(fmt.Errorf("xpath unknown value type: %v", v.Kind()))
|
||||
}
|
||||
|
||||
type logical func(iterator, string, interface{}, interface{}) bool
|
||||
|
||||
var logicalFuncs = [][]logical{
|
||||
{cmpBooleanBoolean, nil, nil, nil},
|
||||
{nil, cmpNumericNumeric, cmpNumericString, cmpNumericNodeSet},
|
||||
{nil, cmpStringNumeric, cmpStringString, cmpStringNodeSet},
|
||||
{nil, cmpNodeSetNumeric, cmpNodeSetString, cmpNodeSetNodeSet},
|
||||
}
|
||||
|
||||
// number vs number
|
||||
func cmpNumberNumberF(op string, a, b float64) bool {
|
||||
switch op {
|
||||
case "=":
|
||||
return a == b
|
||||
case ">":
|
||||
return a > b
|
||||
case "<":
|
||||
return a < b
|
||||
case ">=":
|
||||
return a >= b
|
||||
case "<=":
|
||||
return a <= b
|
||||
case "!=":
|
||||
return a != b
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// string vs string
|
||||
func cmpStringStringF(op string, a, b string) bool {
|
||||
switch op {
|
||||
case "=":
|
||||
return a == b
|
||||
case ">":
|
||||
return a > b
|
||||
case "<":
|
||||
return a < b
|
||||
case ">=":
|
||||
return a >= b
|
||||
case "<=":
|
||||
return a <= b
|
||||
case "!=":
|
||||
return a != b
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpBooleanBooleanF(op string, a, b bool) bool {
|
||||
switch op {
|
||||
case "or":
|
||||
return a || b
|
||||
case "and":
|
||||
return a && b
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpNumericNumeric(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(float64)
|
||||
b := n.(float64)
|
||||
return cmpNumberNumberF(op, a, b)
|
||||
}
|
||||
|
||||
func cmpNumericString(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(float64)
|
||||
b := n.(string)
|
||||
num, err := strconv.ParseFloat(b, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return cmpNumberNumberF(op, a, num)
|
||||
}
|
||||
|
||||
func cmpNumericNodeSet(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(float64)
|
||||
b := n.(query)
|
||||
|
||||
for {
|
||||
node := b.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
num, err := strconv.ParseFloat(node.Value(), 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if cmpNumberNumberF(op, a, num) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpNodeSetNumeric(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(query)
|
||||
b := n.(float64)
|
||||
for {
|
||||
node := a.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
num, err := strconv.ParseFloat(node.Value(), 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if cmpNumberNumberF(op, num, b) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpNodeSetString(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(query)
|
||||
b := n.(string)
|
||||
for {
|
||||
node := a.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
if cmpStringStringF(op, b, node.Value()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpNodeSetNodeSet(t iterator, op string, m, n interface{}) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpStringNumeric(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(string)
|
||||
b := n.(float64)
|
||||
num, err := strconv.ParseFloat(a, 64)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return cmpNumberNumberF(op, b, num)
|
||||
}
|
||||
|
||||
func cmpStringString(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(string)
|
||||
b := n.(string)
|
||||
return cmpStringStringF(op, a, b)
|
||||
}
|
||||
|
||||
func cmpStringNodeSet(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(string)
|
||||
b := n.(query)
|
||||
for {
|
||||
node := b.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
if cmpStringStringF(op, a, node.Value()) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func cmpBooleanBoolean(t iterator, op string, m, n interface{}) bool {
|
||||
a := m.(bool)
|
||||
b := n.(bool)
|
||||
return cmpBooleanBooleanF(op, a, b)
|
||||
}
|
||||
|
||||
// eqFunc is an `=` operator.
|
||||
func eqFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, "=", m, n)
|
||||
}
|
||||
|
||||
// gtFunc is an `>` operator.
|
||||
func gtFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, ">", m, n)
|
||||
}
|
||||
|
||||
// geFunc is an `>=` operator.
|
||||
func geFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, ">=", m, n)
|
||||
}
|
||||
|
||||
// ltFunc is an `<` operator.
|
||||
func ltFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, "<", m, n)
|
||||
}
|
||||
|
||||
// leFunc is an `<=` operator.
|
||||
func leFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, "<=", m, n)
|
||||
}
|
||||
|
||||
// neFunc is an `!=` operator.
|
||||
func neFunc(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, "!=", m, n)
|
||||
}
|
||||
|
||||
// orFunc is an `or` operator.
|
||||
var orFunc = func(t iterator, m, n interface{}) interface{} {
|
||||
t1 := getValueType(m)
|
||||
t2 := getValueType(n)
|
||||
return logicalFuncs[t1][t2](t, "or", m, n)
|
||||
}
|
||||
|
||||
func numericExpr(m, n interface{}, cb func(float64, float64) float64) float64 {
|
||||
typ := reflect.TypeOf(float64(0))
|
||||
a := reflect.ValueOf(m).Convert(typ)
|
||||
b := reflect.ValueOf(n).Convert(typ)
|
||||
return cb(a.Float(), b.Float())
|
||||
}
|
||||
|
||||
// plusFunc is an `+` operator.
|
||||
var plusFunc = func(m, n interface{}) interface{} {
|
||||
return numericExpr(m, n, func(a, b float64) float64 {
|
||||
return a + b
|
||||
})
|
||||
}
|
||||
|
||||
// minusFunc is an `-` operator.
|
||||
var minusFunc = func(m, n interface{}) interface{} {
|
||||
return numericExpr(m, n, func(a, b float64) float64 {
|
||||
return a - b
|
||||
})
|
||||
}
|
||||
|
||||
// mulFunc is an `*` operator.
|
||||
var mulFunc = func(m, n interface{}) interface{} {
|
||||
return numericExpr(m, n, func(a, b float64) float64 {
|
||||
return a * b
|
||||
})
|
||||
}
|
||||
|
||||
// divFunc is an `DIV` operator.
|
||||
var divFunc = func(m, n interface{}) interface{} {
|
||||
return numericExpr(m, n, func(a, b float64) float64 {
|
||||
return a / b
|
||||
})
|
||||
}
|
||||
|
||||
// modFunc is an 'MOD' operator.
|
||||
var modFunc = func(m, n interface{}) interface{} {
|
||||
return numericExpr(m, n, func(a, b float64) float64 {
|
||||
return float64(int(a) % int(b))
|
||||
})
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -1,728 +0,0 @@
|
|||
package xpath
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type iterator interface {
|
||||
Current() NodeNavigator
|
||||
}
|
||||
|
||||
// An XPath query interface.
|
||||
type query interface {
|
||||
// Select traversing iterator returns a query matched node NodeNavigator.
|
||||
Select(iterator) NodeNavigator
|
||||
|
||||
// Evaluate evaluates query and returns values of the current query.
|
||||
Evaluate(iterator) interface{}
|
||||
|
||||
Clone() query
|
||||
}
|
||||
|
||||
// contextQuery is returns current node on the iterator object query.
|
||||
type contextQuery struct {
|
||||
count int
|
||||
Root bool // Moving to root-level node in the current context iterator.
|
||||
}
|
||||
|
||||
func (c *contextQuery) Select(t iterator) (n NodeNavigator) {
|
||||
if c.count == 0 {
|
||||
c.count++
|
||||
n = t.Current().Copy()
|
||||
if c.Root {
|
||||
n.MoveToRoot()
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (c *contextQuery) Evaluate(iterator) interface{} {
|
||||
c.count = 0
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *contextQuery) Clone() query {
|
||||
return &contextQuery{count: 0, Root: c.Root}
|
||||
}
|
||||
|
||||
// ancestorQuery is an XPath ancestor node query.(ancestor::*|ancestor-self::*)
|
||||
type ancestorQuery struct {
|
||||
iterator func() NodeNavigator
|
||||
|
||||
Self bool
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (a *ancestorQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if a.iterator == nil {
|
||||
node := a.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
first := true
|
||||
a.iterator = func() NodeNavigator {
|
||||
if first && a.Self {
|
||||
first = false
|
||||
if a.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
for node.MoveToParent() {
|
||||
if !a.Predicate(node) {
|
||||
break
|
||||
}
|
||||
return node
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if node := a.iterator(); node != nil {
|
||||
return node
|
||||
}
|
||||
a.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *ancestorQuery) Evaluate(t iterator) interface{} {
|
||||
a.Input.Evaluate(t)
|
||||
a.iterator = nil
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *ancestorQuery) Test(n NodeNavigator) bool {
|
||||
return a.Predicate(n)
|
||||
}
|
||||
|
||||
func (a *ancestorQuery) Clone() query {
|
||||
return &ancestorQuery{Self: a.Self, Input: a.Input.Clone(), Predicate: a.Predicate}
|
||||
}
|
||||
|
||||
// attributeQuery is an XPath attribute node query.(@*)
|
||||
type attributeQuery struct {
|
||||
iterator func() NodeNavigator
|
||||
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (a *attributeQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if a.iterator == nil {
|
||||
node := a.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
a.iterator = func() NodeNavigator {
|
||||
for {
|
||||
onAttr := node.MoveToNextAttribute()
|
||||
if !onAttr {
|
||||
return nil
|
||||
}
|
||||
if a.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node := a.iterator(); node != nil {
|
||||
return node
|
||||
}
|
||||
a.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (a *attributeQuery) Evaluate(t iterator) interface{} {
|
||||
a.Input.Evaluate(t)
|
||||
a.iterator = nil
|
||||
return a
|
||||
}
|
||||
|
||||
func (a *attributeQuery) Test(n NodeNavigator) bool {
|
||||
return a.Predicate(n)
|
||||
}
|
||||
|
||||
func (a *attributeQuery) Clone() query {
|
||||
return &attributeQuery{Input: a.Input.Clone(), Predicate: a.Predicate}
|
||||
}
|
||||
|
||||
// childQuery is an XPath child node query.(child::*)
|
||||
type childQuery struct {
|
||||
posit int
|
||||
iterator func() NodeNavigator
|
||||
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (c *childQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if c.iterator == nil {
|
||||
c.posit = 0
|
||||
node := c.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
first := true
|
||||
c.iterator = func() NodeNavigator {
|
||||
for {
|
||||
if (first && !node.MoveToChild()) || (!first && !node.MoveToNext()) {
|
||||
return nil
|
||||
}
|
||||
first = false
|
||||
if c.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node := c.iterator(); node != nil {
|
||||
c.posit++
|
||||
return node
|
||||
}
|
||||
c.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (c *childQuery) Evaluate(t iterator) interface{} {
|
||||
c.Input.Evaluate(t)
|
||||
c.iterator = nil
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *childQuery) Test(n NodeNavigator) bool {
|
||||
return c.Predicate(n)
|
||||
}
|
||||
|
||||
func (c *childQuery) Clone() query {
|
||||
return &childQuery{Input: c.Input.Clone(), Predicate: c.Predicate}
|
||||
}
|
||||
|
||||
// position returns a position of current NodeNavigator.
|
||||
func (c *childQuery) position() int {
|
||||
return c.posit
|
||||
}
|
||||
|
||||
// descendantQuery is an XPath descendant node query.(descendant::* | descendant-or-self::*)
|
||||
type descendantQuery struct {
|
||||
iterator func() NodeNavigator
|
||||
posit int
|
||||
|
||||
Self bool
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (d *descendantQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if d.iterator == nil {
|
||||
d.posit = 0
|
||||
node := d.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
level := 0
|
||||
first := true
|
||||
d.iterator = func() NodeNavigator {
|
||||
if first && d.Self {
|
||||
first = false
|
||||
if d.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
if node.MoveToChild() {
|
||||
level++
|
||||
} else {
|
||||
for {
|
||||
if level == 0 {
|
||||
return nil
|
||||
}
|
||||
if node.MoveToNext() {
|
||||
break
|
||||
}
|
||||
node.MoveToParent()
|
||||
level--
|
||||
}
|
||||
}
|
||||
if d.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node := d.iterator(); node != nil {
|
||||
d.posit++
|
||||
return node
|
||||
}
|
||||
d.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *descendantQuery) Evaluate(t iterator) interface{} {
|
||||
d.Input.Evaluate(t)
|
||||
d.iterator = nil
|
||||
return d
|
||||
}
|
||||
|
||||
func (d *descendantQuery) Test(n NodeNavigator) bool {
|
||||
return d.Predicate(n)
|
||||
}
|
||||
|
||||
// position returns a position of current NodeNavigator.
|
||||
func (d *descendantQuery) position() int {
|
||||
return d.posit
|
||||
}
|
||||
|
||||
func (d *descendantQuery) Clone() query {
|
||||
return &descendantQuery{Self: d.Self, Input: d.Input.Clone(), Predicate: d.Predicate}
|
||||
}
|
||||
|
||||
// followingQuery is an XPath following node query.(following::*|following-sibling::*)
|
||||
type followingQuery struct {
|
||||
iterator func() NodeNavigator
|
||||
|
||||
Input query
|
||||
Sibling bool // The matching sibling node of current node.
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (f *followingQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if f.iterator == nil {
|
||||
node := f.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
if f.Sibling {
|
||||
f.iterator = func() NodeNavigator {
|
||||
for {
|
||||
if !node.MoveToNext() {
|
||||
return nil
|
||||
}
|
||||
if f.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var q query // descendant query
|
||||
f.iterator = func() NodeNavigator {
|
||||
for {
|
||||
if q == nil {
|
||||
for !node.MoveToNext() {
|
||||
if !node.MoveToParent() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
q = &descendantQuery{
|
||||
Self: true,
|
||||
Input: &contextQuery{},
|
||||
Predicate: f.Predicate,
|
||||
}
|
||||
t.Current().MoveTo(node)
|
||||
}
|
||||
if node := q.Select(t); node != nil {
|
||||
return node
|
||||
}
|
||||
q = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if node := f.iterator(); node != nil {
|
||||
return node
|
||||
}
|
||||
f.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (f *followingQuery) Evaluate(t iterator) interface{} {
|
||||
f.Input.Evaluate(t)
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *followingQuery) Test(n NodeNavigator) bool {
|
||||
return f.Predicate(n)
|
||||
}
|
||||
|
||||
func (f *followingQuery) Clone() query {
|
||||
return &followingQuery{Input: f.Input.Clone(), Sibling: f.Sibling, Predicate: f.Predicate}
|
||||
}
|
||||
|
||||
// precedingQuery is an XPath preceding node query.(preceding::*)
|
||||
type precedingQuery struct {
|
||||
iterator func() NodeNavigator
|
||||
Input query
|
||||
Sibling bool // The matching sibling node of current node.
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (p *precedingQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
if p.iterator == nil {
|
||||
node := p.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
if p.Sibling {
|
||||
p.iterator = func() NodeNavigator {
|
||||
for {
|
||||
for !node.MoveToPrevious() {
|
||||
return nil
|
||||
}
|
||||
if p.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
var q query
|
||||
p.iterator = func() NodeNavigator {
|
||||
for {
|
||||
if q == nil {
|
||||
for !node.MoveToPrevious() {
|
||||
if !node.MoveToParent() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
q = &descendantQuery{
|
||||
Self: true,
|
||||
Input: &contextQuery{},
|
||||
Predicate: p.Predicate,
|
||||
}
|
||||
t.Current().MoveTo(node)
|
||||
}
|
||||
if node := q.Select(t); node != nil {
|
||||
return node
|
||||
}
|
||||
q = nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if node := p.iterator(); node != nil {
|
||||
return node
|
||||
}
|
||||
p.iterator = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (p *precedingQuery) Evaluate(t iterator) interface{} {
|
||||
p.Input.Evaluate(t)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *precedingQuery) Test(n NodeNavigator) bool {
|
||||
return p.Predicate(n)
|
||||
}
|
||||
|
||||
func (p *precedingQuery) Clone() query {
|
||||
return &precedingQuery{Input: p.Input.Clone(), Sibling: p.Sibling, Predicate: p.Predicate}
|
||||
}
|
||||
|
||||
// parentQuery is an XPath parent node query.(parent::*)
|
||||
type parentQuery struct {
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (p *parentQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
node := p.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
node = node.Copy()
|
||||
if node.MoveToParent() && p.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *parentQuery) Evaluate(t iterator) interface{} {
|
||||
p.Input.Evaluate(t)
|
||||
return p
|
||||
}
|
||||
|
||||
func (p *parentQuery) Clone() query {
|
||||
return &parentQuery{Input: p.Input.Clone(), Predicate: p.Predicate}
|
||||
}
|
||||
|
||||
func (p *parentQuery) Test(n NodeNavigator) bool {
|
||||
return p.Predicate(n)
|
||||
}
|
||||
|
||||
// selfQuery is an Self node query.(self::*)
|
||||
type selfQuery struct {
|
||||
Input query
|
||||
Predicate func(NodeNavigator) bool
|
||||
}
|
||||
|
||||
func (s *selfQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
node := s.Input.Select(t)
|
||||
if node == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if s.Predicate(node) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *selfQuery) Evaluate(t iterator) interface{} {
|
||||
s.Input.Evaluate(t)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *selfQuery) Test(n NodeNavigator) bool {
|
||||
return s.Predicate(n)
|
||||
}
|
||||
|
||||
func (s *selfQuery) Clone() query {
|
||||
return &selfQuery{Input: s.Input.Clone(), Predicate: s.Predicate}
|
||||
}
|
||||
|
||||
// filterQuery is an XPath query for predicate filter.
|
||||
type filterQuery struct {
|
||||
Input query
|
||||
Predicate query
|
||||
}
|
||||
|
||||
func (f *filterQuery) do(t iterator) bool {
|
||||
val := reflect.ValueOf(f.Predicate.Evaluate(t))
|
||||
switch val.Kind() {
|
||||
case reflect.Bool:
|
||||
return val.Bool()
|
||||
case reflect.String:
|
||||
return len(val.String()) > 0
|
||||
case reflect.Float64:
|
||||
pt := float64(getNodePosition(f.Input))
|
||||
return int(val.Float()) == int(pt)
|
||||
default:
|
||||
if q, ok := f.Predicate.(query); ok {
|
||||
return q.Select(t) != nil
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (f *filterQuery) Select(t iterator) NodeNavigator {
|
||||
for {
|
||||
node := f.Input.Select(t)
|
||||
if node == nil {
|
||||
return node
|
||||
}
|
||||
node = node.Copy()
|
||||
//fmt.Println(node.LocalName())
|
||||
|
||||
t.Current().MoveTo(node)
|
||||
if f.do(t) {
|
||||
return node
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *filterQuery) Evaluate(t iterator) interface{} {
|
||||
f.Input.Evaluate(t)
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *filterQuery) Clone() query {
|
||||
return &filterQuery{Input: f.Input.Clone(), Predicate: f.Predicate.Clone()}
|
||||
}
|
||||
|
||||
// functionQuery is an XPath function that call a function to returns
|
||||
// value of current NodeNavigator node.
|
||||
type functionQuery struct {
|
||||
Input query // Node Set
|
||||
Func func(query, iterator) interface{} // The xpath function.
|
||||
}
|
||||
|
||||
func (f *functionQuery) Select(t iterator) NodeNavigator {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Evaluate call a specified function that will returns the
|
||||
// following value type: number,string,boolean.
|
||||
func (f *functionQuery) Evaluate(t iterator) interface{} {
|
||||
return f.Func(f.Input, t)
|
||||
}
|
||||
|
||||
func (f *functionQuery) Clone() query {
|
||||
return &functionQuery{Input: f.Input.Clone(), Func: f.Func}
|
||||
}
|
||||
|
||||
// constantQuery is an XPath constant operand.
|
||||
type constantQuery struct {
|
||||
Val interface{}
|
||||
}
|
||||
|
||||
func (c *constantQuery) Select(t iterator) NodeNavigator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *constantQuery) Evaluate(t iterator) interface{} {
|
||||
return c.Val
|
||||
}
|
||||
|
||||
func (c *constantQuery) Clone() query {
|
||||
return c
|
||||
}
|
||||
|
||||
// logicalQuery is an XPath logical expression.
|
||||
type logicalQuery struct {
|
||||
Left, Right query
|
||||
|
||||
Do func(iterator, interface{}, interface{}) interface{}
|
||||
}
|
||||
|
||||
func (l *logicalQuery) Select(t iterator) NodeNavigator {
|
||||
// When a XPath expr is logical expression.
|
||||
node := t.Current().Copy()
|
||||
val := l.Evaluate(t)
|
||||
switch val.(type) {
|
||||
case bool:
|
||||
if val.(bool) == true {
|
||||
return node
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *logicalQuery) Evaluate(t iterator) interface{} {
|
||||
m := l.Left.Evaluate(t)
|
||||
n := l.Right.Evaluate(t)
|
||||
return l.Do(t, m, n)
|
||||
}
|
||||
|
||||
func (l *logicalQuery) Clone() query {
|
||||
return &logicalQuery{Left: l.Left.Clone(), Right: l.Right.Clone(), Do: l.Do}
|
||||
}
|
||||
|
||||
// numericQuery is an XPath numeric operator expression.
|
||||
type numericQuery struct {
|
||||
Left, Right query
|
||||
|
||||
Do func(interface{}, interface{}) interface{}
|
||||
}
|
||||
|
||||
func (n *numericQuery) Select(t iterator) NodeNavigator {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *numericQuery) Evaluate(t iterator) interface{} {
|
||||
m := n.Left.Evaluate(t)
|
||||
k := n.Right.Evaluate(t)
|
||||
return n.Do(m, k)
|
||||
}
|
||||
|
||||
func (n *numericQuery) Clone() query {
|
||||
return &numericQuery{Left: n.Left.Clone(), Right: n.Right.Clone(), Do: n.Do}
|
||||
}
|
||||
|
||||
type booleanQuery struct {
|
||||
IsOr bool
|
||||
Left, Right query
|
||||
iterator func() NodeNavigator
|
||||
}
|
||||
|
||||
func (b *booleanQuery) Select(t iterator) NodeNavigator {
|
||||
if b.iterator == nil {
|
||||
var list []NodeNavigator
|
||||
i := 0
|
||||
root := t.Current().Copy()
|
||||
if b.IsOr {
|
||||
for {
|
||||
node := b.Left.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
list = append(list, node)
|
||||
}
|
||||
t.Current().MoveTo(root)
|
||||
for {
|
||||
node := b.Right.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
list = append(list, node)
|
||||
}
|
||||
} else {
|
||||
var m []NodeNavigator
|
||||
var n []NodeNavigator
|
||||
for {
|
||||
node := b.Left.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
list = append(m, node)
|
||||
}
|
||||
t.Current().MoveTo(root)
|
||||
for {
|
||||
node := b.Right.Select(t)
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
node = node.Copy()
|
||||
list = append(n, node)
|
||||
}
|
||||
for _, k := range m {
|
||||
for _, j := range n {
|
||||
if k == j {
|
||||
list = append(list, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.iterator = func() NodeNavigator {
|
||||
if i >= len(list) {
|
||||
return nil
|
||||
}
|
||||
node := list[i]
|
||||
i++
|
||||
return node
|
||||
}
|
||||
}
|
||||
return b.iterator()
|
||||
}
|
||||
|
||||
func (b *booleanQuery) Evaluate(t iterator) interface{} {
|
||||
m := b.Left.Evaluate(t)
|
||||
if m.(bool) == b.IsOr {
|
||||
return m
|
||||
}
|
||||
return b.Right.Evaluate(t)
|
||||
}
|
||||
|
||||
func (b *booleanQuery) Clone() query {
|
||||
return &booleanQuery{IsOr: b.IsOr, Left: b.Left.Clone(), Right: b.Right.Clone()}
|
||||
}
|
||||
|
||||
func getNodePosition(q query) int {
|
||||
type Position interface {
|
||||
position() int
|
||||
}
|
||||
if count, ok := q.(Position); ok {
|
||||
return count.position()
|
||||
}
|
||||
return 1
|
||||
}
|
|
@ -1,154 +0,0 @@
|
|||
package xpath
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// NodeType represents a type of XPath node.
|
||||
type NodeType int
|
||||
|
||||
const (
|
||||
// RootNode is a root node of the XML document or node tree.
|
||||
RootNode NodeType = iota
|
||||
|
||||
// ElementNode is an element, such as <element>.
|
||||
ElementNode
|
||||
|
||||
// AttributeNode is an attribute, such as id='123'.
|
||||
AttributeNode
|
||||
|
||||
// TextNode is the text content of a node.
|
||||
TextNode
|
||||
|
||||
// CommentNode is a comment node, such as <!-- my comment -->
|
||||
CommentNode
|
||||
)
|
||||
|
||||
// NodeNavigator provides cursor model for navigating XML data.
|
||||
type NodeNavigator interface {
|
||||
// NodeType returns the XPathNodeType of the current node.
|
||||
NodeType() NodeType
|
||||
|
||||
// LocalName gets the Name of the current node.
|
||||
LocalName() string
|
||||
|
||||
// Prefix returns namespace prefix associated with the current node.
|
||||
Prefix() string
|
||||
|
||||
// Value gets the value of current node.
|
||||
Value() string
|
||||
|
||||
// Copy does a deep copy of the NodeNavigator and all its components.
|
||||
Copy() NodeNavigator
|
||||
|
||||
// MoveToRoot moves the NodeNavigator to the root node of the current node.
|
||||
MoveToRoot()
|
||||
|
||||
// MoveToParent moves the NodeNavigator to the parent node of the current node.
|
||||
MoveToParent() bool
|
||||
|
||||
// MoveToNextAttribute moves the NodeNavigator to the next attribute on current node.
|
||||
MoveToNextAttribute() bool
|
||||
|
||||
// MoveToChild moves the NodeNavigator to the first child node of the current node.
|
||||
MoveToChild() bool
|
||||
|
||||
// MoveToFirst moves the NodeNavigator to the first sibling node of the current node.
|
||||
MoveToFirst() bool
|
||||
|
||||
// MoveToNext moves the NodeNavigator to the next sibling node of the current node.
|
||||
MoveToNext() bool
|
||||
|
||||
// MoveToPrevious moves the NodeNavigator to the previous sibling node of the current node.
|
||||
MoveToPrevious() bool
|
||||
|
||||
// MoveTo moves the NodeNavigator to the same position as the specified NodeNavigator.
|
||||
MoveTo(NodeNavigator) bool
|
||||
}
|
||||
|
||||
// NodeIterator holds all matched Node object.
|
||||
type NodeIterator struct {
|
||||
node NodeNavigator
|
||||
query query
|
||||
}
|
||||
|
||||
// Current returns current node which matched.
|
||||
func (t *NodeIterator) Current() NodeNavigator {
|
||||
return t.node
|
||||
}
|
||||
|
||||
// MoveNext moves Navigator to the next match node.
|
||||
func (t *NodeIterator) MoveNext() bool {
|
||||
n := t.query.Select(t)
|
||||
if n != nil {
|
||||
if !t.node.MoveTo(n) {
|
||||
t.node = n.Copy()
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Select selects a node set using the specified XPath expression.
|
||||
// This method is deprecated, recommend using Expr.Select() method instead.
|
||||
func Select(root NodeNavigator, expr string) *NodeIterator {
|
||||
exp, err := Compile(expr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return exp.Select(root)
|
||||
}
|
||||
|
||||
// Expr is an XPath expression for query.
|
||||
type Expr struct {
|
||||
s string
|
||||
q query
|
||||
}
|
||||
|
||||
type iteratorFunc func() NodeNavigator
|
||||
|
||||
func (f iteratorFunc) Current() NodeNavigator {
|
||||
return f()
|
||||
}
|
||||
|
||||
// Evaluate returns the result of the expression.
|
||||
// The result type of the expression is one of the follow: bool,float64,string,NodeIterator).
|
||||
func (expr *Expr) Evaluate(root NodeNavigator) interface{} {
|
||||
val := expr.q.Evaluate(iteratorFunc(func() NodeNavigator { return root }))
|
||||
switch val.(type) {
|
||||
case query:
|
||||
return &NodeIterator{query: expr.q.Clone(), node: root}
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// Select selects a node set using the specified XPath expression.
|
||||
func (expr *Expr) Select(root NodeNavigator) *NodeIterator {
|
||||
return &NodeIterator{query: expr.q.Clone(), node: root}
|
||||
}
|
||||
|
||||
// String returns XPath expression string.
|
||||
func (expr *Expr) String() string {
|
||||
return expr.s
|
||||
}
|
||||
|
||||
// Compile compiles an XPath expression string.
|
||||
func Compile(expr string) (*Expr, error) {
|
||||
if expr == "" {
|
||||
return nil, errors.New("expr expression is nil")
|
||||
}
|
||||
qy, err := build(expr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Expr{s: expr, q: qy}, nil
|
||||
}
|
||||
|
||||
// MustCompile compiles an XPath expression string and ignored error.
|
||||
func MustCompile(expr string) *Expr {
|
||||
exp, err := Compile(expr)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return exp
|
||||
}
|
|
@ -1,17 +0,0 @@
|
|||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
|
@ -1,252 +0,0 @@
|
|||
package xmlquery
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/html/charset"
|
||||
)
|
||||
|
||||
// A NodeType is the type of a Node.
|
||||
type NodeType uint
|
||||
|
||||
const (
|
||||
// DocumentNode is a document object that, as the root of the document tree,
|
||||
// provides access to the entire XML document.
|
||||
DocumentNode NodeType = iota
|
||||
// DeclarationNode is the document type declaration, indicated by the following
|
||||
// tag (for example, <!DOCTYPE...> ).
|
||||
DeclarationNode
|
||||
// ElementNode is an element (for example, <item> ).
|
||||
ElementNode
|
||||
// TextNode is the text content of a node.
|
||||
TextNode
|
||||
// CommentNode a comment (for example, <!-- my comment --> ).
|
||||
CommentNode
|
||||
)
|
||||
|
||||
// A Node consists of a NodeType and some Data (tag name for
|
||||
// element nodes, content for text) and are part of a tree of Nodes.
|
||||
type Node struct {
|
||||
Parent, FirstChild, LastChild, PrevSibling, NextSibling *Node
|
||||
|
||||
Type NodeType
|
||||
Data string
|
||||
Prefix string
|
||||
NamespaceURI string
|
||||
Attr []xml.Attr
|
||||
|
||||
level int // node level in the tree
|
||||
}
|
||||
|
||||
// InnerText returns the text between the start and end tags of the object.
|
||||
func (n *Node) InnerText() string {
|
||||
var output func(*bytes.Buffer, *Node)
|
||||
output = func(buf *bytes.Buffer, n *Node) {
|
||||
switch n.Type {
|
||||
case TextNode:
|
||||
buf.WriteString(n.Data)
|
||||
return
|
||||
case CommentNode:
|
||||
return
|
||||
}
|
||||
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||
output(buf, child)
|
||||
}
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
output(&buf, n)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func outputXML(buf *bytes.Buffer, n *Node) {
|
||||
if n.Type == TextNode || n.Type == CommentNode {
|
||||
buf.WriteString(strings.TrimSpace(n.Data))
|
||||
return
|
||||
}
|
||||
buf.WriteString("<" + n.Data)
|
||||
for _, attr := range n.Attr {
|
||||
if attr.Name.Space != "" {
|
||||
buf.WriteString(fmt.Sprintf(` %s:%s="%s"`, attr.Name.Space, attr.Name.Local, attr.Value))
|
||||
} else {
|
||||
buf.WriteString(fmt.Sprintf(` %s="%s"`, attr.Name.Local, attr.Value))
|
||||
}
|
||||
}
|
||||
buf.WriteString(">")
|
||||
for child := n.FirstChild; child != nil; child = child.NextSibling {
|
||||
outputXML(buf, child)
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("</%s>", n.Data))
|
||||
}
|
||||
|
||||
// OutputXML returns the text that including tags name.
|
||||
func (n *Node) OutputXML(self bool) string {
|
||||
var buf bytes.Buffer
|
||||
if self {
|
||||
outputXML(&buf, n)
|
||||
} else {
|
||||
for n := n.FirstChild; n != nil; n = n.NextSibling {
|
||||
outputXML(&buf, n)
|
||||
}
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func addAttr(n *Node, key, val string) {
|
||||
var attr xml.Attr
|
||||
if i := strings.Index(key, ":"); i > 0 {
|
||||
attr = xml.Attr{
|
||||
Name: xml.Name{Space: key[:i], Local: key[i+1:]},
|
||||
Value: val,
|
||||
}
|
||||
} else {
|
||||
attr = xml.Attr{
|
||||
Name: xml.Name{Local: key},
|
||||
Value: val,
|
||||
}
|
||||
}
|
||||
|
||||
n.Attr = append(n.Attr, attr)
|
||||
}
|
||||
|
||||
func addChild(parent, n *Node) {
|
||||
n.Parent = parent
|
||||
if parent.FirstChild == nil {
|
||||
parent.FirstChild = n
|
||||
} else {
|
||||
parent.LastChild.NextSibling = n
|
||||
n.PrevSibling = parent.LastChild
|
||||
}
|
||||
|
||||
parent.LastChild = n
|
||||
}
|
||||
|
||||
func addSibling(sibling, n *Node) {
|
||||
n.Parent = sibling.Parent
|
||||
sibling.NextSibling = n
|
||||
n.PrevSibling = sibling
|
||||
if sibling.Parent != nil {
|
||||
sibling.Parent.LastChild = n
|
||||
}
|
||||
}
|
||||
|
||||
// LoadURL loads the XML document from the specified URL.
|
||||
func LoadURL(url string) (*Node, error) {
|
||||
resp, err := http.Get(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return parse(resp.Body)
|
||||
}
|
||||
|
||||
func parse(r io.Reader) (*Node, error) {
|
||||
var (
|
||||
decoder = xml.NewDecoder(r)
|
||||
doc = &Node{Type: DocumentNode}
|
||||
space2prefix = make(map[string]string)
|
||||
level = 0
|
||||
)
|
||||
decoder.CharsetReader = charset.NewReaderLabel
|
||||
prev := doc
|
||||
for {
|
||||
tok, err := decoder.Token()
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
goto quit
|
||||
case err != nil:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch tok := tok.(type) {
|
||||
case xml.StartElement:
|
||||
if level == 0 {
|
||||
// mising XML declaration
|
||||
node := &Node{Type: DeclarationNode, Data: "xml", level: 1}
|
||||
addChild(prev, node)
|
||||
level = 1
|
||||
prev = node
|
||||
}
|
||||
node := &Node{
|
||||
Type: ElementNode,
|
||||
Data: tok.Name.Local,
|
||||
Prefix: space2prefix[tok.Name.Space],
|
||||
NamespaceURI: tok.Name.Space,
|
||||
Attr: tok.Attr,
|
||||
level: level,
|
||||
}
|
||||
for _, att := range tok.Attr {
|
||||
if att.Name.Space == "xmlns" {
|
||||
space2prefix[att.Value] = att.Name.Local
|
||||
}
|
||||
}
|
||||
//fmt.Println(fmt.Sprintf("start > %s : %d", node.Data, level))
|
||||
if level == prev.level {
|
||||
addSibling(prev, node)
|
||||
} else if level > prev.level {
|
||||
addChild(prev, node)
|
||||
} else if level < prev.level {
|
||||
for i := prev.level - level; i > 1; i-- {
|
||||
prev = prev.Parent
|
||||
}
|
||||
addSibling(prev.Parent, node)
|
||||
}
|
||||
prev = node
|
||||
level++
|
||||
case xml.EndElement:
|
||||
level--
|
||||
case xml.CharData:
|
||||
node := &Node{Type: TextNode, Data: string(tok), level: level}
|
||||
if level == prev.level {
|
||||
addSibling(prev, node)
|
||||
} else if level > prev.level {
|
||||
addChild(prev, node)
|
||||
}
|
||||
case xml.Comment:
|
||||
node := &Node{Type: CommentNode, Data: string(tok), level: level}
|
||||
if level == prev.level {
|
||||
addSibling(prev, node)
|
||||
} else if level > prev.level {
|
||||
addChild(prev, node)
|
||||
}
|
||||
case xml.ProcInst: // Processing Instruction
|
||||
if prev.Type != DeclarationNode {
|
||||
level++
|
||||
}
|
||||
node := &Node{Type: DeclarationNode, Data: tok.Target, level: level}
|
||||
pairs := strings.Split(string(tok.Inst), " ")
|
||||
for _, pair := range pairs {
|
||||
pair = strings.TrimSpace(pair)
|
||||
if i := strings.Index(pair, "="); i > 0 {
|
||||
addAttr(node, pair[:i], strings.Trim(pair[i+1:], `"`))
|
||||
}
|
||||
}
|
||||
if level == prev.level {
|
||||
addSibling(prev, node)
|
||||
} else if level > prev.level {
|
||||
addChild(prev, node)
|
||||
}
|
||||
prev = node
|
||||
case xml.Directive:
|
||||
}
|
||||
|
||||
}
|
||||
quit:
|
||||
return doc, nil
|
||||
}
|
||||
|
||||
// Parse returns the parse tree for the XML from the given Reader.
|
||||
func Parse(r io.Reader) (*Node, error) {
|
||||
return parse(r)
|
||||
}
|
||||
|
||||
// ParseXML returns the parse tree for the XML from the given Reader.Deprecated.
|
||||
func ParseXML(r io.Reader) (*Node, error) {
|
||||
return parse(r)
|
||||
}
|
|
@ -1,230 +0,0 @@
|
|||
/*
|
||||
Package xmlquery provides extract data from XML documents using XPath expression.
|
||||
*/
|
||||
package xmlquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/antchfx/xpath"
|
||||
)
|
||||
|
||||
// SelectElements finds child elements with the specified name.
|
||||
func (n *Node) SelectElements(name string) []*Node {
|
||||
return Find(n, name)
|
||||
}
|
||||
|
||||
// SelectElement finds child elements with the specified name.
|
||||
func (n *Node) SelectElement(name string) *Node {
|
||||
return FindOne(n, name)
|
||||
}
|
||||
|
||||
// SelectAttr returns the attribute value with the specified name.
|
||||
func (n *Node) SelectAttr(name string) string {
|
||||
var local, space string
|
||||
local = name
|
||||
if i := strings.Index(name, ":"); i > 0 {
|
||||
space = name[:i]
|
||||
local = name[i+1:]
|
||||
}
|
||||
for _, attr := range n.Attr {
|
||||
if attr.Name.Local == local && attr.Name.Space == space {
|
||||
return attr.Value
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
var _ xpath.NodeNavigator = &NodeNavigator{}
|
||||
|
||||
// CreateXPathNavigator creates a new xpath.NodeNavigator for the specified html.Node.
|
||||
func CreateXPathNavigator(top *Node) *NodeNavigator {
|
||||
return &NodeNavigator{curr: top, root: top, attr: -1}
|
||||
}
|
||||
|
||||
// Find searches the Node that matches by the specified XPath expr.
|
||||
func Find(top *Node, expr string) []*Node {
|
||||
exp, err := xpath.Compile(expr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t := exp.Select(CreateXPathNavigator(top))
|
||||
var elems []*Node
|
||||
for t.MoveNext() {
|
||||
elems = append(elems, (t.Current().(*NodeNavigator)).curr)
|
||||
}
|
||||
return elems
|
||||
}
|
||||
|
||||
// FindOne searches the Node that matches by the specified XPath expr,
|
||||
// and returns first element of matched.
|
||||
func FindOne(top *Node, expr string) *Node {
|
||||
exp, err := xpath.Compile(expr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t := exp.Select(CreateXPathNavigator(top))
|
||||
var elem *Node
|
||||
if t.MoveNext() {
|
||||
elem = (t.Current().(*NodeNavigator)).curr
|
||||
}
|
||||
return elem
|
||||
}
|
||||
|
||||
// FindEach searches the html.Node and calls functions cb.
|
||||
func FindEach(top *Node, expr string, cb func(int, *Node)) {
|
||||
exp, err := xpath.Compile(expr)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t := exp.Select(CreateXPathNavigator(top))
|
||||
var i int
|
||||
for t.MoveNext() {
|
||||
cb(i, (t.Current().(*NodeNavigator)).curr)
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
type NodeNavigator struct {
|
||||
root, curr *Node
|
||||
attr int
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) Current() *Node {
|
||||
return x.curr
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) NodeType() xpath.NodeType {
|
||||
switch x.curr.Type {
|
||||
case CommentNode:
|
||||
return xpath.CommentNode
|
||||
case TextNode:
|
||||
return xpath.TextNode
|
||||
case DeclarationNode, DocumentNode:
|
||||
return xpath.RootNode
|
||||
case ElementNode:
|
||||
if x.attr != -1 {
|
||||
return xpath.AttributeNode
|
||||
}
|
||||
return xpath.ElementNode
|
||||
}
|
||||
panic(fmt.Sprintf("unknown XML node type: %v", x.curr.Type))
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) LocalName() string {
|
||||
if x.attr != -1 {
|
||||
return x.curr.Attr[x.attr].Name.Local
|
||||
}
|
||||
return x.curr.Data
|
||||
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) Prefix() string {
|
||||
return x.curr.Prefix
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) Value() string {
|
||||
switch x.curr.Type {
|
||||
case CommentNode:
|
||||
return x.curr.Data
|
||||
case ElementNode:
|
||||
if x.attr != -1 {
|
||||
return x.curr.Attr[x.attr].Value
|
||||
}
|
||||
return x.curr.InnerText()
|
||||
case TextNode:
|
||||
return x.curr.Data
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) Copy() xpath.NodeNavigator {
|
||||
n := *x
|
||||
return &n
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToRoot() {
|
||||
x.curr = x.root
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToParent() bool {
|
||||
if x.attr != -1 {
|
||||
x.attr = -1
|
||||
return true
|
||||
} else if node := x.curr.Parent; node != nil {
|
||||
x.curr = node
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToNextAttribute() bool {
|
||||
if x.attr >= len(x.curr.Attr)-1 {
|
||||
return false
|
||||
}
|
||||
x.attr++
|
||||
return true
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToChild() bool {
|
||||
if x.attr != -1 {
|
||||
return false
|
||||
}
|
||||
if node := x.curr.FirstChild; node != nil {
|
||||
x.curr = node
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToFirst() bool {
|
||||
if x.attr != -1 || x.curr.PrevSibling == nil {
|
||||
return false
|
||||
}
|
||||
for {
|
||||
node := x.curr.PrevSibling
|
||||
if node == nil {
|
||||
break
|
||||
}
|
||||
x.curr = node
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) String() string {
|
||||
return x.Value()
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToNext() bool {
|
||||
if x.attr != -1 {
|
||||
return false
|
||||
}
|
||||
if node := x.curr.NextSibling; node != nil {
|
||||
x.curr = node
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveToPrevious() bool {
|
||||
if x.attr != -1 {
|
||||
return false
|
||||
}
|
||||
if node := x.curr.PrevSibling; node != nil {
|
||||
x.curr = node
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *NodeNavigator) MoveTo(other xpath.NodeNavigator) bool {
|
||||
node, ok := other.(*NodeNavigator)
|
||||
if !ok || node.root != x.root {
|
||||
return false
|
||||
}
|
||||
|
||||
x.curr = node.curr
|
||||
x.attr = node.attr
|
||||
return true
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
.idea*
|
|
@ -1,48 +0,0 @@
|
|||
|
||||
# winrmtest
|
||||
|
||||
An in-progress testing package to compliment the [masterzen/winrm](https://github.com/masterzen/winrm) Go-based winrm library.
|
||||
|
||||
My primary use-case for this is for [dylanmei/packer-communicator-winrm](https://github.com/dylanmei/packer-communicator-winrm), a [Packer](http://packer.io) communicator plugin for interacting with machines using Windows Remote Management.
|
||||
|
||||
## Example Use
|
||||
|
||||
A fictitious "Windows tools" package.
|
||||
|
||||
```
|
||||
|
||||
package wintools
|
||||
|
||||
import (
|
||||
"io"
|
||||
"testing"
|
||||
"github.com/dylanmei/winrmtest"
|
||||
)
|
||||
|
||||
func Test_empty_temp_directory(t *testing.T) {
|
||||
r := winrmtest.NewRemote()
|
||||
defer r.Close()
|
||||
|
||||
r.CommandFunc(wimrmtest.MatchText("dir C:\Temp"), func(out, err io.Writer) int {
|
||||
out.Write([]byte(` Volume in drive C is Windows 2012 R2
|
||||
Volume Serial Number is XXXX-XXXX
|
||||
|
||||
Directory of C:\
|
||||
|
||||
File Not Found`))
|
||||
return 0
|
||||
})
|
||||
|
||||
lister := NewDirectoryLister(r.Host, r.Port)
|
||||
list, _ := lister.TempDirectory()
|
||||
|
||||
if count := len(list.Dirs()); count != 0 {
|
||||
t.Errorf("Expected 0 directories but found %d.\n", count)
|
||||
}
|
||||
|
||||
if count := len(list.Files()); count != 0 {
|
||||
t.Errorf("Expected 0 files but found %d.\n", count)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
package winrmtest
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Remote respresents a WinRM server
|
||||
type Remote struct {
|
||||
Host string
|
||||
Port int
|
||||
server *httptest.Server
|
||||
service *wsman
|
||||
}
|
||||
|
||||
// NewRemote returns a new initialized Remote
|
||||
func NewRemote() *Remote {
|
||||
mux := http.NewServeMux()
|
||||
srv := httptest.NewServer(mux)
|
||||
|
||||
host, port, _ := splitAddr(srv.URL)
|
||||
remote := Remote{
|
||||
Host: host,
|
||||
Port: port,
|
||||
server: srv,
|
||||
service: &wsman{},
|
||||
}
|
||||
|
||||
mux.Handle("/wsman", remote.service)
|
||||
return &remote
|
||||
}
|
||||
|
||||
// Close closes the WinRM server
|
||||
func (r *Remote) Close() {
|
||||
r.server.Close()
|
||||
}
|
||||
|
||||
// MatcherFunc respresents a function used to match WinRM commands
|
||||
type MatcherFunc func(candidate string) bool
|
||||
|
||||
// MatchText return a new MatcherFunc based on text matching
|
||||
func MatchText(text string) MatcherFunc {
|
||||
return func(candidate string) bool {
|
||||
return text == candidate
|
||||
}
|
||||
}
|
||||
|
||||
// MatchPattern return a new MatcherFunc based on pattern matching
|
||||
func MatchPattern(pattern string) MatcherFunc {
|
||||
r := regexp.MustCompile(pattern)
|
||||
return func(candidate string) bool {
|
||||
return r.MatchString(candidate)
|
||||
}
|
||||
}
|
||||
|
||||
// CommandFunc respresents a function used to mock WinRM commands
|
||||
type CommandFunc func(out, err io.Writer) (exitCode int)
|
||||
|
||||
// CommandFunc adds a WinRM command mock function to the WinRM server
|
||||
func (r *Remote) CommandFunc(m MatcherFunc, f CommandFunc) {
|
||||
r.service.HandleCommand(m, f)
|
||||
}
|
||||
|
||||
func splitAddr(addr string) (host string, port int, err error) {
|
||||
u, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
split := strings.Split(u.Host, ":")
|
||||
host = split[0]
|
||||
port, err = strconv.Atoi(split[1])
|
||||
return
|
||||
}
|
|
@ -1,160 +0,0 @@
|
|||
package winrmtest
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/antchfx/xquery/xml"
|
||||
"github.com/satori/go.uuid"
|
||||
)
|
||||
|
||||
type wsman struct {
|
||||
commands []*command
|
||||
identitySeed int
|
||||
}
|
||||
|
||||
type command struct {
|
||||
id string
|
||||
matcher MatcherFunc
|
||||
handler CommandFunc
|
||||
}
|
||||
|
||||
func (w *wsman) HandleCommand(m MatcherFunc, f CommandFunc) string {
|
||||
id := uuid.NewV4().String()
|
||||
w.commands = append(w.commands, &command{
|
||||
id: id,
|
||||
matcher: m,
|
||||
handler: f,
|
||||
})
|
||||
|
||||
return id
|
||||
}
|
||||
|
||||
func (w *wsman) CommandByText(cmd string) *command {
|
||||
for _, c := range w.commands {
|
||||
if c.matcher(cmd) {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *wsman) CommandByID(id string) *command {
|
||||
for _, c := range w.commands {
|
||||
if c.id == id {
|
||||
return c
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *wsman) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Add("Content-Type", "application/soap+xml")
|
||||
|
||||
defer r.Body.Close()
|
||||
env, err := xmlquery.Parse(r.Body)
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
action := readAction(env)
|
||||
switch {
|
||||
case strings.HasSuffix(action, "transfer/Create"):
|
||||
// create a new shell
|
||||
|
||||
rw.Write([]byte(`
|
||||
<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell">
|
||||
<rsp:ShellId>123</rsp:ShellId>
|
||||
</env:Envelope>`))
|
||||
|
||||
case strings.HasSuffix(action, "shell/Command"):
|
||||
// execute on behalf of the client
|
||||
text := readCommand(env)
|
||||
cmd := w.CommandByText(text)
|
||||
|
||||
if cmd == nil {
|
||||
fmt.Printf("I don't know this command: Command=%s\n", text)
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
rw.Write([]byte(fmt.Sprintf(`
|
||||
<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell">
|
||||
<rsp:CommandId>%s</rsp:CommandId>
|
||||
</env:Envelope>`, cmd.id)))
|
||||
|
||||
case strings.HasSuffix(action, "shell/Receive"):
|
||||
// client ready to receive the results
|
||||
|
||||
id := readCommandIDFromDesiredStream(env)
|
||||
cmd := w.CommandByID(id)
|
||||
|
||||
if cmd == nil {
|
||||
fmt.Printf("I don't know this command: CommandId=%s\n", id)
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
stdout := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
result := cmd.handler(stdout, stderr)
|
||||
content := base64.StdEncoding.EncodeToString(stdout.Bytes())
|
||||
|
||||
rw.Write([]byte(fmt.Sprintf(`
|
||||
<env:Envelope xmlns:env="http://www.w3.org/2003/05/soap-envelope" xmlns:rsp="http://schemas.microsoft.com/wbem/wsman/1/windows/shell">
|
||||
<rsp:ReceiveResponse>
|
||||
<rsp:Stream Name="stdout" CommandId="%s">%s</rsp:Stream>
|
||||
<rsp:Stream Name="stdout" CommandId="%s" End="true"></rsp:Stream>
|
||||
<rsp:Stream Name="stderr" CommandId="%s" End="true"></rsp:Stream>
|
||||
<rsp:CommandState State="http://schemas.microsoft.com/wbem/wsman/1/windows/shell/CommandState/Done">
|
||||
<rsp:ExitCode>%d</rsp:ExitCode>
|
||||
</rsp:CommandState>
|
||||
</rsp:ReceiveResponse>
|
||||
</env:Envelope>`, id, content, id, id, result)))
|
||||
|
||||
case strings.HasSuffix(action, "shell/Signal"):
|
||||
// end of the shell command
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
case strings.HasSuffix(action, "transfer/Delete"):
|
||||
// end of the session
|
||||
rw.WriteHeader(http.StatusOK)
|
||||
default:
|
||||
fmt.Printf("I don't know this action: %s\n", action)
|
||||
rw.WriteHeader(http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
|
||||
func readAction(env *xmlquery.Node) string {
|
||||
xpath := xmlquery.FindOne(env, "//a:Action")
|
||||
if xpath == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return xpath.InnerText()
|
||||
}
|
||||
|
||||
func readCommand(env *xmlquery.Node) string {
|
||||
xpath := xmlquery.FindOne(env, "//rsp:Command")
|
||||
if xpath == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if unquoted, err := strconv.Unquote(xpath.InnerText()); err == nil {
|
||||
return unquoted
|
||||
}
|
||||
return xpath.InnerText()
|
||||
}
|
||||
|
||||
func readCommandIDFromDesiredStream(env *xmlquery.Node) string {
|
||||
xpath := xmlquery.FindOne(env, "//rsp:DesiredStream")
|
||||
if xpath == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return xpath.SelectAttr("CommandId")
|
||||
}
|
|
@ -0,0 +1,243 @@
|
|||
/*
|
||||
Package acctest provides an acceptance testing framework for testing builders
|
||||
and provisioners.
|
||||
|
||||
Writing Provisioner Acceptance Tests
|
||||
|
||||
Packer has implemented a `ProvisionerTestCase` structure to help write
|
||||
provisioner acceptance tests.
|
||||
|
||||
```go
|
||||
type ProvisionerTestCase struct {
|
||||
// Check is called after this step is executed in order to test that
|
||||
// the step executed successfully. If this is not set, then the next
|
||||
// step will be called
|
||||
Check func(*exec.Cmd, string) error
|
||||
// IsCompatible checks whether a provisioner is able to run against a
|
||||
// given builder type and guest operating system, and returns a boolean.
|
||||
// if it returns true, the test combination is okay to run. If false, the
|
||||
// test combination is not okay to run.
|
||||
IsCompatible func(builderType string, BuilderGuestOS string) bool
|
||||
// Name is the name of the test case. Be simple but unique and descriptive.
|
||||
Name string
|
||||
// Setup, if non-nil, will be called once before the test case
|
||||
// runs. This can be used for some setup like setting environment
|
||||
// variables, or for validation prior to the
|
||||
// test running. For example, you can use this to make sure certain
|
||||
// binaries are installed, or text fixtures are in place.
|
||||
Setup func() error
|
||||
// Teardown will be called before the test case is over regardless
|
||||
// of if the test succeeded or failed. This should return an error
|
||||
// in the case that the test can't guarantee all resources were
|
||||
// properly cleaned up.
|
||||
Teardown builderT.TestTeardownFunc
|
||||
// Template is the provisioner template to use.
|
||||
// The provisioner template fragment must be a json-formatted string
|
||||
// containing the provisioner definition but no other portions of a packer
|
||||
// template. For
|
||||
// example:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "type": "shell-local",
|
||||
// "inline", ["echo hello world"]
|
||||
// }
|
||||
//```
|
||||
//
|
||||
// is a valid entry for "template" here, but the complete Packer template:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "provisioners": [
|
||||
// {
|
||||
// "type": "shell-local",
|
||||
// "inline", ["echo hello world"]
|
||||
// }
|
||||
// ]
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// is invalid as input.
|
||||
//
|
||||
// You may provide multiple provisioners in the same template. For example:
|
||||
// ```json
|
||||
// {
|
||||
// "type": "shell-local",
|
||||
// "inline", ["echo hello world"]
|
||||
// },
|
||||
// {
|
||||
// "type": "shell-local",
|
||||
// "inline", ["echo hello world 2"]
|
||||
// }
|
||||
// ```
|
||||
Template string
|
||||
// Type is the type of provisioner.
|
||||
Type string
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
To start writing a new provisioner acceptance test, you should add a test file
|
||||
named `provisioner_acc_test.go` in the same folder as your provisioner is
|
||||
defined. Create a test case by implementing the above struct, and run it
|
||||
by calling `provisioneracc.TestProvisionersAgainstBuilders(testCase, t)`
|
||||
|
||||
The following example has been adapted from a shell-local provisioner test:
|
||||
|
||||
```
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc"
|
||||
"github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
|
||||
)
|
||||
|
||||
// ...
|
||||
|
||||
func TestAccShellProvisioner_basic(t *testing.T) {
|
||||
// Create a json template fragment containing just the provisioners you want
|
||||
// to run.
|
||||
templateString := `{
|
||||
"type": "shell-local",
|
||||
"script": "test-fixtures/script.sh",
|
||||
"max_retries" : 5
|
||||
}`
|
||||
|
||||
// instantiate a test case.
|
||||
testCase := &provisioneracc.ProvisionerTestCase{
|
||||
IsCompatible: func() bool {return true},
|
||||
Name: "shell-local-provisioner-basic",
|
||||
Teardown: func() error {
|
||||
testutils.CleanupFiles("test-fixtures/file.txt")
|
||||
return nil
|
||||
},
|
||||
Template: templateString,
|
||||
Type: "shell-local",
|
||||
Check: func(buildcommand *exec.Cmd, logfile string) error {
|
||||
if buildcommand.ProcessState != nil {
|
||||
if buildcommand.ProcessState.ExitCode() != 0 {
|
||||
return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
|
||||
}
|
||||
}
|
||||
filecontents, err := loadFile("file.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !strings.Contains(filecontents, "hello") {
|
||||
return fmt.Errorf("file contents were wrong: %s", filecontents)
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
provisioneracc.TestProvisionersAgainstBuilders(testCase, t)
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
After writing the struct and implementing the interface, now is time to write the test that will run all
|
||||
of this code you wrote. Your test should be like:
|
||||
|
||||
```go
|
||||
func TestShellProvisioner(t *testing.T) {
|
||||
acc.TestProvisionersPreCheck("shell", t)
|
||||
acc.TestProvisionersAgainstBuilders(new(ShellProvisionerAccTest), t)
|
||||
}
|
||||
```
|
||||
|
||||
The method `TestProvisionersAgainstBuilders` will run the provisioner against
|
||||
all available and compatible builders. If there are not builders compatible with
|
||||
the test you want to run, you can add a builder using the following steps:
|
||||
|
||||
Create a subdirectory in provisioneracc/test-fixtures for the type of builder
|
||||
you are adding. In this subdirectory, add one json file containing a single
|
||||
builder fragment. For example, one of our amazon-ebs builders is defined in
|
||||
provisioneracc/test-fixtures/amazon-ebs/amazon-ebs.txt and contains:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "amazon-ebs",
|
||||
"ami_name": "packer-acc-test",
|
||||
"instance_type": "t2.micro",
|
||||
"region": "us-east-1",
|
||||
"ssh_username": "ubuntu",
|
||||
"source_ami_filter": {
|
||||
"filters": {
|
||||
"virtualization-type": "hvm",
|
||||
"name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
|
||||
"root-device-type": "ebs"
|
||||
},
|
||||
"owners": ["099720109477"],
|
||||
"most_recent": true
|
||||
},
|
||||
"force_deregister" : true,
|
||||
"tags": {
|
||||
"packer-test": "true"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
note that this fragment does not contain anything other than a single builder
|
||||
definition. The testing framework will combine this with the provisioner
|
||||
fragment to create a working json template.
|
||||
|
||||
In order to tell the testing framework how to use this builder fragment, you
|
||||
need to implement a `BuilderFixture` struct:
|
||||
|
||||
```go
|
||||
type BuilderFixture struct {
|
||||
// Name is the name of the builder fixture.
|
||||
// Be simple and descriptive.
|
||||
Name string
|
||||
// Setup creates necessary extra test fixtures, and renders their values
|
||||
// into the BuilderFixture.Template.
|
||||
Setup func()
|
||||
// Template is the path to a builder template fragment.
|
||||
// The builder template fragment must be a json-formatted file containing
|
||||
// the builder definition but no other portions of a packer template. For
|
||||
// example:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "type": "null",
|
||||
// "communicator", "none"
|
||||
// }
|
||||
//```
|
||||
//
|
||||
// is a valid entry for "template" here, but the complete Packer template:
|
||||
//
|
||||
// ```json
|
||||
// {
|
||||
// "builders": [
|
||||
// "type": "null",
|
||||
// "communicator": "none"
|
||||
// ]
|
||||
// }
|
||||
// ```
|
||||
//
|
||||
// is invalid as input.
|
||||
//
|
||||
// Only provide one builder template fragment per file.
|
||||
TemplatePath string
|
||||
|
||||
// GuestOS says what guest os type the builder template fragment creates.
|
||||
// Valid values are "windows", "linux" or "darwin" guests.
|
||||
GuestOS string
|
||||
|
||||
// HostOS says what host os type the builder is capable of running on.
|
||||
// Valid values are "any", windows", or "posix". If you set "posix", then
|
||||
// this builder can run on a "linux" or "darwin" platform. If you set
|
||||
// "any", then this builder can be used on any platform.
|
||||
HostOS string
|
||||
|
||||
Teardown builderT.TestTeardownFunc
|
||||
}
|
||||
```
|
||||
Implement this struct to the file "provisioneracc/builders.go", then add
|
||||
the new implementation to the `BuildersAccTest` map in
|
||||
`provisioneracc/provisioners.go`
|
||||
|
||||
Once you finish these steps, you should be ready to run your new provisioner
|
||||
acceptance test by setting the name used in the BuildersAccTest map as your
|
||||
`ACC_TEST_BUILDERS` environment variable.
|
||||
*/
|
||||
package acctest
|
63
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/builders.go
generated
vendored
Normal file
63
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/builders.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
/*
|
||||
Package provisioneracc creates a framework for provisioner acceptance
|
||||
testing. For builder acceptance testing, use the top level tooling in the
|
||||
acctest package.
|
||||
*/
|
||||
package provisioneracc
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/acctest/testutils"
|
||||
)
|
||||
|
||||
// Variables stored in this file represent implementations of the BuilderFixture
|
||||
// struct inside of provisioners.go
|
||||
|
||||
// AmasonEBSBuilderFixtureLinux points to a build stub of a simple amazon-ebs
|
||||
// build running on a linux operating system.
|
||||
var AmasonEBSBuilderFixtureLinux = &BuilderFixture{
|
||||
Name: "Amazon-ebs Linux builder",
|
||||
TemplatePath: "amazon-ebs/amazon-ebs.txt",
|
||||
GuestOS: "linux",
|
||||
HostOS: "any",
|
||||
Teardown: func() error {
|
||||
// TODO
|
||||
// helper := AWSHelper{
|
||||
// Region: "us-east-1",
|
||||
// AMIName: "packer-acc-test",
|
||||
// }
|
||||
// return helper.CleanUpAmi()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// AmasonEBSBuilderFixtureWindows points to a build stub of a simple amazon-ebs
|
||||
// build running on a Windows operating system.
|
||||
var AmasonEBSBuilderFixtureWindows = &BuilderFixture{
|
||||
Name: "Amazon-ebs Windows builder",
|
||||
TemplatePath: "amazon-ebs/amazon-ebs_windows.txt",
|
||||
GuestOS: "windows",
|
||||
HostOS: "any",
|
||||
Teardown: func() error {
|
||||
// TODO
|
||||
// helper := AWSHelper{
|
||||
// Region: "us-east-1",
|
||||
// AMIName: "packer-acc-test",
|
||||
// }
|
||||
// return helper.CleanUpAmi()
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// VirtualboxBuilderFixtureLinux points to a build stub of a simple amazon-ebs
|
||||
// build running on a linux operating system.
|
||||
var VirtualboxBuilderFixtureLinux = &BuilderFixture{
|
||||
Name: "Virtualbox Windows builder",
|
||||
TemplatePath: "virtualbox/virtualbox-iso.txt",
|
||||
GuestOS: "linux",
|
||||
HostOS: "any",
|
||||
Teardown: func() error {
|
||||
testutils.CleanupFiles("virtualbox-iso-packer-acc-test")
|
||||
testutils.CleanupFiles("packer_cache")
|
||||
return nil
|
||||
},
|
||||
}
|
338
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/provisioners.go
generated
vendored
Normal file
338
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc/provisioners.go
generated
vendored
Normal file
|
@ -0,0 +1,338 @@
|
|||
package provisioneracc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
builderT "github.com/hashicorp/packer-plugin-sdk/acctest"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// ProvisionerTestCase is a single set of tests to run for a provisioner.
// A ProvisionerTestCase should generally map 1:1 to each test method for your
// acceptance tests.
type ProvisionerTestCase struct {
	// Check is called after this step is executed in order to test that
	// the step executed successfully. If this is not set, then the next
	// step will be called.
	Check func(*exec.Cmd, string) error
	// IsCompatible checks whether a provisioner is able to run against a
	// given builder type and guest operating system, and returns a boolean.
	// If it returns true, the test combination is okay to run. If false, the
	// test combination is not okay to run.
	IsCompatible func(builderType string, BuilderGuestOS string) bool
	// Name is the name of the test case. Be simple but unique and descriptive.
	Name string
	// Setup, if non-nil, will be called once before the test case
	// runs. This can be used for some setup like setting environment
	// variables, or for validation prior to the
	// test running. For example, you can use this to make sure certain
	// binaries are installed, or text fixtures are in place.
	Setup func() error
	// Teardown will be called before the test case is over regardless
	// of if the test succeeded or failed. This should return an error
	// in the case that the test can't guarantee all resources were
	// properly cleaned up.
	Teardown builderT.TestTeardownFunc
	// Template is the provisioner template to use.
	// The provisioner template fragment must be a json-formatted string
	// containing the provisioner definition but no other portions of a packer
	// template. For example:
	//
	// ```json
	// {
	//  "type": "shell-local",
	//  "inline": ["echo hello world"]
	// }
	// ```
	//
	// is a valid entry for "template" here, but the complete Packer template:
	//
	// ```json
	// {
	//  "provisioners": [
	//   {
	//    "type": "shell-local",
	//    "inline": ["echo hello world"]
	//   }
	//  ]
	// }
	// ```
	//
	// is invalid as input.
	//
	// You may provide multiple provisioners in the same template. For example:
	// ```json
	// {
	//  "type": "shell-local",
	//  "inline": ["echo hello world"]
	// },
	// {
	//  "type": "shell-local",
	//  "inline": ["echo hello world 2"]
	// }
	// ```
	Template string
	// Type is the type of provisioner.
	Type string
}
|
||||
|
||||
// BuilderFixtures are basic builder test configurations and metadata used
// in provisioner acceptance testing. These are frameworks to be used by
// provisioner tests, not tests in and of themselves. BuilderFixtures should
// generally be simple and not contain excessive or complex configurations.
// Instantiations of this struct are stored in the builders.go file in this
// module.
type BuilderFixture struct {
	// Name is the name of the builder fixture.
	// Be simple and descriptive.
	Name string
	// Setup creates necessary extra test fixtures, and renders their values
	// into the BuilderFixture.Template.
	Setup func()
	// TemplatePath is the path to a builder template fragment.
	// The builder template fragment must be a json-formatted file containing
	// the builder definition but no other portions of a packer template. For
	// example:
	//
	// ```json
	// {
	//  "type": "null",
	//  "communicator": "none"
	// }
	// ```
	//
	// is a valid entry for "template" here, but the complete Packer template:
	//
	// ```json
	// {
	//  "builders": [
	//   "type": "null",
	//   "communicator": "none"
	//  ]
	// }
	// ```
	//
	// is invalid as input.
	//
	// Only provide one builder template fragment per file.
	TemplatePath string

	// GuestOS says what guest os type the builder template fragment creates.
	// Valid values are "windows", "linux" or "darwin" guests.
	GuestOS string

	// HostOS says what host os type the builder is capable of running on.
	// Valid values are "any", "windows", or "posix". If you set "posix", then
	// this builder can run on a "linux" or "darwin" platform. If you set
	// "any", then this builder can be used on any platform.
	HostOS string

	// Teardown cleans up any side effects the builder's build leaves behind
	// (images, cache files, etc.) after a test run.
	Teardown builderT.TestTeardownFunc
}
|
||||
|
||||
// fixtureDir returns the path of the "test-fixtures" directory that sits
// next to this source file, located via runtime.Caller.
func fixtureDir() string {
	_, sourcePath, _, _ := runtime.Caller(0)
	sourceDir := filepath.Dir(sourcePath)
	return filepath.Join(sourceDir, "test-fixtures")
}
|
||||
|
||||
func LoadBuilderFragment(templateFragmentPath string) (string, error) {
|
||||
dir := fixtureDir()
|
||||
fragmentAbsPath := filepath.Join(dir, templateFragmentPath)
|
||||
fragmentFile, err := os.Open(fragmentAbsPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Unable find %s", fragmentAbsPath)
|
||||
}
|
||||
defer fragmentFile.Close()
|
||||
|
||||
fragmentString, err := ioutil.ReadAll(fragmentFile)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Unable to read %s", fragmentAbsPath)
|
||||
}
|
||||
|
||||
return string(fragmentString), nil
|
||||
}
|
||||
|
||||
// RunProvisionerAccTest runs the given provisioner test case against every
// builder requested via the ACC_TEST_BUILDERS environment variable. It is a
// thin wrapper that delegates directly to TestProvisionersAgainstBuilders.
func RunProvisionerAccTest(testCase *ProvisionerTestCase, t *testing.T) {
	TestProvisionersAgainstBuilders(testCase, t)
}
|
||||
|
||||
// TestProvisionersAgainstBuilders runs the provisioner test case once per
// compatible builder fixture requested via ACC_TEST_BUILDERS. For each
// combination it merges the builder and provisioner template fragments into
// a complete template on disk, shells out to the installed packer binary to
// run the build, invokes the case's Check callback, and then tears down both
// builder- and test-created resources. On Check failure, the log file and
// generated template are left on disk for debugging.
//
//nolint:errcheck
func TestProvisionersAgainstBuilders(testCase *ProvisionerTestCase, t *testing.T) {
	// Retrieve user-requested builders; skips the test when none are set.
	builderTypes := checkBuilders(t)

	// Run this provisioner test case against each builder type requested.
	for _, builderType := range builderTypes {
		buildFixtures := BuildersAccTest[builderType]
		// loop over individual build templates, merge with provisioner
		// templates, and shell out to run test.
		for _, buildFixture := range buildFixtures {
			if !testCase.IsCompatible(builderType, buildFixture.GuestOS) {
				continue
			}

			testName := fmt.Sprintf("%s on %s", testCase.Name, buildFixture.Name)

			if testCase.Setup != nil {
				err := testCase.Setup()
				if err != nil {
					t.Fatalf("test %s setup failed: %s", testName, err)
				}
			}

			t.Run(testName, func(t *testing.T) {
				builderFragment, err := LoadBuilderFragment(buildFixture.TemplatePath)
				if err != nil {
					t.Fatalf("failed to load builder fragment: %s", err)
				}

				// Combine provisioner and builder template fragments; write to
				// file.
				out := bytes.NewBuffer(nil)
				fmt.Fprintf(out, `{"builders": [%s],"provisioners": [%s]}`,
					builderFragment, testCase.Template)
				templateName := fmt.Sprintf("%s_%s.json", builderType, testCase.Type)
				templatePath := filepath.Join("./", templateName)
				writeJsonTemplate(out, templatePath, t)
				logfile := fmt.Sprintf("packer_log_%s_%s.txt", builderType, testCase.Type)

				// Make sure packer is installed:
				packerbin, err := exec.LookPath("packer")
				if err != nil {
					t.Fatalf("Couldn't find packer binary installed on system: %s", err.Error())
				}
				// Run build. The Run error is deliberately ignored (see the
				// nolint directive above): pass/fail is decided by the Check
				// callback against the machine-readable log, not by the
				// binary's exit status.
				buildCommand := exec.Command(packerbin, "build", "--machine-readable", templatePath)
				buildCommand.Env = append(buildCommand.Env, os.Environ()...)
				buildCommand.Env = append(buildCommand.Env, "PACKER_LOG=1",
					fmt.Sprintf("PACKER_LOG_PATH=%s", logfile))
				buildCommand.Run()

				// Check for test custom pass/fail before we clean up
				var checkErr error
				if testCase.Check != nil {
					checkErr = testCase.Check(buildCommand, logfile)
				}

				// Cleanup stuff created by builder. Teardown failures are
				// logged but do not fail the test.
				cleanErr := buildFixture.Teardown()
				if cleanErr != nil {
					log.Printf("bad: failed to clean up builder-created resources: %s", cleanErr.Error())
				}
				// Clean up anything created in provisioner run
				if testCase.Teardown != nil {
					cleanErr = testCase.Teardown()
					if cleanErr != nil {
						log.Printf("bad: failed to clean up test-created resources: %s", cleanErr.Error())
					}
				}

				// Fail test if check failed; keep logfile and template on
				// disk for inspection. On success, remove both.
				if checkErr != nil {
					cwd, _ := os.Getwd()
					t.Fatalf(fmt.Sprintf("Error running provisioner acceptance"+
						" tests: %s\nLogs can be found at %s\nand the "+
						"acceptance test template can be found at %s",
						checkErr.Error(), filepath.Join(cwd, logfile),
						filepath.Join(cwd, templatePath)))
				} else {
					os.Remove(templatePath)
					os.Remove(logfile)
				}
			})
		}
	}
}
|
||||
|
||||
// checkBuilders retrieves all of the builders that the user has requested to
|
||||
// run acceptance tests against.
|
||||
func checkBuilders(t *testing.T) []string {
|
||||
b := os.Getenv("ACC_TEST_BUILDERS")
|
||||
// validate if we want to run provisioners acc tests
|
||||
if b == "" {
|
||||
t.Skip("Provisioners Acceptance tests skipped unless env 'ACC_TEST_BUILDERS' is set")
|
||||
}
|
||||
|
||||
// Get builders type to test provisioners against
|
||||
var builders []string
|
||||
for k := range BuildersAccTest {
|
||||
// This will validate that only defined builders are executed against
|
||||
if b != "all" && !strings.Contains(b, k) {
|
||||
continue
|
||||
}
|
||||
builders = append(builders, k)
|
||||
}
|
||||
return builders
|
||||
}
|
||||
|
||||
// writeJsonTemplate writes the rendered template in out to filePath, failing
// the test on any error.
//
// Fixes over the original: the file handle is now closed (it was leaked),
// and the Sync error is reported instead of being silently discarded.
func writeJsonTemplate(out *bytes.Buffer, filePath string, t *testing.T) {
	outputFile, err := os.Create(filePath)
	if err != nil {
		t.Fatalf("bad: failed to create template file: %s", err.Error())
	}
	// Close runs even on Fatalf, which exits via runtime.Goexit and still
	// executes deferred calls.
	defer outputFile.Close()

	if _, err := outputFile.Write(out.Bytes()); err != nil {
		t.Fatalf("bad: failed to write template file: %s", err.Error())
	}
	if err := outputFile.Sync(); err != nil {
		t.Fatalf("bad: failed to sync template file: %s", err.Error())
	}
}
|
||||
|
||||
// BuilderAcceptance is specialized tooling implemented by individual builders.
// To add your builder to the provisioner testing framework, create a struct
// that implements this interface, add it to the BuildersAccTest map below.
// TODO add this interface to the plugin server so that Packer can request it
// from the plugin rather than importing it here.
type BuilderAcceptance interface {
	// GetConfigs provides a mapping of guest OS architecture to builder
	// template fragment.
	// The builder template fragment must be a json-formatted string containing
	// the builder definition but no other portions of a packer template. For
	// example:
	//
	// ```json
	// {
	//  "type": "null",
	//  "communicator": "none"
	// }
	// ```
	//
	// is a valid entry for "template" here, but the complete Packer template:
	//
	// ```json
	// {
	//  "builders": [
	//   "type": "null",
	//   "communicator": "none"
	//  ]
	// }
	// ```
	//
	// is invalid as input.
	//
	// Valid keys for the map are "linux" and "windows". These keys will be used
	// to determine whether a given builder template is compatible with a given
	// provisioner template.
	GetConfigs() (map[string]string, error)
	// GetBuilderStore() returns a MapOfBuilder that contains the actual builder
	// struct definition being used for this test.
	GetBuilderStore() packersdk.MapOfBuilder
	// CleanUp cleans up any side-effects of the builder not already cleaned up
	// by the builderT framework.
	CleanUp() error
}
|
||||
|
||||
// Mapping of all builder fixtures defined for a given builder type.
|
||||
var BuildersAccTest = map[string][]*BuilderFixture{
|
||||
"virtualbox-iso": []*BuilderFixture{VirtualboxBuilderFixtureLinux},
|
||||
"amazon-ebs": []*BuilderFixture{AmasonEBSBuilderFixtureLinux, AmasonEBSBuilderFixtureWindows},
|
||||
}
|
|
@ -0,0 +1,222 @@
|
|||
package acctest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template"
|
||||
"github.com/hashicorp/packer/packer"
|
||||
"github.com/hashicorp/packer/provisioner/file"
|
||||
shellprovisioner "github.com/hashicorp/packer/provisioner/shell"
|
||||
)
|
||||
|
||||
// TestEnvVar must be set to a non-empty value for acceptance tests to run.
|
||||
const TestEnvVar = "PACKER_ACC"
|
||||
|
||||
// TestCase is a single set of tests to run for a backend. A TestCase
// should generally map 1:1 to each test method for your acceptance
// tests.
type TestCase struct {
	// PreCheck, if non-nil, will be called once before the test case
	// runs at all. This can be used for some validation prior to the
	// test running.
	PreCheck func()

	// Builder is the Builder that will be tested. It will be available
	// as the "test" builder in the template.
	Builder packersdk.Builder

	// Template is the template contents to use.
	Template string

	// Check is called after this step is executed in order to test that
	// the step executed successfully. If this is not set, then the next
	// step will be called.
	Check TestCheckFunc

	// Teardown will be called before the test case is over regardless
	// of if the test succeeded or failed. This should return an error
	// in the case that the test can't guarantee all resources were
	// properly cleaned up.
	Teardown TestTeardownFunc

	// If SkipArtifactTeardown is true, we will not attempt to destroy the
	// artifact created in this test run.
	SkipArtifactTeardown bool
	// If set, overrides the default provisioner store with custom provisioners.
	// This can be useful for running acceptance tests for a particular
	// provisioner using a specific builder.
	// Default provisioner store:
	//  ProvisionerStore: packersdk.MapOfProvisioner{
	//  	"shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
	//  	"file":  func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
	//  },
	ProvisionerStore packersdk.MapOfProvisioner
}
|
||||
|
||||
// TestCheckFunc is the callback used for Check in TestStep.
type TestCheckFunc func([]packersdk.Artifact) error

// TestTeardownFunc is the callback used for Teardown in TestCase.
type TestTeardownFunc func() error

// TestT is the interface used to handle the test lifecycle of a test.
//
// Users should just use a *testing.T object, which implements this.
type TestT interface {
	Error(args ...interface{})
	Fatal(args ...interface{})
	Skip(args ...interface{})
}

// TestBuilderStore wraps a packer.BuilderStore, overriding Start with a
// test-supplied StartFn so tests can inject the builder under test.
type TestBuilderStore struct {
	packer.BuilderStore
	StartFn func(name string) (packersdk.Builder, error)
}

// Start delegates builder lookup to the test-supplied StartFn.
func (tbs TestBuilderStore) Start(name string) (packersdk.Builder, error) { return tbs.StartFn(name) }
|
||||
|
||||
// Test performs an acceptance test on a backend with the given test case.
//
// Tests are not run unless an environmental variable "PACKER_ACC" is
// set to some non-empty value. This is to avoid test cases surprising
// a user by creating real resources.
//
// Tests will fail unless the verbose flag (`go test -v`, or explicitly
// the "-test.v" flag) is set. Because some acceptance tests take quite
// long, we require the verbose flag so users are able to see progress
// output.
func Test(t TestT, c TestCase) {
	// We only run acceptance tests if an env var is set because they're
	// slow and generally require some outside configuration.
	if os.Getenv(TestEnvVar) == "" {
		t.Skip(fmt.Sprintf(
			"Acceptance tests skipped unless env '%s' set",
			TestEnvVar))
		return
	}

	// We require verbose mode so that the user knows what is going on.
	// testTesting bypasses this for this package's own unit tests.
	if !testTesting && !testing.Verbose() {
		t.Fatal("Acceptance tests must be run with the -v flag on tests")
		return
	}

	// Run the PreCheck if we have it
	if c.PreCheck != nil {
		c.PreCheck()
	}

	// Parse the template
	log.Printf("[DEBUG] Parsing template...")
	tpl, err := template.Parse(strings.NewReader(c.Template))
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to parse template: %s", err))
		return
	}

	// Fall back to the default provisioner store (shell and file) when the
	// test case does not supply its own.
	if c.ProvisionerStore == nil {
		c.ProvisionerStore = packersdk.MapOfProvisioner{
			"shell": func() (packersdk.Provisioner, error) { return &shellprovisioner.Provisioner{}, nil },
			"file":  func() (packersdk.Provisioner, error) { return &file.Provisioner{}, nil },
		}
	}
	// Build the core
	log.Printf("[DEBUG] Initializing core...")
	core := packer.NewCore(&packer.CoreConfig{
		Components: packer.ComponentFinder{
			BuilderStore: TestBuilderStore{
				// Only the name "test" resolves to the case's Builder; any
				// other builder name yields nil.
				StartFn: func(n string) (packersdk.Builder, error) {
					if n == "test" {
						return c.Builder, nil
					}

					return nil, nil
				},
			},
			ProvisionerStore: c.ProvisionerStore,
		},
		Template: tpl,
	})
	err = core.Initialize()
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to init core: %s", err))
		return
	}

	// Get the build
	log.Printf("[DEBUG] Retrieving 'test' build")
	build, err := core.Build("test")
	if err != nil {
		t.Fatal(fmt.Sprintf("Failed to get 'test' build: %s", err))
		return
	}

	// Prepare it
	log.Printf("[DEBUG] Preparing 'test' build")
	warnings, err := build.Prepare()
	if err != nil {
		t.Fatal(fmt.Sprintf("Prepare error: %s", err))
		return
	}
	if len(warnings) > 0 {
		t.Fatal(fmt.Sprintf(
			"Prepare warnings:\n\n%s",
			strings.Join(warnings, "\n")))
		return
	}

	// Run it! We use a temporary directory for caching and discard
	// any UI output. We discard since it shows up in logs anyways.
	log.Printf("[DEBUG] Running 'test' build")
	ui := &packersdk.BasicUi{
		Reader:      os.Stdin,
		Writer:      ioutil.Discard,
		ErrorWriter: ioutil.Discard,
		PB:          &packersdk.NoopProgressTracker{},
	}
	artifacts, err := build.Run(context.Background(), ui)
	// NOTE(review): with a real *testing.T, Fatal never returns, so the goto
	// below is only reached for TestT implementations whose Fatal returns.
	if err != nil {
		t.Fatal(fmt.Sprintf("Run error:\n\n%s", err))
		goto TEARDOWN
	}

	// Check function
	if c.Check != nil {
		log.Printf("[DEBUG] Running check function")
		if err := c.Check(artifacts); err != nil {
			t.Fatal(fmt.Sprintf("Check error:\n\n%s", err))
			goto TEARDOWN
		}
	}

TEARDOWN:
	// Delete all artifacts unless the case opted out.
	if !c.SkipArtifactTeardown {
		// Delete all artifacts
		for _, a := range artifacts {
			if err := a.Destroy(); err != nil {
				t.Error(fmt.Sprintf(
					"!!! ERROR REMOVING ARTIFACT '%s': %s !!!",
					a.String(), err))
			}
		}
	}

	// Teardown
	if c.Teardown != nil {
		log.Printf("[DEBUG] Running teardown function")
		if err := c.Teardown(); err != nil {
			t.Fatal(fmt.Sprintf("Teardown failure:\n\n%s", err))
			return
		}
	}
}
|
||||
|
||||
// testTesting is set to true by this package's own unit tests; when true,
// Test does not enforce the -v (verbose) flag requirement.
var testTesting = false
|
20
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testutils/utils.go
generated
vendored
Normal file
20
vendor/github.com/hashicorp/packer-plugin-sdk/acctest/testutils/utils.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
// Package testutils provides some simple ease-of-use tools for implementing
|
||||
// acceptance testing.
|
||||
package testutils
|
||||
|
||||
import "os"
|
||||
|
||||
// CleanupFiles deletes every named file or directory tree. Paths that do not
// exist are silently skipped.
func CleanupFiles(moreFiles ...string) {
	for i := range moreFiles {
		// RemoveAll returns nil for nonexistent paths, so stale fixtures
		// never cause trouble here; other errors are intentionally ignored.
		_ = os.RemoveAll(moreFiles[i])
	}
}
|
||||
|
||||
// FileExists reports whether filename can be stat'ed successfully.
func FileExists(filename string) bool {
	_, err := os.Stat(filename)
	return err == nil
}
|
|
@ -0,0 +1,338 @@
|
|||
package adapter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/google/shlex"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// An Adapter satisfies SSH requests (from an Ansible client) by delegating SSH
// exec and subsystem commands to a packersdk.Communicator.
type Adapter struct {
	// done, when readable, tells Serve to stop accepting connections.
	done <-chan struct{}
	// l is the listener the SSH proxy accepts connections on.
	l net.Listener
	// config holds the server-side SSH handshake configuration.
	config *ssh.ServerConfig
	// sftpCmd is the command run for the sftp subsystem; when empty,
	// handleSession falls back to "/usr/lib/sftp-server -e".
	sftpCmd string
	// ui receives error output.
	ui packersdk.Ui
	// comm executes delegated commands on the guest.
	comm packersdk.Communicator
}
|
||||
|
||||
func NewAdapter(done <-chan struct{}, l net.Listener, config *ssh.ServerConfig, sftpCmd string, ui packersdk.Ui, comm packersdk.Communicator) *Adapter {
|
||||
return &Adapter{
|
||||
done: done,
|
||||
l: l,
|
||||
config: config,
|
||||
sftpCmd: sftpCmd,
|
||||
ui: ui,
|
||||
comm: comm,
|
||||
}
|
||||
}
|
||||
|
||||
// Serve accepts SSH connections on the adapter's listener until the done
// channel becomes readable, handling each accepted connection on its own
// goroutine.
func (c *Adapter) Serve() {
	log.Printf("SSH proxy: serving on %s", c.l.Addr())

	for {
		// Accept will return if either the underlying connection is closed or if a connection is made.
		// after returning, check to see if c.done can be received. If so, then Accept() returned because
		// the connection has been closed.
		conn, err := c.l.Accept()
		select {
		case <-c.done:
			// Shutdown requested: stop the accept loop.
			return
		case
		default:
			if err != nil {
				c.ui.Error(fmt.Sprintf("listen.Accept failed: %v", err))
				continue
			}
			// Handle the connection concurrently so Serve keeps accepting.
			go func(conn net.Conn) {
				if err := c.Handle(conn, c.ui); err != nil {
					c.ui.Error(err.Error())
				}
			}(conn)
		}
	}
}
|
||||
|
||||
// Handle performs the SSH handshake on conn and services its channels:
// global requests are discarded, non-"session" channels are rejected, and
// each session channel is dispatched to handleSession on its own goroutine.
// It returns when the connection's channel stream ends.
func (c *Adapter) Handle(conn net.Conn, ui packersdk.Ui) error {
	log.Print("SSH proxy: accepted connection")
	_, chans, reqs, err := ssh.NewServerConn(conn, c.config)
	if err != nil {
		return errors.New("failed to handshake")
	}

	// discard all global requests
	go ssh.DiscardRequests(reqs)

	// Service the incoming NewChannels
	for newChannel := range chans {
		if newChannel.ChannelType() != "session" {
			newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
			continue
		}

		go func(ch ssh.NewChannel) {
			if err := c.handleSession(ch); err != nil {
				c.ui.Error(err.Error())
			}
		}(newChannel)
	}

	return nil
}
|
||||
|
||||
func (c *Adapter) handleSession(newChannel ssh.NewChannel) error {
|
||||
channel, requests, err := newChannel.Accept()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer channel.Close()
|
||||
|
||||
done := make(chan struct{})
|
||||
|
||||
// Sessions have requests such as "pty-req", "shell", "env", and "exec".
|
||||
// see RFC 4254, section 6
|
||||
go func(in <-chan *ssh.Request) {
|
||||
env := make([]envRequestPayload, 4)
|
||||
for req := range in {
|
||||
switch req.Type {
|
||||
case "pty-req":
|
||||
log.Println("ansible provisioner pty-req request")
|
||||
// accept pty-req requests, but don't actually do anything. Necessary for OpenSSH and sudo.
|
||||
req.Reply(true, nil)
|
||||
|
||||
case "env":
|
||||
req, err := newEnvRequest(req)
|
||||
if err != nil {
|
||||
c.ui.Error(err.Error())
|
||||
req.Reply(false, nil)
|
||||
continue
|
||||
}
|
||||
env = append(env, req.Payload)
|
||||
log.Printf("new env request: %s", req.Payload)
|
||||
req.Reply(true, nil)
|
||||
case "exec":
|
||||
req, err := newExecRequest(req)
|
||||
if err != nil {
|
||||
c.ui.Error(err.Error())
|
||||
req.Reply(false, nil)
|
||||
close(done)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("new exec request: %s", req.Payload)
|
||||
|
||||
if len(req.Payload) == 0 {
|
||||
req.Reply(false, nil)
|
||||
close(done)
|
||||
return
|
||||
}
|
||||
|
||||
go func(channel ssh.Channel) {
|
||||
exit := c.exec(string(req.Payload), channel, channel, channel.Stderr())
|
||||
|
||||
exitStatus := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(exitStatus, uint32(exit))
|
||||
channel.SendRequest("exit-status", false, exitStatus)
|
||||
close(done)
|
||||
}(channel)
|
||||
req.Reply(true, nil)
|
||||
case "subsystem":
|
||||
req, err := newSubsystemRequest(req)
|
||||
if err != nil {
|
||||
c.ui.Error(err.Error())
|
||||
req.Reply(false, nil)
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("new subsystem request: %s", req.Payload)
|
||||
switch req.Payload {
|
||||
case "sftp":
|
||||
sftpCmd := c.sftpCmd
|
||||
if len(sftpCmd) == 0 {
|
||||
sftpCmd = "/usr/lib/sftp-server -e"
|
||||
}
|
||||
|
||||
log.Print("starting sftp subsystem")
|
||||
go func() {
|
||||
_ = c.remoteExec(sftpCmd, channel, channel, channel.Stderr())
|
||||
close(done)
|
||||
}()
|
||||
req.Reply(true, nil)
|
||||
default:
|
||||
c.ui.Error(fmt.Sprintf("unsupported subsystem requested: %s", req.Payload))
|
||||
req.Reply(false, nil)
|
||||
}
|
||||
default:
|
||||
log.Printf("rejecting %s request", req.Type)
|
||||
req.Reply(false, nil)
|
||||
}
|
||||
}
|
||||
}(requests)
|
||||
|
||||
<-done
|
||||
return nil
|
||||
}
|
||||
|
||||
// Shutdown closes the adapter's listener, unblocking the Accept call in
// Serve. The done channel must also be signalled for Serve to return.
func (c *Adapter) Shutdown() {
	c.l.Close()
}
|
||||
|
||||
func (c *Adapter) exec(command string, in io.Reader, out io.Writer, err io.Writer) int {
|
||||
var exitStatus int
|
||||
switch {
|
||||
case strings.HasPrefix(command, "scp ") && serveSCP(command[4:]):
|
||||
err := c.scpExec(command[4:], in, out)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
exitStatus = 1
|
||||
}
|
||||
default:
|
||||
exitStatus = c.remoteExec(command, in, out, err)
|
||||
}
|
||||
return exitStatus
|
||||
}
|
||||
|
||||
func serveSCP(args string) bool {
|
||||
opts, _ := scpOptions(args)
|
||||
return bytes.IndexAny(opts, "tf") >= 0
|
||||
}
|
||||
|
||||
func (c *Adapter) scpExec(args string, in io.Reader, out io.Writer) error {
|
||||
opts, rest := scpOptions(args)
|
||||
|
||||
// remove the quoting that ansible added to rest for shell safety.
|
||||
shargs, err := shlex.Split(rest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rest = strings.Join(shargs, "")
|
||||
|
||||
if i := bytes.IndexByte(opts, 't'); i >= 0 {
|
||||
return scpUploadSession(opts, rest, in, out, c.comm)
|
||||
}
|
||||
|
||||
if i := bytes.IndexByte(opts, 'f'); i >= 0 {
|
||||
return scpDownloadSession(opts, rest, in, out, c.comm)
|
||||
}
|
||||
return errors.New("no scp mode specified")
|
||||
}
|
||||
|
||||
func (c *Adapter) remoteExec(command string, in io.Reader, out io.Writer, err io.Writer) int {
|
||||
cmd := &packersdk.RemoteCmd{
|
||||
Stdin: in,
|
||||
Stdout: out,
|
||||
Stderr: err,
|
||||
Command: command,
|
||||
}
|
||||
ctx := context.TODO()
|
||||
|
||||
if err := c.comm.Start(ctx, cmd); err != nil {
|
||||
c.ui.Error(err.Error())
|
||||
}
|
||||
|
||||
cmd.Wait()
|
||||
|
||||
return cmd.ExitStatus()
|
||||
}
|
||||
|
||||
// envRequest pairs an SSH "env" request with its decoded payload.
type envRequest struct {
	*ssh.Request
	Payload envRequestPayload
}

// envRequestPayload is the name/value pair carried by an "env" request.
type envRequestPayload struct {
	Name  string
	Value string
}

// String renders the payload as NAME=value.
func (p envRequestPayload) String() string {
	return fmt.Sprintf("%s=%s", p.Name, p.Value)
}

// newEnvRequest unmarshals raw's payload into an envRequest, returning an
// error when the payload cannot be decoded.
func newEnvRequest(raw *ssh.Request) (*envRequest, error) {
	r := new(envRequest)
	r.Request = raw

	if err := ssh.Unmarshal(raw.Payload, &r.Payload); err != nil {
		return nil, err
	}

	return r, nil
}
|
||||
|
||||
// sshString decodes an SSH wire-format string from buf: a big-endian uint32
// length followed by exactly that many bytes.
func sshString(buf io.Reader) (string, error) {
	var length uint32
	if err := binary.Read(buf, binary.BigEndian, &length); err != nil {
		return "", err
	}

	payload := make([]byte, length)
	if err := binary.Read(buf, binary.BigEndian, payload); err != nil {
		return "", err
	}
	return string(payload), nil
}
|
||||
|
||||
// execRequest pairs an SSH "exec" request with its decoded command string.
type execRequest struct {
	*ssh.Request
	Payload execRequestPayload
}

// execRequestPayload is the command string carried by an "exec" request.
type execRequestPayload string

// String returns the raw command string.
func (p execRequestPayload) String() string {
	return string(p)
}

// newExecRequest decodes the SSH wire-format command string from raw's
// payload into an execRequest.
func newExecRequest(raw *ssh.Request) (*execRequest, error) {
	r := new(execRequest)
	r.Request = raw
	buf := bytes.NewReader(r.Request.Payload)

	var err error
	var payload string
	if payload, err = sshString(buf); err != nil {
		return nil, err
	}

	r.Payload = execRequestPayload(payload)
	return r, nil
}
|
||||
|
||||
// subsystemRequest pairs an SSH "subsystem" request with its decoded
// subsystem name (e.g. "sftp").
type subsystemRequest struct {
	*ssh.Request
	Payload subsystemRequestPayload
}

// subsystemRequestPayload is the subsystem name carried by the request.
type subsystemRequestPayload string

// String returns the raw subsystem name.
func (p subsystemRequestPayload) String() string {
	return string(p)
}

// newSubsystemRequest decodes the SSH wire-format subsystem name from raw's
// payload into a subsystemRequest.
func newSubsystemRequest(raw *ssh.Request) (*subsystemRequest, error) {
	r := new(subsystemRequest)
	r.Request = raw
	buf := bytes.NewReader(r.Request.Payload)

	var err error
	var payload string
	if payload, err = sshString(buf); err != nil {
		return nil, err
	}

	r.Payload = subsystemRequestPayload(payload)
	return r, nil
}
|
|
@ -0,0 +1,13 @@
|
|||
/*
|
||||
Package adapter helps command line tools connect to the guest via a Packer
|
||||
communicator.
|
||||
|
||||
A typical use is for custom provisioners that wrap command line
|
||||
tools. For example, the Ansible provisioner and the Inspec provisioner both
|
||||
use this package to proxy communicator calls.
|
||||
|
||||
You may want to use this adapter if you are writing a provisioner that wraps a
|
||||
tool which under normal usage would be run locally and form a connection to the
|
||||
remote instance itself.
|
||||
*/
|
||||
package adapter
|
|
@ -0,0 +1,349 @@
|
|||
package adapter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/tmp"
|
||||
)
|
||||
|
||||
const (
|
||||
scpOK = "\x00"
|
||||
scpEmptyError = "\x02\n"
|
||||
)
|
||||
|
||||
/*
|
||||
scp is a simple, but poorly documented, protocol. Thankfully, its source is
|
||||
freely available, and there is at least one page that describes it reasonably
|
||||
well.
|
||||
|
||||
* https://raw.githubusercontent.com/openssh/openssh-portable/master/scp.c
|
||||
* https://opensource.apple.com/source/OpenSSH/OpenSSH-7.1/openssh/scp.c
|
||||
* https://blogs.oracle.com/janp/entry/how_the_scp_protocol_works is a great
|
||||
resource, but has some bad information. Its first problem is that it doesn't
|
||||
correctly describe why the producer has to read more responses than messages
|
||||
it sends (because it has to read the 0 sent by the sink to start the
|
||||
transfer). The second problem is that it omits that the producer needs to
|
||||
send a 0 byte after file contents.
|
||||
*/
|
||||
|
||||
func scpUploadSession(opts []byte, rest string, in io.Reader, out io.Writer, comm packersdk.Communicator) error {
|
||||
rest = strings.TrimSpace(rest)
|
||||
if len(rest) == 0 {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return errors.New("no scp target specified")
|
||||
}
|
||||
|
||||
d, err := tmp.Dir("ansible-upload")
|
||||
if err != nil {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
// To properly implement scp, rest should be checked to see if it is a
|
||||
// directory on the remote side, but ansible only sends files, so there's no
|
||||
// need to set targetIsDir, because it can be safely assumed that rest is
|
||||
// intended to be a file, and whatever names are used in 'C' commands are
|
||||
// irrelevant.
|
||||
state := &scpUploadState{target: rest, srcRoot: d, comm: comm}
|
||||
|
||||
fmt.Fprintf(out, scpOK) // signal the client to start the transfer.
|
||||
return state.Protocol(bufio.NewReader(in), out)
|
||||
}
|
||||
|
||||
func scpDownloadSession(opts []byte, rest string, in io.Reader, out io.Writer, comm packersdk.Communicator) error {
|
||||
rest = strings.TrimSpace(rest)
|
||||
if len(rest) == 0 {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return errors.New("no scp source specified")
|
||||
}
|
||||
|
||||
d, err := tmp.Dir("ansible-download")
|
||||
if err != nil {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(d)
|
||||
|
||||
if bytes.Contains([]byte{'d'}, opts) {
|
||||
// the only ansible module that supports downloading via scp is fetch,
|
||||
// fetch only supports file downloads as of Ansible 2.1.
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return errors.New("directory downloads not supported")
|
||||
}
|
||||
|
||||
f, err := os.Create(filepath.Join(d, filepath.Base(rest)))
|
||||
if err != nil {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
err = comm.Download(rest, f)
|
||||
if err != nil {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return err
|
||||
}
|
||||
|
||||
state := &scpDownloadState{srcRoot: d}
|
||||
|
||||
return state.Protocol(bufio.NewReader(in), out)
|
||||
}
|
||||
|
||||
func (state *scpDownloadState) FileProtocol(path string, info os.FileInfo, in *bufio.Reader, out io.Writer) error {
|
||||
size := info.Size()
|
||||
perms := fmt.Sprintf("C%04o", info.Mode().Perm())
|
||||
fmt.Fprintln(out, perms, size, info.Name())
|
||||
if err := scpResponse(in); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
io.CopyN(out, f, size)
|
||||
fmt.Fprintf(out, scpOK)
|
||||
|
||||
return scpResponse(in)
|
||||
}
|
||||
|
||||
// scpUploadState tracks one scp upload session. Paths are mirrored between
// the local staging area (srcRoot) and the remote target; dir is the
// walker's current position relative to both roots.
type scpUploadState struct {
	comm        packersdk.Communicator
	target      string    // target is the directory on the target
	srcRoot     string    // srcRoot is the directory on the host
	mtime       time.Time // pending modification time from a 'T' message
	atime       time.Time // pending access time from a 'T' message
	dir         string    // dir is a path relative to the roots
	targetIsDir bool
}

// DestPath returns the remote path the upload currently writes to.
func (scp scpUploadState) DestPath() string {
	return filepath.Join(scp.target, scp.dir)
}

// SrcPath returns the corresponding local staging path.
func (scp scpUploadState) SrcPath() string {
	return filepath.Join(scp.srcRoot, scp.dir)
}
|
||||
|
||||
// Protocol runs the scp sink message loop, dispatching on the leading byte
// of each message: 'T' (record times), 'C' (file), 'D' (enter directory),
// 'E' (leave directory). Only 'T' continues the loop, since a time message
// qualifies the entry that follows it; the other messages terminate it.
func (state *scpUploadState) Protocol(in *bufio.Reader, out io.Writer) error {
	for {
		b, err := in.ReadByte()
		if err != nil {
			return err
		}
		switch b {
		case 'T':
			err := state.TimeProtocol(in, out)
			if err != nil {
				return err
			}
		case 'C':
			return state.FileProtocol(in, out)
		case 'E':
			// End of directory: pop one level and acknowledge.
			state.dir = filepath.Dir(state.dir)
			fmt.Fprintf(out, scpOK)
			return nil
		case 'D':
			return state.DirProtocol(in, out)
		default:
			fmt.Fprintf(out, scpEmptyError)
			return fmt.Errorf("unexpected message: %c", b)
		}
	}
}
|
||||
|
||||
// FileProtocol consumes one 'C' (file) message: it parses the
// permissions/size/name header, acks it, streams exactly size bytes from the
// client into a communicator upload, then acks the client's trailing status
// byte.
func (state *scpUploadState) FileProtocol(in *bufio.Reader, out io.Writer) error {
	// A 'T' message applies only to the next entry, so always clear the
	// pending mtime on the way out.
	defer func() {
		state.mtime = time.Time{}
	}()

	var mode os.FileMode
	var size int64
	var name string
	_, err := fmt.Fscanf(in, "%04o %d %s\n", &mode, &size, &name)
	if err != nil {
		fmt.Fprintf(out, scpEmptyError)
		return fmt.Errorf("invalid file message: %v", err)
	}
	fmt.Fprintf(out, scpOK)

	var fi os.FileInfo = fileInfo{name: name, size: size, mode: mode, mtime: state.mtime}

	dest := state.DestPath()
	if state.targetIsDir {
		dest = filepath.Join(dest, fi.Name())
	}

	// LimitReader caps the upload at the advertised size; the client's
	// terminating status byte is consumed by scpResponse below.
	err = state.comm.Upload(dest, io.LimitReader(in, fi.Size()), &fi)
	if err != nil {
		fmt.Fprintf(out, scpEmptyError)
		return err
	}

	if err := scpResponse(in); err != nil {
		return err
	}

	fmt.Fprintf(out, scpOK)
	return nil
}
|
||||
|
||||
func (state *scpUploadState) TimeProtocol(in *bufio.Reader, out io.Writer) error {
|
||||
var m, a int64
|
||||
if _, err := fmt.Fscanf(in, "%d 0 %d 0\n", &m, &a); err != nil {
|
||||
fmt.Fprintf(out, scpEmptyError)
|
||||
return err
|
||||
}
|
||||
fmt.Fprintf(out, scpOK)
|
||||
|
||||
state.atime = time.Unix(a, 0)
|
||||
state.mtime = time.Unix(m, 0)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DirProtocol consumes a 'D' (directory) message: it parses the header,
// recreates the directory locally, applies any pending times, mirrors the
// tree to the guest, and recurses into the message loop for the directory's
// contents.
func (state *scpUploadState) DirProtocol(in *bufio.Reader, out io.Writer) error {
	var mode os.FileMode
	var length uint
	var name string

	if _, err := fmt.Fscanf(in, "%04o %d %s\n", &mode, &length, &name); err != nil {
		fmt.Fprintf(out, scpEmptyError)
		return fmt.Errorf("invalid directory message: %v", err)
	}
	fmt.Fprintf(out, scpOK)

	// NOTE(review): path is relative to the roots, so the Mkdir/Chtimes
	// below resolve against the process working directory rather than
	// state.srcRoot — presumably this was meant to join state.srcRoot first
	// (compare SrcPath). Confirm before changing; ansible normally never
	// sends 'D' messages, so this path is rarely exercised.
	path := filepath.Join(state.dir, name)
	if err := os.Mkdir(path, mode); err != nil {
		return err
	}
	state.dir = path

	// Substitute "now" for any time a preceding 'T' message did not supply.
	if state.atime.IsZero() {
		state.atime = time.Now()
	}
	if state.mtime.IsZero() {
		state.mtime = time.Now()
	}

	if err := os.Chtimes(path, state.atime, state.mtime); err != nil {
		return err
	}

	if err := state.comm.UploadDir(filepath.Dir(state.DestPath()), state.SrcPath(), nil); err != nil {
		return err
	}

	// Times apply only to the entry just processed.
	state.mtime = time.Time{}
	state.atime = time.Time{}
	return state.Protocol(in, out)
}
|
||||
|
||||
// scpDownloadState tracks the single-file scp download session.
type scpDownloadState struct {
	srcRoot string // srcRoot is the directory on the host
}
|
||||
|
||||
func (state *scpDownloadState) Protocol(in *bufio.Reader, out io.Writer) error {
|
||||
r := bufio.NewReader(in)
|
||||
// read the byte sent by the other side to start the transfer
|
||||
if err := scpResponse(r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return filepath.Walk(state.srcRoot, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if path == state.srcRoot {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
// no need to get fancy; srcRoot should only contain one file, because
|
||||
// Ansible only allows fetching a single file.
|
||||
return errors.New("unexpected directory")
|
||||
}
|
||||
|
||||
return state.FileProtocol(path, info, r, out)
|
||||
})
|
||||
}
|
||||
|
||||
// scpOptions splits a raw scp command tail into its dash-prefixed option
// letters and the remainder (normally the target path). Scanning stops at
// the first byte that is neither whitespace, a dash, nor part of an option
// group.
func scpOptions(s string) (opts []byte, rest string) {
	inOption := false
	consumed := 0
scan:
	for _, ch := range []byte(s) {
		switch ch {
		case ' ':
			inOption = false
		case '-':
			inOption = true
		default:
			if !inOption {
				break scan
			}
			opts = append(opts, ch)
		}
		consumed++
	}

	rest = s[consumed:]
	return
}
|
||||
|
||||
func scpResponse(r *bufio.Reader) error {
|
||||
code, err := r.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if code != 0 {
|
||||
message, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error reading error message: %s", err)
|
||||
}
|
||||
|
||||
// 1 is a warning. Anything higher (really just 2) is an error.
|
||||
if code > 1 {
|
||||
return errors.New(message)
|
||||
}
|
||||
|
||||
log.Println("WARNING:", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type fileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
mode os.FileMode
|
||||
mtime time.Time
|
||||
}
|
||||
|
||||
func (fi fileInfo) Name() string { return fi.name }
|
||||
func (fi fileInfo) Size() int64 { return fi.size }
|
||||
func (fi fileInfo) Mode() os.FileMode { return fi.mode }
|
||||
func (fi fileInfo) ModTime() time.Time {
|
||||
if fi.mtime.IsZero() {
|
||||
return time.Now()
|
||||
}
|
||||
return fi.mtime
|
||||
}
|
||||
func (fi fileInfo) IsDir() bool { return fi.mode.IsDir() }
|
||||
func (fi fileInfo) Sys() interface{} { return nil }
|
2123
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.go
generated
vendored
Normal file
2123
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
79
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.pigeon
generated
vendored
Normal file
79
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command.pigeon
generated
vendored
Normal file
|
@ -0,0 +1,79 @@
|
|||
// PEG grammar for Packer boot commands. Compiled by pigeon (see the
// go:generate directive in gen.go) into boot_command.go; each rule's action
// returns an AST node declared in boot_command_ast.go.
{
package bootcommand

}

Input <- expr:Expr EOF {
    return expr, nil
}

Expr <- l:( Wait / CharToggle / Special / Literal)+ {
    return l, nil
}

Wait = ExprStart "wait" duration:( Duration / Integer )? ExprEnd {
    // A bare integer means seconds; no duration at all means one second.
    var d time.Duration
    switch t := duration.(type) {
    case time.Duration:
        d = t
    case int64:
        d = time.Duration(t) * time.Second
    default:
        d = time.Second
    }
    return &waitExpression{d}, nil
}

CharToggle = ExprStart lit:(Literal) t:(On / Off) ExprEnd {
    return &literal{lit.(*literal).s, t.(KeyAction)}, nil
}

Special = ExprStart s:(SpecialKey) t:(On / Off)? ExprEnd {
    l := strings.ToLower(string(s.([]byte)))
    if t == nil {
        return &specialExpression{l, KeyPress}, nil
    }
    return &specialExpression{l, t.(KeyAction)}, nil
}

Number = '-'? Integer ( '.' Digit+ )? {
    return string(c.text), nil
}

Integer = '0' / NonZeroDigit Digit* {
    return strconv.ParseInt(string(c.text), 10, 64)
}

Duration = ( Number TimeUnit )+ {
    return time.ParseDuration(string(c.text))
}

On = "on"i {
    return KeyOn, nil
}

Off = "off"i {
    return KeyOff, nil
}

Literal = . {
    r, _ := utf8.DecodeRune(c.text)
    return &literal{r, KeyPress}, nil
}

ExprEnd = ">"
ExprStart = "<"
SpecialKey = "bs"i / "del"i / "enter"i / "esc"i / "f10"i / "f11"i / "f12"i
  / "f1"i / "f2"i / "f3"i / "f4"i / "f5"i / "f6"i / "f7"i / "f8"i / "f9"i
  / "return"i / "tab"i / "up"i / "down"i / "spacebar"i / "insert"i / "home"i
  / "end"i / "pageUp"i / "pageDown"i / "leftAlt"i / "leftCtrl"i / "leftShift"i
  / "rightAlt"i / "rightCtrl"i / "rightShift"i / "leftSuper"i / "rightSuper"i
  / "left"i / "right"i

NonZeroDigit = [1-9]
Digit = [0-9]
TimeUnit = ("ns" / "us" / "µs" / "ms" / "s" / "m" / "h")

_ "whitespace" <- [ \n\t\r]*

EOF <- !.
|
157
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command_ast.go
generated
vendored
Normal file
157
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/boot_command_ast.go
generated
vendored
Normal file
|
@ -0,0 +1,157 @@
|
|||
package bootcommand
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// KeyAction represents what we want to do with a key press.
// It can take 3 states. We either want to:
// * press the key once
// * press and hold
// * press and release
type KeyAction int

const (
	KeyOn KeyAction = 1 << iota
	KeyOff
	KeyPress
)

// String returns a human-readable name for the action. It panics on a value
// that is not one of the three declared actions, which would be a programmer
// error.
func (k KeyAction) String() string {
	switch k {
	case KeyOn:
		return "On"
	case KeyOff:
		return "Off"
	case KeyPress:
		return "Press"
	}
	// Fix: the panic message previously misspelled "Unknown".
	panic(fmt.Sprintf("Unknown KeyAction %d", k))
}
|
||||
|
||||
// expression is a single parsed boot-command element (a literal key, a
// special key, or a wait).
type expression interface {
	// Do executes the expression
	Do(context.Context, BCDriver) error
	// Validate validates the expression without executing it
	Validate() error
}
|
||||
|
||||
// expressionSequence is an ordered list of boot-command expressions, executed
// front to back.
type expressionSequence []expression

// Do executes every expression in the sequence and then flushes remaining
// scancodes.
func (s expressionSequence) Do(ctx context.Context, b BCDriver) error {
	// validate should never fail here, since it should be called before
	// expressionSequence.Do. Only reason we don't panic is so we can clean up.
	if errs := s.Validate(); errs != nil {
		return fmt.Errorf("Found an invalid boot command. This is likely an error in Packer, so please open a ticket.")
	}

	for _, exp := range s {
		// Honor cancellation between expressions.
		if err := ctx.Err(); err != nil {
			return err
		}
		if err := exp.Do(ctx, b); err != nil {
			return err
		}
	}
	return b.Flush()
}

// Validate tells us if every expression in the sequence is valid.
func (s expressionSequence) Validate() (errs []error) {
	for _, exp := range s {
		if err := exp.Validate(); err != nil {
			errs = append(errs, err)
		}
	}
	return
}
|
||||
|
||||
// GenerateExpressionSequence generates a sequence of expressions from the
// given command. This is the primary entry point to the boot command parser.
// An empty command yields an empty (valid) sequence.
func GenerateExpressionSequence(command string) (expressionSequence, error) {
	seq := expressionSequence{}
	if command == "" {
		return seq, nil
	}
	got, err := ParseReader("", strings.NewReader(command))
	if err != nil {
		return nil, err
	}
	// The generated parser returns []interface{}; its elements are presumed
	// to always be expression nodes (the grammar's actions only build AST
	// nodes), so the assertions below should be safe.
	for _, exp := range got.([]interface{}) {
		seq = append(seq, exp.(expression))
	}
	return seq, nil
}
|
||||
|
||||
type waitExpression struct {
|
||||
d time.Duration
|
||||
}
|
||||
|
||||
// Do waits the amount of time described by the expression. It is cancellable
|
||||
// through the context.
|
||||
func (w *waitExpression) Do(ctx context.Context, driver BCDriver) error {
|
||||
driver.Flush()
|
||||
log.Printf("[INFO] Waiting %s", w.d)
|
||||
select {
|
||||
case <-time.After(w.d):
|
||||
return nil
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// Validate returns an error if the time is <= 0
|
||||
func (w *waitExpression) Validate() error {
|
||||
if w.d <= 0 {
|
||||
return fmt.Errorf("Expecting a positive wait value. Got %s", w.d)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *waitExpression) String() string {
|
||||
return fmt.Sprintf("Wait<%s>", w.d)
|
||||
}
|
||||
|
||||
type specialExpression struct {
|
||||
s string
|
||||
action KeyAction
|
||||
}
|
||||
|
||||
// Do sends the special command to the driver, along with the key action.
|
||||
func (s *specialExpression) Do(ctx context.Context, driver BCDriver) error {
|
||||
return driver.SendSpecial(s.s, s.action)
|
||||
}
|
||||
|
||||
// Validate always passes
|
||||
func (s *specialExpression) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *specialExpression) String() string {
|
||||
return fmt.Sprintf("Spec-%s(%s)", s.action, s.s)
|
||||
}
|
||||
|
||||
type literal struct {
|
||||
s rune
|
||||
action KeyAction
|
||||
}
|
||||
|
||||
// Do sends the key to the driver, along with the key action.
|
||||
func (l *literal) Do(ctx context.Context, driver BCDriver) error {
|
||||
return driver.SendKey(l.s, l.action)
|
||||
}
|
||||
|
||||
// Validate always passes
|
||||
func (l *literal) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *literal) String() string {
|
||||
return fmt.Sprintf("LIT-%s(%s)", l.action, string(l.s))
|
||||
}
|
215
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/config.go
generated
vendored
Normal file
215
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/config.go
generated
vendored
Normal file
|
@ -0,0 +1,215 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package bootcommand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// PackerKeyEnv is used to specify the key interval (delay) between keystrokes
// sent to the VM, typically in boot commands. This is to prevent host CPU
// utilization from causing key presses to be skipped or repeated incorrectly.
// The value is parsed with time.ParseDuration (e.g. "10ms").
const PackerKeyEnv = "PACKER_KEY_INTERVAL"

// PackerKeyDefault 100ms is appropriate for shared build infrastructure while a
// shorter delay (e.g. 10ms) can be used on a workstation. See PackerKeyEnv.
const PackerKeyDefault = 100 * time.Millisecond
|
||||
|
||||
// The boot configuration is very important: `boot_command` specifies the keys
|
||||
// to type when the virtual machine is first booted in order to start the OS
|
||||
// installer. This command is typed after boot_wait, which gives the virtual
|
||||
// machine some time to actually load.
|
||||
//
|
||||
// The boot_command is an array of strings. The strings are all typed in
|
||||
// sequence. It is an array only to improve readability within the template.
|
||||
//
|
||||
// There are a set of special keys available. If these are in your boot
|
||||
// command, they will be replaced by the proper key:
|
||||
//
|
||||
// - `<bs>` - Backspace
|
||||
//
|
||||
// - `<del>` - Delete
|
||||
//
|
||||
// - `<enter> <return>` - Simulates an actual "enter" or "return" keypress.
|
||||
//
|
||||
// - `<esc>` - Simulates pressing the escape key.
|
||||
//
|
||||
// - `<tab>` - Simulates pressing the tab key.
|
||||
//
|
||||
// - `<f1> - <f12>` - Simulates pressing a function key.
|
||||
//
|
||||
// - `<up> <down> <left> <right>` - Simulates pressing an arrow key.
|
||||
//
|
||||
// - `<spacebar>` - Simulates pressing the spacebar.
|
||||
//
|
||||
// - `<insert>` - Simulates pressing the insert key.
|
||||
//
|
||||
// - `<home> <end>` - Simulates pressing the home and end keys.
|
||||
//
|
||||
// - `<pageUp> <pageDown>` - Simulates pressing the page up and page down
|
||||
// keys.
|
||||
//
|
||||
// - `<menu>` - Simulates pressing the Menu key.
|
||||
//
|
||||
// - `<leftAlt> <rightAlt>` - Simulates pressing the alt key.
|
||||
//
|
||||
// - `<leftCtrl> <rightCtrl>` - Simulates pressing the ctrl key.
|
||||
//
|
||||
// - `<leftShift> <rightShift>` - Simulates pressing the shift key.
|
||||
//
|
||||
// - `<leftSuper> <rightSuper>` - Simulates pressing the ⌘ or Windows key.
|
||||
//
|
||||
// - `<wait> <wait5> <wait10>` - Adds a 1, 5 or 10 second pause before
|
||||
// sending any additional keys. This is useful if you have to generally
|
||||
// wait for the UI to update before typing more.
|
||||
//
|
||||
// - `<waitXX>` - Add an arbitrary pause before sending any additional keys.
|
||||
// The format of `XX` is a sequence of positive decimal numbers, each with
|
||||
// optional fraction and a unit suffix, such as `300ms`, `1.5h` or `2h45m`.
|
||||
// Valid time units are `ns`, `us` (or `µs`), `ms`, `s`, `m`, `h`. For
|
||||
// example `<wait10m>` or `<wait1m20s>`.
|
||||
//
|
||||
// - `<XXXOn> <XXXOff>` - Any printable keyboard character, and of these
|
||||
// "special" expressions, with the exception of the `<wait>` types, can
|
||||
// also be toggled on or off. For example, to simulate ctrl+c, use
|
||||
// `<leftCtrlOn>c<leftCtrlOff>`. Be sure to release them, otherwise they
|
||||
// will be held down until the machine reboots. To hold the `c` key down,
|
||||
// you would use `<cOn>`. Likewise, `<cOff>` to release.
|
||||
//
|
||||
// - `{{ .HTTPIP }} {{ .HTTPPort }}` - The IP and port, respectively of an
|
||||
// HTTP server that is started serving the directory specified by the
|
||||
// `http_directory` configuration parameter. If `http_directory` isn't
|
||||
// specified, these will be blank!
|
||||
//
|
||||
// - `{{ .Name }}` - The name of the VM.
|
||||
//
|
||||
// Example boot command. This is actually a working boot command used to start an
|
||||
// CentOS 6.4 installer:
|
||||
//
|
||||
// In JSON:
|
||||
//
|
||||
// ```json
|
||||
// "boot_command": [
|
||||
// "<tab><wait>",
|
||||
// " ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/centos6-ks.cfg<enter>"
|
||||
// ]
|
||||
// ```
|
||||
//
|
||||
// In HCL2:
|
||||
//
|
||||
// ```hcl
|
||||
// boot_command = [
|
||||
// "<tab><wait>",
|
||||
// " ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/centos6-ks.cfg<enter>"
|
||||
// ]
|
||||
// ```
|
||||
//
|
||||
// The example shown below is a working boot command used to start an Ubuntu
|
||||
// 12.04 installer:
|
||||
//
|
||||
// In JSON:
|
||||
//
|
||||
// ```json
|
||||
// "boot_command": [
|
||||
// "<esc><esc><enter><wait>",
|
||||
// "/install/vmlinuz noapic ",
|
||||
// "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
|
||||
// "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
|
||||
// "hostname={{ .Name }} ",
|
||||
// "fb=false debconf/frontend=noninteractive ",
|
||||
// "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
|
||||
// "keyboard-configuration/variant=USA console-setup/ask_detect=false ",
|
||||
// "initrd=/install/initrd.gz -- <enter>"
|
||||
// ]
|
||||
// ```
|
||||
//
|
||||
// In HCL2:
|
||||
//
|
||||
// ```hcl
|
||||
// boot_command = [
|
||||
// "<esc><esc><enter><wait>",
|
||||
// "/install/vmlinuz noapic ",
|
||||
// "preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/preseed.cfg ",
|
||||
// "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ",
|
||||
// "hostname={{ .Name }} ",
|
||||
// "fb=false debconf/frontend=noninteractive ",
|
||||
// "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA ",
|
||||
// "keyboard-configuration/variant=USA console-setup/ask_detect=false ",
|
||||
// "initrd=/install/initrd.gz -- <enter>"
|
||||
// ]
|
||||
// ```
|
||||
//
|
||||
// For more examples of various boot commands, see the sample projects from our
|
||||
// [community templates page](/community-tools#templates).
|
||||
type BootConfig struct {
	// Time to wait after sending a group of key presses. The value of this
	// should be a duration. Examples are `5s` and `1m30s` which will cause
	// Packer to wait five seconds and one minute 30 seconds, respectively. If
	// this isn't specified, a sensible default value is picked depending on
	// the builder type.
	BootGroupInterval time.Duration `mapstructure:"boot_keygroup_interval"`
	// The time to wait after booting the initial virtual machine before typing
	// the `boot_command`. The value of this should be a duration. Examples are
	// `5s` and `1m30s` which will cause Packer to wait five seconds and one
	// minute 30 seconds, respectively. If this isn't specified, the default is
	// `10s` or 10 seconds. To set boot_wait to 0s, use a negative number, such
	// as "-1s"
	BootWait time.Duration `mapstructure:"boot_wait"`
	// This is an array of commands to type when the virtual machine is first
	// booted. The goal of these commands should be to type just enough to
	// initialize the operating system installer. Special keys can be typed as
	// well, and are covered in the section below on the boot command. If this
	// is not specified, it is assumed the installer will start itself.
	BootCommand []string `mapstructure:"boot_command"`
}
|
||||
|
||||
// The boot command "typed" character for character over a VNC connection to
// the machine, simulating a human actually typing the keyboard.
//
// Keystrokes are typed as separate key up/down events over VNC with a default
// 100ms delay. The delay alleviates issues with latency and CPU contention.
// You can tune this delay on a per-builder basis by specifying
// "boot_key_interval" in your Packer template.
type VNCConfig struct {
	BootConfig `mapstructure:",squash"`
	// Whether to create a VNC connection or not. A boot_command cannot be used
	// when this is true. Defaults to false.
	DisableVNC bool `mapstructure:"disable_vnc"`
	// Time to wait between each key press (a duration; despite the historical
	// wording, not restricted to milliseconds).
	BootKeyInterval time.Duration `mapstructure:"boot_key_interval"`
}
|
||||
|
||||
func (c *BootConfig) Prepare(ctx *interpolate.Context) (errs []error) {
|
||||
if c.BootWait == 0 {
|
||||
c.BootWait = 10 * time.Second
|
||||
}
|
||||
|
||||
if c.BootCommand != nil {
|
||||
expSeq, err := GenerateExpressionSequence(c.FlatBootCommand())
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
} else if vErrs := expSeq.Validate(); vErrs != nil {
|
||||
errs = append(errs, vErrs...)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// FlatBootCommand joins the boot_command lines into the single string that
// the boot-command parser consumes.
func (c *BootConfig) FlatBootCommand() string {
	return strings.Join(c.BootCommand, "")
}
|
||||
|
||||
func (c *VNCConfig) Prepare(ctx *interpolate.Context) (errs []error) {
|
||||
if len(c.BootCommand) > 0 && c.DisableVNC {
|
||||
errs = append(errs,
|
||||
fmt.Errorf("A boot command cannot be used when vnc is disabled."))
|
||||
}
|
||||
|
||||
errs = append(errs, c.BootConfig.Prepare(ctx)...)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
// Package bootcommand generates and sends boot commands to the remote instance.
//
// This package is relevant to people who want to create new builders, particularly
// builders with the capacity to build a VM from an iso.
//
// You can choose between three different drivers to send the command: a vnc
// driver, a usb driver, and a PC-XT keyboard driver. The driver you choose will
// depend on what kind of keyboard codes your hypervisor expects, and how you want
// to implement the connection.
package bootcommand
|
11
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/driver.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/driver.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
package bootcommand

// shiftedChars lists the characters that require the shift key to be held
// when typed on a US keyboard layout.
const shiftedChars = "~!@#$%^&*()_+{}|:\"<>?"

// BCDriver is our access to the VM we want to type boot commands to
type BCDriver interface {
	SendKey(key rune, action KeyAction) error
	SendSpecial(special string, action KeyAction) error
	// Flush will be called when we want to send scancodes to the VM.
	Flush() error
}
|
|
@ -0,0 +1,3 @@
|
|||
// Regenerates the boot-command parser (boot_command.go) from the PEG grammar
// in boot_command.pigeon; run `go generate` after changing the grammar.
//go:generate pigeon -o boot_command.go boot_command.pigeon

package bootcommand
|
213
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/pc_xt_driver.go
generated
vendored
Normal file
213
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/pc_xt_driver.go
generated
vendored
Normal file
|
@ -0,0 +1,213 @@
|
|||
package bootcommand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// SendCodeFunc will be called to send codes to the VM
type SendCodeFunc func([]string) error

// scMap maps special-key names to their press/release scancode pairs.
type scMap map[string]*scancode

// pcXTDriver buffers PC-XT keyboard scancodes and hands them to sendImpl in
// chunks when flushed.
type pcXTDriver struct {
	interval    time.Duration // pause inserted between flushed chunks
	sendImpl    SendCodeFunc  // transport supplied by the builder
	specialMap  scMap         // special-key name -> scancode pair
	scancodeMap map[rune]byte // printable rune -> make code
	buffer      [][]string    // pending, not-yet-flushed code groups
	// TODO: set from env
	scancodeChunkSize int
}

// scancode holds the make (press) and break (release) code sequences for one
// key.
type scancode struct {
	make   []string
	break_ []string
}

// makeBreak returns the press codes immediately followed by the release
// codes, i.e. a full tap of the key.
func (sc *scancode) makeBreak() []string {
	return append(sc.make, sc.break_...)
}
|
||||
|
||||
// NewPCXTDriver creates a new boot command driver for VMs that expect PC-XT
// keyboard codes. `send` should send its argument to the VM. `chunkSize` should
// be the maximum number of keyboard codes to send to `send` at one time.
func NewPCXTDriver(send SendCodeFunc, chunkSize int, interval time.Duration) *pcXTDriver {
	// We delay (default 100ms) between each input event to allow for CPU or
	// network latency. See PackerKeyEnv for tuning.
	keyInterval := PackerKeyDefault
	if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil {
		keyInterval = delay
	}
	// Override interval based on builder-specific override
	if interval > time.Duration(0) {
		keyInterval = interval
	}
	// Scancodes reference: https://www.win.tue.nl/~aeb/linux/kbd/scancodes-1.html
	//                      https://www.win.tue.nl/~aeb/linux/kbd/scancodes-10.html
	//
	// Scancodes are recorded here in pairs. The first entry represents
	// the key press and the second entry represents the key release and is
	// derived from the first by the addition of 0x80.
	sMap := make(scMap)
	sMap["bs"] = &scancode{[]string{"0e"}, []string{"8e"}}
	sMap["del"] = &scancode{[]string{"e0", "53"}, []string{"e0", "d3"}}
	sMap["down"] = &scancode{[]string{"e0", "50"}, []string{"e0", "d0"}}
	sMap["end"] = &scancode{[]string{"e0", "4f"}, []string{"e0", "cf"}}
	sMap["enter"] = &scancode{[]string{"1c"}, []string{"9c"}}
	sMap["esc"] = &scancode{[]string{"01"}, []string{"81"}}
	sMap["f1"] = &scancode{[]string{"3b"}, []string{"bb"}}
	sMap["f2"] = &scancode{[]string{"3c"}, []string{"bc"}}
	sMap["f3"] = &scancode{[]string{"3d"}, []string{"bd"}}
	sMap["f4"] = &scancode{[]string{"3e"}, []string{"be"}}
	sMap["f5"] = &scancode{[]string{"3f"}, []string{"bf"}}
	sMap["f6"] = &scancode{[]string{"40"}, []string{"c0"}}
	sMap["f7"] = &scancode{[]string{"41"}, []string{"c1"}}
	sMap["f8"] = &scancode{[]string{"42"}, []string{"c2"}}
	sMap["f9"] = &scancode{[]string{"43"}, []string{"c3"}}
	sMap["f10"] = &scancode{[]string{"44"}, []string{"c4"}}
	sMap["f11"] = &scancode{[]string{"57"}, []string{"d7"}}
	sMap["f12"] = &scancode{[]string{"58"}, []string{"d8"}}
	sMap["home"] = &scancode{[]string{"e0", "47"}, []string{"e0", "c7"}}
	sMap["insert"] = &scancode{[]string{"e0", "52"}, []string{"e0", "d2"}}
	sMap["left"] = &scancode{[]string{"e0", "4b"}, []string{"e0", "cb"}}
	sMap["leftalt"] = &scancode{[]string{"38"}, []string{"b8"}}
	sMap["leftctrl"] = &scancode{[]string{"1d"}, []string{"9d"}}
	sMap["leftshift"] = &scancode{[]string{"2a"}, []string{"aa"}}
	sMap["leftsuper"] = &scancode{[]string{"e0", "5b"}, []string{"e0", "db"}}
	sMap["menu"] = &scancode{[]string{"e0", "5d"}, []string{"e0", "dd"}}
	sMap["pagedown"] = &scancode{[]string{"e0", "51"}, []string{"e0", "d1"}}
	sMap["pageup"] = &scancode{[]string{"e0", "49"}, []string{"e0", "c9"}}
	sMap["return"] = &scancode{[]string{"1c"}, []string{"9c"}}
	sMap["right"] = &scancode{[]string{"e0", "4d"}, []string{"e0", "cd"}}
	sMap["rightalt"] = &scancode{[]string{"e0", "38"}, []string{"e0", "b8"}}
	sMap["rightctrl"] = &scancode{[]string{"e0", "1d"}, []string{"e0", "9d"}}
	sMap["rightshift"] = &scancode{[]string{"36"}, []string{"b6"}}
	sMap["rightsuper"] = &scancode{[]string{"e0", "5c"}, []string{"e0", "dc"}}
	sMap["spacebar"] = &scancode{[]string{"39"}, []string{"b9"}}
	sMap["tab"] = &scancode{[]string{"0f"}, []string{"8f"}}
	sMap["up"] = &scancode{[]string{"e0", "48"}, []string{"e0", "c8"}}

	// Each string maps, character by character, onto consecutive make codes
	// starting at the given base; a shifted row shares its base with the
	// unshifted row it sits on.
	scancodeIndex := make(map[string]byte)
	scancodeIndex["1234567890-="] = 0x02
	scancodeIndex["!@#$%^&*()_+"] = 0x02
	scancodeIndex["qwertyuiop[]"] = 0x10
	scancodeIndex["QWERTYUIOP{}"] = 0x10
	scancodeIndex["asdfghjkl;'`"] = 0x1e
	scancodeIndex[`ASDFGHJKL:"~`] = 0x1e
	scancodeIndex[`\zxcvbnm,./`] = 0x2b
	scancodeIndex["|ZXCVBNM<>?"] = 0x2b
	scancodeIndex[" "] = 0x39

	scancodeMap := make(map[rune]byte)
	for chars, start := range scancodeIndex {
		var i byte = 0
		for len(chars) > 0 {
			r, size := utf8.DecodeRuneInString(chars)
			chars = chars[size:]
			scancodeMap[r] = start + i
			i += 1
		}
	}

	return &pcXTDriver{
		interval:          keyInterval,
		sendImpl:          send,
		specialMap:        sMap,
		scancodeMap:       scancodeMap,
		scancodeChunkSize: chunkSize,
	}
}
|
||||
|
||||
// Flush send all scanecodes.
|
||||
func (d *pcXTDriver) Flush() error {
|
||||
defer func() {
|
||||
d.buffer = nil
|
||||
}()
|
||||
sc, err := chunkScanCodes(d.buffer, d.scancodeChunkSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, b := range sc {
|
||||
if err := d.sendImpl(b); err != nil {
|
||||
return err
|
||||
}
|
||||
time.Sleep(d.interval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *pcXTDriver) SendKey(key rune, action KeyAction) error {
|
||||
keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key)
|
||||
|
||||
var sc []string
|
||||
|
||||
if action&(KeyOn|KeyPress) != 0 {
|
||||
scInt := d.scancodeMap[key]
|
||||
if keyShift {
|
||||
sc = append(sc, "2a")
|
||||
}
|
||||
sc = append(sc, fmt.Sprintf("%02x", scInt))
|
||||
}
|
||||
|
||||
if action&(KeyOff|KeyPress) != 0 {
|
||||
scInt := d.scancodeMap[key] + 0x80
|
||||
if keyShift {
|
||||
sc = append(sc, "aa")
|
||||
}
|
||||
sc = append(sc, fmt.Sprintf("%02x", scInt))
|
||||
}
|
||||
|
||||
log.Printf("Sending char '%c', code '%s', shift %v",
|
||||
key, strings.Join(sc, ""), keyShift)
|
||||
|
||||
d.send(sc)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *pcXTDriver) SendSpecial(special string, action KeyAction) error {
|
||||
keyCode, ok := d.specialMap[special]
|
||||
if !ok {
|
||||
return fmt.Errorf("special %s not found.", special)
|
||||
}
|
||||
log.Printf("Special code '%s' '<%s>' found, replacing with: %v", action.String(), special, keyCode)
|
||||
|
||||
switch action {
|
||||
case KeyOn:
|
||||
d.send(keyCode.make)
|
||||
case KeyOff:
|
||||
d.send(keyCode.break_)
|
||||
case KeyPress:
|
||||
d.send(keyCode.makeBreak())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// send stores the codes in an internal buffer. Use Flush to send them.
func (d *pcXTDriver) send(codes []string) {
	// Each call appends one key event's worth of codes as a group;
	// Flush later re-chunks the buffer via chunkScanCodes without
	// splitting any single group.
	d.buffer = append(d.buffer, codes)
}
|
||||
|
||||
// chunkScanCodes flattens the per-key scancode groups in sc into chunks
// of at most size codes, never splitting a single key's group across two
// chunks. A size of 0 (or negative) disables chunking and yields a single
// chunk; a group wider than size is an error.
func chunkScanCodes(sc [][]string, size int) ([][]string, error) {
	var out [][]string
	var current []string
	for _, group := range sc {
		if size > 0 {
			if len(group) > size {
				return nil, fmt.Errorf("chunkScanCodes: size cannot be smaller than sc width.")
			}
			// Start a new chunk if this group would overflow the current one.
			if len(current)+len(group) > size {
				out = append(out, current)
				current = nil
			}
		}
		current = append(current, group...)
	}
	if current != nil {
		out = append(out, current)
	}
	return out, nil
}
|
135
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/usb_driver.go
generated
vendored
Normal file
135
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/usb_driver.go
generated
vendored
Normal file
|
@ -0,0 +1,135 @@
|
|||
package bootcommand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/mobile/event/key"
|
||||
)
|
||||
|
||||
// SendUsbScanCodes will be called to send codes to the VM
type SendUsbScanCodes func(k key.Code, down bool) error

// usbDriver emits boot-command keystrokes as USB HID key codes
// (golang.org/x/mobile/event/key values), one event at a time.
type usbDriver struct {
	sendImpl    SendUsbScanCodes    // callback that delivers a single key event to the VM
	interval    time.Duration       // delay inserted after every key event
	specialMap  map[string]key.Code // named special keys, e.g. "enter" -> CodeReturnEnter
	scancodeMap map[rune]key.Code   // printable characters -> HID key code
}
|
||||
|
||||
func NewUSBDriver(send SendUsbScanCodes, interval time.Duration) *usbDriver {
|
||||
// We delay (default 100ms) between each key event to allow for CPU or
|
||||
// network latency. See PackerKeyEnv for tuning.
|
||||
keyInterval := PackerKeyDefault
|
||||
if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil {
|
||||
keyInterval = delay
|
||||
}
|
||||
// override interval based on builder-specific override.
|
||||
if interval > time.Duration(0) {
|
||||
keyInterval = interval
|
||||
}
|
||||
|
||||
special := map[string]key.Code{
|
||||
"enter": key.CodeReturnEnter,
|
||||
"return": key.CodeReturnEnter,
|
||||
"esc": key.CodeEscape,
|
||||
"bs": key.CodeDeleteBackspace,
|
||||
"del": key.CodeDeleteForward,
|
||||
"tab": key.CodeTab,
|
||||
"f1": key.CodeF1,
|
||||
"f2": key.CodeF2,
|
||||
"f3": key.CodeF3,
|
||||
"f4": key.CodeF4,
|
||||
"f5": key.CodeF5,
|
||||
"f6": key.CodeF6,
|
||||
"f7": key.CodeF7,
|
||||
"f8": key.CodeF8,
|
||||
"f9": key.CodeF9,
|
||||
"f10": key.CodeF10,
|
||||
"f11": key.CodeF11,
|
||||
"f12": key.CodeF12,
|
||||
"insert": key.CodeInsert,
|
||||
"home": key.CodeHome,
|
||||
"end": key.CodeEnd,
|
||||
"pageUp": key.CodePageUp,
|
||||
"pageDown": key.CodePageDown,
|
||||
"left": key.CodeLeftArrow,
|
||||
"right": key.CodeRightArrow,
|
||||
"up": key.CodeUpArrow,
|
||||
"down": key.CodeDownArrow,
|
||||
"leftalt": key.CodeLeftAlt,
|
||||
"leftctrl": key.CodeLeftControl,
|
||||
"leftshift": key.CodeLeftShift,
|
||||
"rightalt": key.CodeRightAlt,
|
||||
"rightctrl": key.CodeRightControl,
|
||||
"rightshift": key.CodeRightShift,
|
||||
"leftsuper": key.CodeLeftGUI,
|
||||
"rightsuper": key.CodeRightGUI,
|
||||
"spacebar": key.CodeSpacebar,
|
||||
}
|
||||
|
||||
scancodeIndex := make(map[string]key.Code)
|
||||
scancodeIndex["abcdefghijklmnopqrstuvwxyz"] = key.CodeA
|
||||
scancodeIndex["ABCDEFGHIJKLMNOPQRSTUVWXYZ"] = key.CodeA
|
||||
scancodeIndex["1234567890"] = key.Code1
|
||||
scancodeIndex["!@#$%^&*()"] = key.Code1
|
||||
scancodeIndex[" "] = key.CodeSpacebar
|
||||
scancodeIndex["-=[]\\"] = key.CodeHyphenMinus
|
||||
scancodeIndex["_+{}|"] = key.CodeHyphenMinus
|
||||
scancodeIndex[";'`,./"] = key.CodeSemicolon
|
||||
scancodeIndex[":\"~<>?"] = key.CodeSemicolon
|
||||
|
||||
var scancodeMap = make(map[rune]key.Code)
|
||||
for chars, start := range scancodeIndex {
|
||||
for i, r := range chars {
|
||||
scancodeMap[r] = start + key.Code(i)
|
||||
}
|
||||
}
|
||||
|
||||
return &usbDriver{
|
||||
sendImpl: send,
|
||||
specialMap: special,
|
||||
interval: keyInterval,
|
||||
scancodeMap: scancodeMap,
|
||||
}
|
||||
}
|
||||
|
||||
// keyEvent delivers one key-up/key-down event through the send callback,
// then sleeps for the configured inter-key interval.
func (d *usbDriver) keyEvent(k key.Code, down bool) error {
	if err := d.sendImpl(k, down); err != nil {
		return err
	}
	time.Sleep(d.interval)
	return nil
}

// Flush is a no-op: the USB driver sends each event immediately rather
// than buffering, so there is nothing to flush.
func (d *usbDriver) Flush() error {
	return nil
}
|
||||
|
||||
// SendKey emits a single key event for the printable character k.
//
// NOTE(review): the action parameter is never consulted, and keyShift is
// passed to keyEvent as its "down" argument rather than a value derived
// from action — so KeyOn/KeyOff/KeyPress all behave identically and
// "down" tracks whether the character is shifted. This looks
// unintentional; confirm against upstream packer-plugin-sdk before
// relying on (or changing) this behavior.
func (d *usbDriver) SendKey(k rune, action KeyAction) error {
	// A character needs shift when it is an upper-case letter or one of
	// the file-level shiftedChars set.
	keyShift := unicode.IsUpper(k) || strings.ContainsRune(shiftedChars, k)
	keyCode := d.scancodeMap[k]
	log.Printf("Sending char '%c', code %s, shift %v", k, keyCode, keyShift)
	return d.keyEvent(keyCode, keyShift)
}
|
||||
|
||||
func (d *usbDriver) SendSpecial(special string, action KeyAction) (err error) {
|
||||
keyCode, ok := d.specialMap[special]
|
||||
if !ok {
|
||||
return fmt.Errorf("special %s not found.", special)
|
||||
}
|
||||
log.Printf("Special code '<%s>' found, replacing with: %s", special, keyCode)
|
||||
|
||||
switch action {
|
||||
case KeyOn:
|
||||
err = d.keyEvent(keyCode, true)
|
||||
case KeyOff, KeyPress:
|
||||
err = d.keyEvent(keyCode, false)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
149
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/vnc_driver.go
generated
vendored
Normal file
149
vendor/github.com/hashicorp/packer-plugin-sdk/bootcommand/vnc_driver.go
generated
vendored
Normal file
|
@ -0,0 +1,149 @@
|
|||
package bootcommand
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
// KeyLeftShift is the X11 keysym for the left shift key, used to bracket
// shifted characters.
const KeyLeftShift uint32 = 0xFFE1

// VNCKeyEvent is the subset of a VNC client connection the driver needs:
// sending one keysym with a down/up flag.
type VNCKeyEvent interface {
	KeyEvent(uint32, bool) error
}

// vncDriver emits boot-command keystrokes as VNC (X11) keysym events.
type vncDriver struct {
	c          VNCKeyEvent       // VNC connection to send key events over
	interval   time.Duration     // delay inserted after every key event
	specialMap map[string]uint32 // named special keys -> X11 keysym
	// keyEvent can set this error which will prevent it from continuing
	err error
}
|
||||
|
||||
func NewVNCDriver(c VNCKeyEvent, interval time.Duration) *vncDriver {
|
||||
// We delay (default 100ms) between each key event to allow for CPU or
|
||||
// network latency. See PackerKeyEnv for tuning.
|
||||
keyInterval := PackerKeyDefault
|
||||
if delay, err := time.ParseDuration(os.Getenv(PackerKeyEnv)); err == nil {
|
||||
keyInterval = delay
|
||||
}
|
||||
// override interval based on builder-specific override.
|
||||
if interval > time.Duration(0) {
|
||||
keyInterval = interval
|
||||
}
|
||||
|
||||
// Scancodes reference: https://github.com/qemu/qemu/blob/master/ui/vnc_keysym.h
|
||||
sMap := make(map[string]uint32)
|
||||
sMap["bs"] = 0xFF08
|
||||
sMap["del"] = 0xFFFF
|
||||
sMap["down"] = 0xFF54
|
||||
sMap["end"] = 0xFF57
|
||||
sMap["enter"] = 0xFF0D
|
||||
sMap["esc"] = 0xFF1B
|
||||
sMap["f1"] = 0xFFBE
|
||||
sMap["f2"] = 0xFFBF
|
||||
sMap["f3"] = 0xFFC0
|
||||
sMap["f4"] = 0xFFC1
|
||||
sMap["f5"] = 0xFFC2
|
||||
sMap["f6"] = 0xFFC3
|
||||
sMap["f7"] = 0xFFC4
|
||||
sMap["f8"] = 0xFFC5
|
||||
sMap["f9"] = 0xFFC6
|
||||
sMap["f10"] = 0xFFC7
|
||||
sMap["f11"] = 0xFFC8
|
||||
sMap["f12"] = 0xFFC9
|
||||
sMap["home"] = 0xFF50
|
||||
sMap["insert"] = 0xFF63
|
||||
sMap["left"] = 0xFF51
|
||||
sMap["leftalt"] = 0xFFE9
|
||||
sMap["leftctrl"] = 0xFFE3
|
||||
sMap["leftshift"] = 0xFFE1
|
||||
sMap["leftsuper"] = 0xFFEB
|
||||
sMap["menu"] = 0xFF67
|
||||
sMap["pagedown"] = 0xFF56
|
||||
sMap["pageup"] = 0xFF55
|
||||
sMap["return"] = 0xFF0D
|
||||
sMap["right"] = 0xFF53
|
||||
sMap["rightalt"] = 0xFFEA
|
||||
sMap["rightctrl"] = 0xFFE4
|
||||
sMap["rightshift"] = 0xFFE2
|
||||
sMap["rightsuper"] = 0xFFEC
|
||||
sMap["spacebar"] = 0x020
|
||||
sMap["tab"] = 0xFF09
|
||||
sMap["up"] = 0xFF52
|
||||
|
||||
return &vncDriver{
|
||||
c: c,
|
||||
interval: keyInterval,
|
||||
specialMap: sMap,
|
||||
}
|
||||
}
|
||||
|
||||
// keyEvent sends one keysym with a down/up flag. The first transport
// error is latched in d.err; once set, all further calls become no-ops
// that return nil, so callers read d.err at the end of a sequence.
func (d *vncDriver) keyEvent(k uint32, down bool) error {
	if d.err != nil {
		return nil
	}
	if err := d.c.KeyEvent(k, down); err != nil {
		d.err = err
		return err
	}
	// Pace events to tolerate CPU/network latency on the target.
	time.Sleep(d.interval)
	return nil
}

// Flush does nothing here
func (d *vncDriver) Flush() error {
	return nil
}
|
||||
|
||||
func (d *vncDriver) SendKey(key rune, action KeyAction) error {
|
||||
keyShift := unicode.IsUpper(key) || strings.ContainsRune(shiftedChars, key)
|
||||
keyCode := uint32(key)
|
||||
log.Printf("Sending char '%c', code 0x%X, shift %v", key, keyCode, keyShift)
|
||||
|
||||
switch action {
|
||||
case KeyOn:
|
||||
if keyShift {
|
||||
d.keyEvent(KeyLeftShift, true)
|
||||
}
|
||||
d.keyEvent(keyCode, true)
|
||||
case KeyOff:
|
||||
if keyShift {
|
||||
d.keyEvent(KeyLeftShift, false)
|
||||
}
|
||||
d.keyEvent(keyCode, false)
|
||||
case KeyPress:
|
||||
if keyShift {
|
||||
d.keyEvent(KeyLeftShift, true)
|
||||
}
|
||||
d.keyEvent(keyCode, true)
|
||||
d.keyEvent(keyCode, false)
|
||||
if keyShift {
|
||||
d.keyEvent(KeyLeftShift, false)
|
||||
}
|
||||
}
|
||||
return d.err
|
||||
}
|
||||
|
||||
func (d *vncDriver) SendSpecial(special string, action KeyAction) error {
|
||||
keyCode, ok := d.specialMap[special]
|
||||
if !ok {
|
||||
return fmt.Errorf("special %s not found.", special)
|
||||
}
|
||||
log.Printf("Special code '<%s>' found, replacing with: 0x%X", special, keyCode)
|
||||
|
||||
switch action {
|
||||
case KeyOn:
|
||||
d.keyEvent(keyCode, true)
|
||||
case KeyOff:
|
||||
d.keyEvent(keyCode, false)
|
||||
case KeyPress:
|
||||
d.keyEvent(keyCode, true)
|
||||
d.keyEvent(keyCode, false)
|
||||
}
|
||||
|
||||
return d.err
|
||||
}
|
|
@ -0,0 +1,10 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
// Cleanup is an interface that some steps implement for early cleanup.
type Cleanup interface {
	// CleanupFunc tears down whatever the step created. It may be invoked
	// ahead of the normal end-of-build cleanup pass (see StepEarlyCleanup)
	// and must therefore be safe to call more than once.
	CleanupFunc(multistep.StateBag) error
}
|
144
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/communicator.go
generated
vendored
Normal file
144
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/communicator.go
generated
vendored
Normal file
|
@ -0,0 +1,144 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/tmp"
|
||||
)
|
||||
|
||||
// Communicator is a special communicator that works by executing
// commands locally but within a chroot.
type Communicator struct {
	Chroot     string                // root directory commands are chrooted into
	CmdWrapper common.CommandWrapper // wraps each shell command (e.g. with sudo)
}
||||
|
||||
// Start runs cmd.Command inside the chroot via "chroot ... /bin/sh -c",
// after passing it through the configured CmdWrapper. It returns as soon
// as the command has started; a background goroutine waits for completion
// and reports the exit status through cmd.SetExited.
func (c *Communicator) Start(ctx context.Context, cmd *packersdk.RemoteCmd) error {
	// need extra escapes for the command since we're wrapping it in quotes
	cmd.Command = strconv.Quote(cmd.Command)
	command, err := c.CmdWrapper(
		fmt.Sprintf("chroot %s /bin/sh -c %s", c.Chroot, cmd.Command))
	if err != nil {
		return err
	}

	// Wire the remote command's streams straight onto the local process.
	localCmd := common.ShellCommand(command)
	localCmd.Stdin = cmd.Stdin
	localCmd.Stdout = cmd.Stdout
	localCmd.Stderr = cmd.Stderr
	log.Printf("Executing: %s %#v", localCmd.Path, localCmd.Args)
	if err := localCmd.Start(); err != nil {
		return err
	}

	go func() {
		exitStatus := 0
		if err := localCmd.Wait(); err != nil {
			if exitErr, ok := err.(*exec.ExitError); ok {
				// Fallback of 1 if the platform-specific status below
				// is unavailable.
				exitStatus = 1

				// There is no process-independent way to get the REAL
				// exit status so we just try to go deeper.
				if status, ok := exitErr.Sys().(syscall.WaitStatus); ok {
					exitStatus = status.ExitStatus()
				}
			}
		}

		log.Printf(
			"Chroot execution exited with '%d': '%s'",
			exitStatus, cmd.Command)
		cmd.SetExited(exitStatus)
	}()

	return nil
}
|
||||
|
||||
func (c *Communicator) Upload(dst string, r io.Reader, fi *os.FileInfo) error {
|
||||
dst = filepath.Join(c.Chroot, dst)
|
||||
log.Printf("Uploading to chroot dir: %s", dst)
|
||||
tf, err := tmp.File("packer-amazon-chroot")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error preparing shell script: %s", err)
|
||||
}
|
||||
defer os.Remove(tf.Name())
|
||||
|
||||
if _, err := io.Copy(tf, r); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp %s %s", tf.Name(), dst))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return common.ShellCommand(cpCmd).Run()
|
||||
}
|
||||
|
||||
func (c *Communicator) UploadDir(dst string, src string, exclude []string) error {
|
||||
// If src ends with a trailing "/", copy from "src/." so that
|
||||
// directory contents (including hidden files) are copied, but the
|
||||
// directory "src" is omitted. BSD does this automatically when
|
||||
// the source contains a trailing slash, but linux does not.
|
||||
if src[len(src)-1] == '/' {
|
||||
src = src + "."
|
||||
}
|
||||
|
||||
// TODO: remove any file copied if it appears in `exclude`
|
||||
chrootDest := filepath.Join(c.Chroot, dst)
|
||||
|
||||
log.Printf("Uploading directory '%s' to '%s'", src, chrootDest)
|
||||
cpCmd, err := c.CmdWrapper(fmt.Sprintf("cp -R '%s' %s", src, chrootDest))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd := common.ShellCommand(cpCmd)
|
||||
cmd.Env = append(cmd.Env, "LANG=C")
|
||||
cmd.Env = append(cmd.Env, os.Environ()...)
|
||||
cmd.Stderr = &stderr
|
||||
err = cmd.Run()
|
||||
if err == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if strings.Contains(stderr.String(), "No such file") {
|
||||
// This just means that the directory was empty. Just ignore it.
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Communicator) DownloadDir(src string, dst string, exclude []string) error {
|
||||
return fmt.Errorf("DownloadDir is not implemented for amazon-chroot")
|
||||
}
|
||||
|
||||
func (c *Communicator) Download(src string, w io.Writer) error {
|
||||
src = filepath.Join(c.Chroot, src)
|
||||
log.Printf("Downloading from chroot dir: %s", src)
|
||||
f, err := os.Open(src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
if _, err := io.Copy(w, f); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
/*
|
||||
Package chroot provides convenience tooling specific to chroot builders.
|
||||
|
||||
Chroot builders work by creating a new volume from an existing source image and
|
||||
attaching it into an already-running instance. Once attached, a chroot is used
|
||||
to provision the system within that volume. After provisioning, the volume is
|
||||
detached, snapshotted, and a cloud-specific image is made.
|
||||
|
||||
Using this process, minutes can be shaved off image build processes because a
|
||||
new instance doesn't need to be launched in the cloud before provisioning can
|
||||
take place.
|
||||
|
||||
There are some restrictions, however. The host instance where the volume is
|
||||
attached to must be a similar system (generally the same OS version, kernel
|
||||
versions, etc.) as the image being built. Additionally, this process is much
|
||||
more expensive because the instance used to perform the build must be kept
|
||||
running persistently in order to build images, whereas the other non-chroot
|
||||
cloud image builders start instances on-demand to build images as needed.
|
||||
|
||||
The HashiCorp-maintained Amazon and Azure builder plugins have chroot builders
|
||||
which use this option and can serve as an example for how the chroot steps and
|
||||
communicator are used.
|
||||
*/
|
||||
package chroot
|
7
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/interpolate_context_provider.go
generated
vendored
Normal file
7
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/interpolate_context_provider.go
generated
vendored
Normal file
|
@ -0,0 +1,7 @@
|
|||
package chroot
|
||||
|
||||
import "github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
|
||||
// interpolateContextProvider is implemented by builder configs that can
// hand out their template interpolation context (used by the post-mount
// command steps to render user-supplied commands).
type interpolateContextProvider interface {
	GetContext() interpolate.Context
}
|
42
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/run_local_commands.go
generated
vendored
Normal file
42
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/run_local_commands.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
sl "github.com/hashicorp/packer-plugin-sdk/shell-local"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
func RunLocalCommands(commands []string, wrappedCommand common.CommandWrapper, ictx interpolate.Context, ui packersdk.Ui) error {
|
||||
ctx := context.TODO()
|
||||
for _, rawCmd := range commands {
|
||||
intCmd, err := interpolate.Render(rawCmd, &ictx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error interpolating: %s", err)
|
||||
}
|
||||
|
||||
command, err := wrappedCommand(intCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error wrapping command: %s", err)
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Executing command: %s", command))
|
||||
comm := &sl.Communicator{
|
||||
ExecuteCommand: []string{"sh", "-c", command},
|
||||
}
|
||||
cmd := &packersdk.RemoteCmd{Command: command}
|
||||
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
|
||||
return fmt.Errorf("Error executing command: %s", err)
|
||||
}
|
||||
if cmd.ExitStatus() != 0 {
|
||||
return fmt.Errorf(
|
||||
"Received non-zero exit code %d from command: %s",
|
||||
cmd.ExitStatus(),
|
||||
command)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
46
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_chroot_provision.go
generated
vendored
Normal file
46
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_chroot_provision.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepChrootProvision provisions the instance within a chroot.
|
||||
type StepChrootProvision struct {
|
||||
}
|
||||
|
||||
func (s *StepChrootProvision) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
hook := state.Get("hook").(packersdk.Hook)
|
||||
mountPath := state.Get("mount_path").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
// Create our communicator
|
||||
comm := &Communicator{
|
||||
Chroot: mountPath,
|
||||
CmdWrapper: wrappedCommand,
|
||||
}
|
||||
|
||||
// Loads hook data from builder's state, if it has been set.
|
||||
hookData := commonsteps.PopulateProvisionHookData(state)
|
||||
|
||||
// Update state generated_data with complete hookData
|
||||
// to make them accessible by post-processors
|
||||
state.Put("generated_data", hookData)
|
||||
|
||||
// Provision
|
||||
log.Println("Running the provision hook")
|
||||
if err := hook.Run(ctx, packersdk.HookProvision, ui, comm, hookData); err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepChrootProvision) Cleanup(state multistep.StateBag) {}
|
103
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_copy_files.go
generated
vendored
Normal file
103
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_copy_files.go
generated
vendored
Normal file
|
@ -0,0 +1,103 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepCopyFiles copies some files from the host into the chroot environment.
//
// Produces:
//   copy_files_cleanup CleanupFunc - A function to clean up the copied files
//   early.
type StepCopyFiles struct {
	Files []string // host paths to copy into the chroot
	files []string // chroot-side destinations, recorded for cleanup
}
|
||||
|
||||
func (s *StepCopyFiles) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
mountPath := state.Get("mount_path").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
stderr := new(bytes.Buffer)
|
||||
|
||||
s.files = make([]string, 0, len(s.Files))
|
||||
if len(s.Files) > 0 {
|
||||
ui.Say("Copying files from host to chroot...")
|
||||
var removeDestinationOption string
|
||||
switch runtime.GOOS {
|
||||
case "freebsd":
|
||||
// The -f option here is closer to GNU --remove-destination than
|
||||
// what POSIX says -f should do.
|
||||
removeDestinationOption = "-f"
|
||||
default:
|
||||
// This is the GNU binutils version.
|
||||
removeDestinationOption = "--remove-destination"
|
||||
}
|
||||
for _, path := range s.Files {
|
||||
ui.Message(path)
|
||||
chrootPath := filepath.Join(mountPath, path)
|
||||
log.Printf("Copying '%s' to '%s'", path, chrootPath)
|
||||
|
||||
cmdText, err := wrappedCommand(fmt.Sprintf("cp %s %s %s", removeDestinationOption, path, chrootPath))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error building copy command: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
stderr.Reset()
|
||||
cmd := common.ShellCommand(cmdText)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
err := fmt.Errorf(
|
||||
"Error copying file: %s\nnStderr: %s", err, stderr.String())
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.files = append(s.files, chrootPath)
|
||||
}
|
||||
}
|
||||
|
||||
state.Put("copy_files_cleanup", s)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCopyFiles) Cleanup(state multistep.StateBag) {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
if err := s.CleanupFunc(state); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
func (s *StepCopyFiles) CleanupFunc(state multistep.StateBag) error {
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
if s.files != nil {
|
||||
for _, file := range s.files {
|
||||
log.Printf("Removing: %s", file)
|
||||
localCmdText, err := wrappedCommand(fmt.Sprintf("rm -f %s", file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
localCmd := common.ShellCommand(localCmdText)
|
||||
if err := localCmd.Run(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s.files = nil
|
||||
return nil
|
||||
}
|
39
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_early_cleanup.go
generated
vendored
Normal file
39
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_early_cleanup.go
generated
vendored
Normal file
|
@ -0,0 +1,39 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepEarlyCleanup performs some of the cleanup steps early in order to
|
||||
// prepare for snapshotting and creating an AMI.
|
||||
type StepEarlyCleanup struct{}
|
||||
|
||||
func (s *StepEarlyCleanup) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
cleanupKeys := []string{
|
||||
"copy_files_cleanup",
|
||||
"mount_extra_cleanup",
|
||||
"mount_device_cleanup",
|
||||
"attach_cleanup",
|
||||
}
|
||||
|
||||
for _, key := range cleanupKeys {
|
||||
c := state.Get(key).(Cleanup)
|
||||
log.Printf("Running cleanup func: %s", key)
|
||||
if err := c.CleanupFunc(state); err != nil {
|
||||
err := fmt.Errorf("Error cleaning up: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepEarlyCleanup) Cleanup(state multistep.StateBag) {}
|
138
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_mount_extra.go
generated
vendored
Normal file
138
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_mount_extra.go
generated
vendored
Normal file
|
@ -0,0 +1,138 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepMountExtra mounts the attached device.
//
// Produces:
//   mount_extra_cleanup CleanupFunc - To perform early cleanup
type StepMountExtra struct {
	// ChrootMounts lists the extra mounts as [fstype, source, chroot-relative
	// mountpoint] triples; an fstype of "bind" becomes a --bind mount.
	ChrootMounts [][]string
	mounts       []string // successfully mounted paths, for unmounting later
}
|
||||
|
||||
func (s *StepMountExtra) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
mountPath := state.Get("mount_path").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
s.mounts = make([]string, 0, len(s.ChrootMounts))
|
||||
|
||||
ui.Say("Mounting additional paths within the chroot...")
|
||||
for _, mountInfo := range s.ChrootMounts {
|
||||
innerPath := mountPath + mountInfo[2]
|
||||
|
||||
if err := os.MkdirAll(innerPath, 0755); err != nil {
|
||||
err := fmt.Errorf("Error creating mount directory: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
flags := "-t " + mountInfo[0]
|
||||
if mountInfo[0] == "bind" {
|
||||
flags = "--bind"
|
||||
}
|
||||
|
||||
ui.Message(fmt.Sprintf("Mounting: %s", mountInfo[2]))
|
||||
stderr := new(bytes.Buffer)
|
||||
mountCommand, err := wrappedCommand(fmt.Sprintf(
|
||||
"mount %s %s %s",
|
||||
flags,
|
||||
mountInfo[1],
|
||||
innerPath))
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating mount command: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
cmd := common.ShellCommand(mountCommand)
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
err := fmt.Errorf(
|
||||
"Error mounting: %s\nStderr: %s", err, stderr.String())
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
s.mounts = append(s.mounts, innerPath)
|
||||
}
|
||||
|
||||
state.Put("mount_extra_cleanup", s)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepMountExtra) Cleanup(state multistep.StateBag) {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if err := s.CleanupFunc(state); err != nil {
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// CleanupFunc unmounts the recorded mounts in reverse order (later mounts
// may be nested inside earlier ones), skipping paths that /proc/mounts no
// longer lists. Safe to call repeatedly: s.mounts is cleared after a
// successful pass and a nil list is a no-op.
func (s *StepMountExtra) CleanupFunc(state multistep.StateBag) error {
	if s.mounts == nil {
		return nil
	}

	wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
	for len(s.mounts) > 0 {
		// Pop the most recently mounted path first (LIFO).
		var path string
		lastIndex := len(s.mounts) - 1
		path, s.mounts = s.mounts[lastIndex], s.mounts[:lastIndex]

		grepCommand, err := wrappedCommand(fmt.Sprintf("grep %s /proc/mounts", path))
		if err != nil {
			return fmt.Errorf("Error creating grep command: %s", err)
		}

		// Before attempting to unmount,
		// check to see if path is already unmounted
		stderr := new(bytes.Buffer)
		cmd := common.ShellCommand(grepCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			if exitError, ok := err.(*exec.ExitError); ok {
				if status, ok := exitError.Sys().(syscall.WaitStatus); ok {
					exitStatus := status.ExitStatus()
					// grep exits 1 when no line matched.
					if exitStatus == 1 {
						// path has already been unmounted
						// just skip this path
						continue
					}
				}
			}
		}

		unmountCommand, err := wrappedCommand(fmt.Sprintf("umount %s", path))
		if err != nil {
			return fmt.Errorf("Error creating unmount command: %s", err)
		}

		stderr = new(bytes.Buffer)
		cmd = common.ShellCommand(unmountCommand)
		cmd.Stderr = stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf(
				"Error unmounting device: %s\nStderr: %s", err, stderr.String())
		}
	}

	s.mounts = nil
	return nil
}
|
48
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_post_mount_commands.go
generated
vendored
Normal file
48
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_post_mount_commands.go
generated
vendored
Normal file
|
@ -0,0 +1,48 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// postMountCommandsData is the interpolation data exposed to each
// post-mount command (as {{.Device}} / {{.MountPath}}).
type postMountCommandsData struct {
	Device    string // device string read from the "device" state-bag entry
	MountPath string // directory read from the "mount_path" state-bag entry
}
|
||||
|
||||
// StepPostMountCommands allows running arbitrary commands after mounting the
// device, but prior to the bind mount and copy steps.
type StepPostMountCommands struct {
	// Commands is the list of shell commands to run; each is interpolated
	// with postMountCommandsData before execution.
	Commands []string
}
|
||||
|
||||
func (s *StepPostMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(interpolateContextProvider)
|
||||
device := state.Get("device").(string)
|
||||
mountPath := state.Get("mount_path").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
if len(s.Commands) == 0 {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ictx := config.GetContext()
|
||||
ictx.Data = &postMountCommandsData{
|
||||
Device: device,
|
||||
MountPath: mountPath,
|
||||
}
|
||||
|
||||
ui.Say("Running post-mount commands...")
|
||||
if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup is a no-op: this step creates nothing that needs tearing down.
func (s *StepPostMountCommands) Cleanup(state multistep.StateBag) {}
|
42
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_pre_mount_commands.go
generated
vendored
Normal file
42
vendor/github.com/hashicorp/packer-plugin-sdk/chroot/step_pre_mount_commands.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
package chroot
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// preMountCommandsData is the interpolation data exposed to each
// pre-mount command (as {{.Device}}).
type preMountCommandsData struct {
	Device string // device string read from the "device" state-bag entry
}
|
||||
|
||||
// StepPreMountCommands sets up a new block device when building from scratch:
// its commands run after the device is available but before it is mounted.
type StepPreMountCommands struct {
	// Commands is the list of shell commands to run; each is interpolated
	// with preMountCommandsData before execution.
	Commands []string
}
|
||||
|
||||
func (s *StepPreMountCommands) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
config := state.Get("config").(interpolateContextProvider)
|
||||
device := state.Get("device").(string)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
wrappedCommand := state.Get("wrappedCommand").(common.CommandWrapper)
|
||||
|
||||
if len(s.Commands) == 0 {
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ictx := config.GetContext()
|
||||
ictx.Data = &preMountCommandsData{Device: device}
|
||||
|
||||
ui.Say("Running device setup commands...")
|
||||
if err := RunLocalCommands(s.Commands, wrappedCommand, ictx, ui); err != nil {
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup is a no-op: this step creates nothing that needs tearing down.
func (s *StepPreMountCommands) Cleanup(state multistep.StateBag) {}
|
|
@ -0,0 +1,27 @@
|
|||
// Package common provides the PackerConfig structure that gets passed to every
|
||||
// plugin and contains information populated by the Packer core. This config
|
||||
// contains data about command line flags that were used, as well as template
|
||||
// information and information about the Packer core's version. It also
|
||||
// provides string constants to use to access that config.
|
||||
package common
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// CommandWrapper is a type that given a command, will modify that
// command in-flight. This might return an error.
// For example, your command could be `foo` and your CommandWrapper could be
//
//	func(s string) (string, error) {
//		return fmt.Sprintf("/bin/sh -c '%s'", s), nil
//	}
//
// Using the CommandWrapper, you can set environment variables or perform
// string interpolation once rather than many times, to save some lines of code
// if similar wrapping needs to be performed many times during a plugin run.
type CommandWrapper func(string) (string, error)
|
||||
|
||||
// ShellCommand takes a command string and returns an *exec.Cmd that will
// execute it within the context of a shell (/bin/sh -c). The command is
// not started; the caller runs it.
func ShellCommand(command string) *exec.Cmd {
	cmd := exec.Command("/bin/sh", "-c", command)
	return cmd
}
|
53
vendor/github.com/hashicorp/packer-plugin-sdk/common/packer_config.go
generated
vendored
Normal file
53
vendor/github.com/hashicorp/packer-plugin-sdk/common/packer_config.go
generated
vendored
Normal file
|
@ -0,0 +1,53 @@
|
|||
package common
|
||||
|
||||
// Keys the Packer core sets in every plugin's configuration map; plugins
// typically read them through the PackerConfig struct.
const (
	// This is the key in configurations that is set to the name of the
	// build.
	BuildNameConfigKey = "packer_build_name"

	// This is the key in the configuration that is set to the type
	// of the builder that is run. This is useful for provisioners and
	// such who want to make use of this.
	BuilderTypeConfigKey = "packer_builder_type"

	// This is the key in the configuration that is set to the version of the
	// Packer Core. This can be used by plugins to set user agents, etc, without
	// having to import the Core to find out the Packer version.
	CoreVersionConfigKey = "packer_core_version"

	// This is the key in configurations that is set to "true" when Packer
	// debugging is enabled.
	DebugConfigKey = "packer_debug"

	// This is the key in configurations that is set to "true" when Packer
	// force build is enabled.
	ForceConfigKey = "packer_force"

	// This key determines what to do when a normal multistep step fails
	// - "cleanup" - run cleanup steps
	// - "abort" - exit without cleanup
	// - "ask" - ask the user
	OnErrorConfigKey = "packer_on_error"

	// TemplatePathKey is the path to the template that configured this build
	TemplatePathKey = "packer_template_path"

	// This key contains a map[string]string of the user variables for
	// template processing.
	UserVariablesConfigKey = "packer_user_variables"
)
|
||||
|
||||
// PackerConfig is a struct that contains the configuration keys that
// are sent by packer, properly tagged already so mapstructure can load
// them. Embed this structure into your configuration class to get access to
// this information from the Packer Core.
//
// NOTE(review): packer_sensitive_variables has no matching *ConfigKey
// constant above — confirm whether that omission is intentional.
type PackerConfig struct {
	PackerBuildName     string            `mapstructure:"packer_build_name"`
	PackerBuilderType   string            `mapstructure:"packer_builder_type"`
	PackerCoreVersion   string            `mapstructure:"packer_core_version"`
	PackerDebug         bool              `mapstructure:"packer_debug"`
	PackerForce         bool              `mapstructure:"packer_force"`
	PackerOnError       string            `mapstructure:"packer_on_error"`
	PackerUserVars      map[string]string `mapstructure:"packer_user_variables"`
	PackerSensitiveVars []string          `mapstructure:"packer_sensitive_variables"`
}
|
26
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/comm_host.go
generated
vendored
Normal file
26
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/comm_host.go
generated
vendored
Normal file
|
@ -0,0 +1,26 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
// CommHost determines the IP address of the cloud instance that Packer
|
||||
// should connect to. A custom CommHost function can be implemented in each
|
||||
// builder if need be; this is a generic function that should work for most
|
||||
// cloud builders.
|
||||
func CommHost(host string, statebagKey string) func(multistep.StateBag) (string, error) {
|
||||
return func(state multistep.StateBag) (string, error) {
|
||||
if host != "" {
|
||||
log.Printf("Using host value: %s", host)
|
||||
return host, nil
|
||||
}
|
||||
ipAddress, hasIP := state.Get(statebagKey).(string)
|
||||
if !hasIP {
|
||||
return "", fmt.Errorf("Failed to retrieve IP address.")
|
||||
}
|
||||
return ipAddress, nil
|
||||
}
|
||||
}
|
632
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.go
generated
vendored
Normal file
632
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.go
generated
vendored
Normal file
|
@ -0,0 +1,632 @@
|
|||
//go:generate struct-markdown
|
||||
//go:generate mapstructure-to-hcl2 -type Config,SSH,WinRM,SSHTemporaryKeyPair
|
||||
|
||||
package communicator
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
helperssh "github.com/hashicorp/packer-plugin-sdk/communicator/ssh"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/pathing"
|
||||
packerssh "github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/masterzen/winrm"
|
||||
"golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
)
|
||||
|
||||
// Config is the common configuration a builder uses to define and configure a Packer
// communicator. Embed this struct in your builder config to implement
// communicator support.
type Config struct {
	// Packer currently supports three kinds of communicators:
	//
	// - `none` - No communicator will be used. If this is set, most
	//   provisioners also can't be used.
	//
	// - `ssh` - An SSH connection will be established to the machine. This
	//   is usually the default.
	//
	// - `winrm` - A WinRM connection will be established.
	//
	// In addition to the above, some builders have custom communicators they
	// can use. For example, the Docker builder has a "docker" communicator
	// that uses `docker exec` and `docker cp` to execute scripts and copy
	// files.
	Type string `mapstructure:"communicator"`
	// We recommend that you enable SSH or WinRM as the very last step in your
	// guest's bootstrap script, but sometimes you may have a race condition
	// where you need Packer to wait before attempting to connect to your
	// guest.
	//
	// If you end up in this situation, you can use the template option
	// `pause_before_connecting`. By default, there is no pause. For example if
	// you set `pause_before_connecting` to `10m` Packer will check whether it
	// can connect, as normal. But once a connection attempt is successful, it
	// will disconnect and then wait 10 minutes before connecting to the guest
	// and beginning provisioning.
	PauseBeforeConnect time.Duration `mapstructure:"pause_before_connecting"`

	// The type-specific options are squashed in, so e.g. ssh_port decodes
	// straight into Config.
	SSH   `mapstructure:",squash"`
	WinRM `mapstructure:",squash"`
}
|
||||
|
||||
// The SSH config defines configuration for the SSH communicator.
type SSH struct {
	// The address to SSH to. This usually is automatically configured by the
	// builder.
	SSHHost string `mapstructure:"ssh_host"`
	// The port to connect to SSH. This defaults to `22`.
	SSHPort int `mapstructure:"ssh_port"`
	// The username to connect to SSH with. Required if using SSH.
	SSHUsername string `mapstructure:"ssh_username"`
	// A plaintext password to use to authenticate with SSH.
	SSHPassword string `mapstructure:"ssh_password"`
	// If specified, this is the key that will be used for SSH with the
	// machine. The key must match a key pair name loaded up into the remote.
	// By default, this is blank, and Packer will generate a temporary keypair
	// unless [`ssh_password`](#ssh_password) is used.
	// [`ssh_private_key_file`](#ssh_private_key_file) or
	// [`ssh_agent_auth`](#ssh_agent_auth) must be specified when
	// [`ssh_keypair_name`](#ssh_keypair_name) is utilized.
	SSHKeyPairName string `mapstructure:"ssh_keypair_name" undocumented:"true"`
	// The name of the temporary key pair to generate. By default, Packer
	// generates a name that looks like `packer_<UUID>`, where <UUID> is
	// a 36 character unique identifier.
	SSHTemporaryKeyPairName string `mapstructure:"temporary_key_pair_name" undocumented:"true"`
	SSHTemporaryKeyPair     `mapstructure:",squash"`
	// This overrides the value of ciphers supported by default by golang.
	// The default value is [
	//   "aes128-gcm@openssh.com",
	//   "chacha20-poly1305@openssh.com",
	//   "aes128-ctr", "aes192-ctr", "aes256-ctr",
	// ]
	//
	// Valid options for ciphers include:
	// "aes128-ctr", "aes192-ctr", "aes256-ctr", "aes128-gcm@openssh.com",
	// "chacha20-poly1305@openssh.com",
	// "arcfour256", "arcfour128", "arcfour", "aes128-cbc", "3des-cbc",
	SSHCiphers []string `mapstructure:"ssh_ciphers"`
	// If true, Packer will attempt to remove its temporary key from
	// `~/.ssh/authorized_keys` and `/root/.ssh/authorized_keys`. This is a
	// mostly cosmetic option, since Packer will delete the temporary private
	// key from the host system regardless of whether this is set to true
	// (unless the user has set the `-debug` flag). Defaults to "false";
	// currently only works on guests with `sed` installed.
	SSHClearAuthorizedKeys bool `mapstructure:"ssh_clear_authorized_keys"`
	// If set, Packer will override the value of key exchange (kex) algorithms
	// supported by default by golang. Acceptable values include:
	// "curve25519-sha256@libssh.org", "ecdh-sha2-nistp256",
	// "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
	// "diffie-hellman-group14-sha1", and "diffie-hellman-group1-sha1".
	SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms"`
	// Path to a PEM encoded private key file to use to authenticate with SSH.
	// The `~` can be used in path and will be expanded to the home directory
	// of current user.
	SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file" undocumented:"true"`
	// Path to user certificate used to authenticate with SSH.
	// The `~` can be used in path and will be expanded to the
	// home directory of current user.
	SSHCertificateFile string `mapstructure:"ssh_certificate_file"`
	// If `true`, a PTY will be requested for the SSH connection. This defaults
	// to `false`.
	SSHPty bool `mapstructure:"ssh_pty"`
	// The time to wait for SSH to become available. Packer uses this to
	// determine when the machine has booted so this is usually quite long.
	// Example value: `10m`.
	SSHTimeout time.Duration `mapstructure:"ssh_timeout"`
	// Deprecated: use SSHTimeout instead.
	SSHWaitTimeout time.Duration `mapstructure:"ssh_wait_timeout" undocumented:"true"`
	// If true, the local SSH agent will be used to authenticate connections to
	// the source instance. No temporary keypair will be created, and the
	// values of [`ssh_password`](#ssh_password) and
	// [`ssh_private_key_file`](#ssh_private_key_file) will be ignored. The
	// environment variable `SSH_AUTH_SOCK` must be set for this option to work
	// properly.
	SSHAgentAuth bool `mapstructure:"ssh_agent_auth" undocumented:"true"`
	// If true, SSH agent forwarding will be disabled. Defaults to `false`.
	SSHDisableAgentForwarding bool `mapstructure:"ssh_disable_agent_forwarding"`
	// The number of handshakes to attempt with SSH once it can connect. This
	// defaults to `10`.
	SSHHandshakeAttempts int `mapstructure:"ssh_handshake_attempts"`
	// A bastion host to use for the actual SSH connection.
	SSHBastionHost string `mapstructure:"ssh_bastion_host"`
	// The port of the bastion host. Defaults to `22`.
	SSHBastionPort int `mapstructure:"ssh_bastion_port"`
	// If `true`, the local SSH agent will be used to authenticate with the
	// bastion host. Defaults to `false`.
	SSHBastionAgentAuth bool `mapstructure:"ssh_bastion_agent_auth"`
	// The username to connect to the bastion host.
	SSHBastionUsername string `mapstructure:"ssh_bastion_username"`
	// The password to use to authenticate with the bastion host.
	SSHBastionPassword string `mapstructure:"ssh_bastion_password"`
	// If `true`, the keyboard-interactive used to authenticate with bastion host.
	SSHBastionInteractive bool `mapstructure:"ssh_bastion_interactive"`
	// Path to a PEM encoded private key file to use to authenticate with the
	// bastion host. The `~` can be used in path and will be expanded to the
	// home directory of current user.
	SSHBastionPrivateKeyFile string `mapstructure:"ssh_bastion_private_key_file"`
	// Path to user certificate used to authenticate with bastion host.
	// The `~` can be used in path and will be expanded to the
	// home directory of current user.
	SSHBastionCertificateFile string `mapstructure:"ssh_bastion_certificate_file"`
	// `scp` or `sftp` - How to transfer files, Secure copy (default) or SSH
	// File Transfer Protocol.
	SSHFileTransferMethod string `mapstructure:"ssh_file_transfer_method"`
	// A SOCKS proxy host to use for SSH connection
	SSHProxyHost string `mapstructure:"ssh_proxy_host"`
	// A port of the SOCKS proxy. Defaults to `1080`.
	SSHProxyPort int `mapstructure:"ssh_proxy_port"`
	// The optional username to authenticate with the proxy server.
	SSHProxyUsername string `mapstructure:"ssh_proxy_username"`
	// The optional password to use to authenticate with the proxy server.
	SSHProxyPassword string `mapstructure:"ssh_proxy_password"`
	// How often to send "keep alive" messages to the server. Set to a negative
	// value (`-1s`) to disable. Example value: `10s`. Defaults to `5s`.
	SSHKeepAliveInterval time.Duration `mapstructure:"ssh_keep_alive_interval"`
	// The amount of time to wait for a remote command to end. This might be
	// useful if, for example, packer hangs on a connection after a reboot.
	// Example: `5m`. Disabled by default.
	SSHReadWriteTimeout time.Duration `mapstructure:"ssh_read_write_timeout"`

	// Tunneling

	// Remote tunnels to establish on the SSH connection — presumably one
	// spec per entry; TODO confirm the expected string format from the code
	// that consumes SSHRemoteTunnels.
	SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels"`
	// Local tunnels to establish on the SSH connection — see note above;
	// format not documented here.
	SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels"`

	// SSH Internals: populated programmatically (e.g. by key-generation
	// steps), not documented template options.
	SSHPublicKey  []byte `mapstructure:"ssh_public_key" undocumented:"true"`
	SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true"`
}
|
||||
|
||||
// When no ssh credentials are specified, Packer will generate a temporary SSH
// keypair for the instance. You can change the algorithm type and bits
// settings.
type SSHTemporaryKeyPair struct {
	// `dsa` | `ecdsa` | `ed25519` | `rsa` ( the default )
	//
	// Specifies the type of key to create. The possible values are 'dsa',
	// 'ecdsa', 'ed25519', or 'rsa'.
	SSHTemporaryKeyPairType string `mapstructure:"temporary_key_pair_type"`
	// Specifies the number of bits in the key to create. For RSA keys, the
	// minimum size is 1024 bits and the default is 4096 bits. Generally, 3072
	// bits is considered sufficient. DSA keys must be exactly 1024 bits as
	// specified by FIPS 186-2. For ECDSA keys, bits determines the key length
	// by selecting from one of three elliptic curve sizes: 256, 384 or 521
	// bits. Attempting to use bit lengths other than these three values for
	// ECDSA keys will fail. Ed25519 keys have a fixed length and bits will be
	// ignored.
	SSHTemporaryKeyPairBits int `mapstructure:"temporary_key_pair_bits"`
}
|
||||
|
||||
// The WinRM config defines configuration for the WinRM communicator.
type WinRM struct {
	// The username to use to connect to WinRM.
	WinRMUser string `mapstructure:"winrm_username"`
	// The password to use to connect to WinRM.
	WinRMPassword string `mapstructure:"winrm_password"`
	// The address for WinRM to connect to.
	//
	// NOTE: If using an Amazon EBS builder, you can specify the interface
	// WinRM connects to via
	// [`ssh_interface`](/docs/builders/amazon-ebs#ssh_interface)
	WinRMHost string `mapstructure:"winrm_host"`
	// Setting this to `true` adds the remote
	// `host:port` to the `NO_PROXY` environment variable. This has the effect of
	// bypassing any configured proxies when connecting to the remote host.
	// Default to `false`.
	WinRMNoProxy bool `mapstructure:"winrm_no_proxy"`
	// The WinRM port to connect to. This defaults to `5985` for plain
	// unencrypted connection and `5986` for SSL when `winrm_use_ssl` is set to
	// true.
	WinRMPort int `mapstructure:"winrm_port"`
	// The amount of time to wait for WinRM to become available. This defaults
	// to `30m` since setting up a Windows machine generally takes a long time.
	WinRMTimeout time.Duration `mapstructure:"winrm_timeout"`
	// If `true`, use HTTPS for WinRM.
	WinRMUseSSL bool `mapstructure:"winrm_use_ssl"`
	// If `true`, do not check server certificate chain and host name.
	WinRMInsecure bool `mapstructure:"winrm_insecure"`
	// If `true`, NTLMv2 authentication (with session security) will be used
	// for WinRM, rather than default (basic authentication), removing the
	// requirement for basic authentication to be enabled within the target
	// guest. Further reading for remote connection authentication can be found
	// [here](https://msdn.microsoft.com/en-us/library/aa384295(v=vs.85).aspx).
	WinRMUseNTLM bool `mapstructure:"winrm_use_ntlm"`
	// WinRMTransportDecorator, when set, supplies the winrm.Transporter to
	// use. It has no mapstructure tag, so it cannot be set from a template;
	// presumably it is set programmatically by builders/tests — confirm.
	WinRMTransportDecorator func() winrm.Transporter
}
|
||||
|
||||
// ConfigSpec is used by the Packer core to parse HCL2 templates; the spec
// comes from the generated (mapstructure-to-hcl2) flat form of SSH.
func (c *SSH) ConfigSpec() hcldec.ObjectSpec { return c.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
// ConfigSpec is used by the Packer core to parse HCL2 templates; the spec
// comes from the generated (mapstructure-to-hcl2) flat form of WinRM.
func (c *WinRM) ConfigSpec() hcldec.ObjectSpec { return c.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
// Configure parses the json template into the Config structs
|
||||
func (c *SSH) Configure(raws ...interface{}) ([]string, error) {
|
||||
err := config.Decode(c, nil, raws...)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Configure parses the json template into the Config structs
|
||||
func (c *WinRM) Configure(raws ...interface{}) ([]string, error) {
|
||||
err := config.Decode(c, nil, raws...)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Compile-time assertions that both communicator configs implement the
// SDK's ConfigurableCommunicator interface.
var (
	_ packersdk.ConfigurableCommunicator = new(SSH)
	_ packersdk.ConfigurableCommunicator = new(WinRM)
)
|
||||
|
||||
// SSHInterface defines whether to use public or private addresses, and whether
// to use IPv4 or IPv6.
type SSHInterface struct {
	// One of `public_ip`, `private_ip`, `public_dns`, or `private_dns`. If
	// set, either the public IP address, private IP address, public DNS name
	// or private DNS name will be used as the host for SSH. The default behaviour
	// if inside a VPC is to use the public IP address if available, otherwise
	// the private IP address will be used. If not in a VPC the public DNS name
	// will be used. Also works for WinRM.
	//
	// Where Packer is configured for an outbound proxy but WinRM traffic
	// should be direct, `ssh_interface` must be set to `private_dns` and
	// `<region>.compute.internal` included in the `NO_PROXY` environment
	// variable.
	SSHInterface string `mapstructure:"ssh_interface"`
	// The IP version to use for SSH connections, valid values are `4` and `6`.
	// Useful on dual stacked instances where the default behavior is to
	// connect via whichever IP address is returned first from the OpenStack
	// API.
	SSHIPVersion string `mapstructure:"ssh_ip_version"`
}
|
||||
|
||||
// ReadSSHPrivateKeyFile returns the SSH private key bytes.
|
||||
func (c *Config) ReadSSHPrivateKeyFile() ([]byte, error) {
|
||||
var privateKey []byte
|
||||
|
||||
if c.SSHPrivateKeyFile != "" {
|
||||
keyPath, err := pathing.ExpandUser(c.SSHPrivateKeyFile)
|
||||
if err != nil {
|
||||
return []byte{}, fmt.Errorf("Error expanding path for SSH private key: %s", err)
|
||||
}
|
||||
|
||||
privateKey, err = ioutil.ReadFile(keyPath)
|
||||
if err != nil {
|
||||
return privateKey, fmt.Errorf("Error on reading SSH private key: %s", err)
|
||||
}
|
||||
}
|
||||
return privateKey, nil
|
||||
}
|
||||
|
||||
// SSHConfigFunc returns a function that can be used for the SSH communicator
// config for connecting to the instance created over SSH using the private key
// or password. Auth methods are appended in a fixed order: SSH agent (if
// enabled), then each collected private key (wrapped with the certificate
// when ssh_certificate_file is set), then password / keyboard-interactive.
func (c *Config) SSHConfigFunc() func(multistep.StateBag) (*ssh.ClientConfig, error) {
	return func(state multistep.StateBag) (*ssh.ClientConfig, error) {
		// Host keys are deliberately not verified here.
		sshConfig := &ssh.ClientConfig{
			User:            c.SSHUsername,
			HostKeyCallback: ssh.InsecureIgnoreHostKey(),
		}
		// Only override golang's cipher/kex defaults when explicitly set.
		if len(c.SSHCiphers) != 0 {
			sshConfig.Config.Ciphers = c.SSHCiphers
		}

		if len(c.SSHKEXAlgos) != 0 {
			sshConfig.Config.KeyExchanges = c.SSHKEXAlgos
		}

		if c.SSHAgentAuth {
			// Agent auth requires a reachable agent socket.
			authSock := os.Getenv("SSH_AUTH_SOCK")
			if authSock == "" {
				return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
			}

			sshAgent, err := net.Dial("unix", authSock)
			if err != nil {
				return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
			}

			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))
		}

		// Collect every candidate private key: the configured file, any key a
		// prior step stored in the state bag, and the in-memory key bytes.
		var privateKeys [][]byte
		if c.SSHPrivateKeyFile != "" {
			privateKey, err := c.ReadSSHPrivateKeyFile()
			if err != nil {
				return nil, err
			}
			privateKeys = append(privateKeys, privateKey)
		}

		// aws,alicloud,cloudstack,digitalOcean,oneAndOne,openstack,oracle & profitbricks key
		if iKey, hasKey := state.GetOk("privateKey"); hasKey {
			privateKeys = append(privateKeys, []byte(iKey.(string)))
		}

		if len(c.SSHPrivateKey) != 0 {
			privateKeys = append(privateKeys, c.SSHPrivateKey)
		}

		// Expand the user certificate path once, outside the key loop.
		certPath := ""
		if c.SSHCertificateFile != "" {
			var err error
			certPath, err = pathing.ExpandUser(c.SSHCertificateFile)
			if err != nil {
				return nil, err
			}
		}

		for _, key := range privateKeys {

			signer, err := ssh.ParsePrivateKey(key)
			if err != nil {
				return nil, fmt.Errorf("Error on parsing SSH private key: %s", err)
			}

			// When a certificate is configured, every key is presented as a
			// certificate-backed signer.
			if certPath != "" {
				signer, err = helperssh.ReadCertificate(certPath, signer)
				if err != nil {
					return nil, err
				}
			}

			sshConfig.Auth = append(sshConfig.Auth, ssh.PublicKeys(signer))
		}

		// Password auth is offered both directly and via keyboard-interactive
		// for servers that only accept the latter.
		if c.SSHPassword != "" {
			sshConfig.Auth = append(sshConfig.Auth,
				ssh.Password(c.SSHPassword),
				ssh.KeyboardInteractive(packerssh.PasswordKeyboardInteractive(c.SSHPassword)),
			)
		}
		return sshConfig, nil
	}
}
|
||||
|
||||
// Port returns the port that will be used for access based on config.
|
||||
func (c *Config) Port() int {
|
||||
switch c.Type {
|
||||
case "ssh":
|
||||
return c.SSHPort
|
||||
case "winrm":
|
||||
return c.WinRMPort
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// Host returns the host that will be used for access based on config.
|
||||
func (c *Config) Host() string {
|
||||
switch c.Type {
|
||||
case "ssh":
|
||||
return c.SSHHost
|
||||
case "winrm":
|
||||
return c.WinRMHost
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// User returns the user that will be used for access based on config.
|
||||
func (c *Config) User() string {
|
||||
switch c.Type {
|
||||
case "ssh":
|
||||
return c.SSHUsername
|
||||
case "winrm":
|
||||
return c.WinRMUser
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// Password returns the password that will be used for access based on config.
|
||||
func (c *Config) Password() string {
|
||||
switch c.Type {
|
||||
case "ssh":
|
||||
return c.SSHPassword
|
||||
case "winrm":
|
||||
return c.WinRMPassword
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) Prepare(ctx *interpolate.Context) []error {
|
||||
if c.Type == "" {
|
||||
c.Type = "ssh"
|
||||
}
|
||||
|
||||
var errs []error
|
||||
switch c.Type {
|
||||
case "ssh":
|
||||
if es := c.prepareSSH(ctx); len(es) > 0 {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
case "winrm":
|
||||
if es := c.prepareWinRM(ctx); len(es) > 0 {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
case "docker", "dockerWindowsContainer", "none":
|
||||
break
|
||||
default:
|
||||
return []error{fmt.Errorf("Communicator type %s is invalid", c.Type)}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (c *Config) prepareSSH(ctx *interpolate.Context) []error {
|
||||
if c.SSHPort == 0 {
|
||||
c.SSHPort = 22
|
||||
}
|
||||
|
||||
if c.SSHTimeout == 0 {
|
||||
c.SSHTimeout = 5 * time.Minute
|
||||
}
|
||||
|
||||
if c.SSHKeepAliveInterval == 0 {
|
||||
c.SSHKeepAliveInterval = 5 * time.Second
|
||||
}
|
||||
|
||||
if c.SSHHandshakeAttempts == 0 {
|
||||
c.SSHHandshakeAttempts = 10
|
||||
}
|
||||
|
||||
if c.SSHBastionHost != "" {
|
||||
if c.SSHBastionPort == 0 {
|
||||
c.SSHBastionPort = 22
|
||||
}
|
||||
|
||||
if c.SSHBastionPrivateKeyFile == "" && c.SSHPrivateKeyFile != "" {
|
||||
c.SSHBastionPrivateKeyFile = c.SSHPrivateKeyFile
|
||||
}
|
||||
|
||||
if c.SSHBastionCertificateFile == "" && c.SSHCertificateFile != "" {
|
||||
c.SSHBastionCertificateFile = c.SSHCertificateFile
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if c.SSHProxyHost != "" {
|
||||
if c.SSHProxyPort == 0 {
|
||||
c.SSHProxyPort = 1080
|
||||
}
|
||||
}
|
||||
|
||||
if c.SSHFileTransferMethod == "" {
|
||||
c.SSHFileTransferMethod = "scp"
|
||||
}
|
||||
|
||||
// Backwards compatibility
|
||||
if c.SSHWaitTimeout != 0 {
|
||||
c.SSHTimeout = c.SSHWaitTimeout
|
||||
}
|
||||
|
||||
// Validation
|
||||
var errs []error
|
||||
if c.SSHUsername == "" {
|
||||
errs = append(errs, errors.New("An ssh_username must be specified\n Note: some builders used to default ssh_username to \"root\"."))
|
||||
}
|
||||
|
||||
if c.SSHPrivateKeyFile != "" {
|
||||
path, err := pathing.ExpandUser(c.SSHPrivateKeyFile)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_private_key_file is invalid: %s", err))
|
||||
} else if _, err := os.Stat(path); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_private_key_file is invalid: %s", err))
|
||||
} else {
|
||||
if c.SSHCertificateFile != "" {
|
||||
certPath, err := pathing.ExpandUser(c.SSHCertificateFile)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("invalid identity certificate: #{err}"))
|
||||
}
|
||||
|
||||
if _, err := helperssh.FileSignerWithCert(path, certPath); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_private_key_file is invalid: %s", err))
|
||||
}
|
||||
} else {
|
||||
if _, err := helperssh.FileSigner(path); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_private_key_file is invalid: %s", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.SSHBastionHost != "" && !c.SSHBastionAgentAuth {
|
||||
if c.SSHBastionPassword == "" && c.SSHBastionPrivateKeyFile == "" {
|
||||
errs = append(errs, errors.New(
|
||||
"ssh_bastion_password or ssh_bastion_private_key_file must be specified"))
|
||||
} else if c.SSHBastionPrivateKeyFile != "" {
|
||||
path, err := pathing.ExpandUser(c.SSHBastionPrivateKeyFile)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_bastion_private_key_file is invalid: %s", err))
|
||||
} else if _, err := os.Stat(path); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_bastion_private_key_file is invalid: %s", err))
|
||||
} else {
|
||||
if c.SSHBastionCertificateFile != "" {
|
||||
certPath, err := pathing.ExpandUser(c.SSHBastionCertificateFile)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("invalid identity certificate: #{err}"))
|
||||
}
|
||||
if _, err := helperssh.FileSignerWithCert(path, certPath); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_bastion_private_key_file is invalid: %s", err))
|
||||
}
|
||||
} else {
|
||||
if _, err := helperssh.FileSigner(path); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_bastion_private_key_file is invalid: %s", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if c.SSHFileTransferMethod != "scp" && c.SSHFileTransferMethod != "sftp" {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_file_transfer_method ('%s') is invalid, valid methods: sftp, scp",
|
||||
c.SSHFileTransferMethod))
|
||||
}
|
||||
|
||||
if c.SSHBastionHost != "" && c.SSHProxyHost != "" {
|
||||
errs = append(errs, errors.New("please specify either ssh_bastion_host or ssh_proxy_host, not both"))
|
||||
}
|
||||
|
||||
for _, v := range c.SSHLocalTunnels {
|
||||
_, err := helperssh.ParseTunnelArgument(v, packerssh.UnsetTunnel)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_local_tunnels ('%s') is invalid: %s", v, err))
|
||||
}
|
||||
}
|
||||
|
||||
for _, v := range c.SSHRemoteTunnels {
|
||||
_, err := helperssh.ParseTunnelArgument(v, packerssh.UnsetTunnel)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"ssh_remote_tunnels ('%s') is invalid: %s", v, err))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func (c *Config) prepareWinRM(ctx *interpolate.Context) (errs []error) {
|
||||
if c.WinRMPort == 0 && c.WinRMUseSSL {
|
||||
c.WinRMPort = 5986
|
||||
} else if c.WinRMPort == 0 {
|
||||
c.WinRMPort = 5985
|
||||
}
|
||||
|
||||
if c.WinRMTimeout == 0 {
|
||||
c.WinRMTimeout = 30 * time.Minute
|
||||
}
|
||||
|
||||
if c.WinRMUseNTLM == true {
|
||||
c.WinRMTransportDecorator = func() winrm.Transporter { return &winrm.ClientNTLM{} }
|
||||
}
|
||||
|
||||
if c.WinRMUser == "" {
|
||||
errs = append(errs, errors.New("winrm_username must be specified."))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
288
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.hcl2spec.go
generated
vendored
Normal file
288
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/config.hcl2spec.go
generated
vendored
Normal file
|
@ -0,0 +1,288 @@
|
|||
// Code generated by "mapstructure-to-hcl2 -type Config,SSH,WinRM,SSHTemporaryKeyPair"; DO NOT EDIT.
|
||||
|
||||
package communicator
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
// NOTE(review): generated by mapstructure-to-hcl2 — do not hand-edit;
// regenerate after changing Config.
type FlatConfig struct {
	Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
	PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
	SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
	SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
	SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
	SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
	SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
	SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
	SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
	SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
	SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
	SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
	SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
	SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
	SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
	SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
	SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
	SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
	SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
	SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
	SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
	SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
	SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
	SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
	SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
	SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
	SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
	SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
	SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
	SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
	SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
	SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
	SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
	SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
	SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
	SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
	SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
	SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
	SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
	SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
	WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
	WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
	WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
	WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
	WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
	WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
	WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
	WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
	WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
}
|
||||
|
||||
// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatConfig)
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
// NOTE(review): generated code — []byte fields (ssh_public_key,
// ssh_private_key) decode as cty.List(cty.Number).
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
		"pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
		"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
		"ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
		"ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
		"ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
		"ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
		"temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
		"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
		"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
		"ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
		"ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
		"ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
		"ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
		"ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
		"ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
		"ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
		"ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
		"ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
		"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
		"ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
		"ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
		"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
		"ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
		"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
		"ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
		"ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
		"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
		"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
		"ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
		"ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
		"ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
		"ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
		"ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
		"ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
		"ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
		"ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
		"ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
		"ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
		"ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
		"winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
		"winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
		"winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
		"winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
		"winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
		"winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
		"winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
		"winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
		"winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
	}
	return s
}
|
||||
|
||||
// FlatSSH is an auto-generated flat version of SSH.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
// NOTE(review): generated code — keep in sync with SSH via regeneration,
// not hand edits.
type FlatSSH struct {
	SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
	SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
	SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
	SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
	SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
	SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
	SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
	SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
	SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
	SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
	SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
	SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
	SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
	SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
	SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
	SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
	SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
	SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
	SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
	SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
	SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
	SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
	SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
	SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
	SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
	SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
	SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
	SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
	SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
	SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
	SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
	SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
	SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
	SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
	SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
	SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
	SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
	SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
}
|
||||
|
||||
// FlatMapstructure returns a new FlatSSH.
// FlatSSH is an auto-generated flat version of SSH.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*SSH) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatSSH)
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a SSH.
// This spec is used by HCL to read the fields of SSH.
// The decoded values from this spec will then be applied to a FlatSSH.
// NOTE(review): generated code — mirror of the SSH subset of
// FlatConfig.HCL2Spec.
func (*FlatSSH) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
		"ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
		"ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
		"ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
		"ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
		"temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
		"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
		"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
		"ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
		"ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
		"ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
		"ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
		"ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
		"ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
		"ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
		"ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
		"ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
		"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
		"ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
		"ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
		"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
		"ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
		"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
		"ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
		"ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
		"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
		"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
		"ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
		"ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
		"ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
		"ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
		"ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
		"ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
		"ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
		"ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
		"ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
		"ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
		"ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
	}
	return s
}
|
||||
|
||||
// FlatSSHTemporaryKeyPair is an auto-generated flat version of SSHTemporaryKeyPair.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatSSHTemporaryKeyPair struct {
	SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
	SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
}
|
||||
|
||||
// FlatMapstructure returns a new FlatSSHTemporaryKeyPair.
// FlatSSHTemporaryKeyPair is an auto-generated flat version of SSHTemporaryKeyPair.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*SSHTemporaryKeyPair) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatSSHTemporaryKeyPair)
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a SSHTemporaryKeyPair.
// This spec is used by HCL to read the fields of SSHTemporaryKeyPair.
// The decoded values from this spec will then be applied to a FlatSSHTemporaryKeyPair.
func (*FlatSSHTemporaryKeyPair) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
		"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
	}
	return s
}
|
||||
|
||||
// FlatWinRM is an auto-generated flat version of WinRM.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
type FlatWinRM struct {
	WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
	WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
	WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
	WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
	WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
	WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
	WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
	WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
	WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
}
|
||||
|
||||
// FlatMapstructure returns a new FlatWinRM.
// FlatWinRM is an auto-generated flat version of WinRM.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*WinRM) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatWinRM)
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a WinRM.
// This spec is used by HCL to read the fields of WinRM.
// The decoded values from this spec will then be applied to a FlatWinRM.
func (*FlatWinRM) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
		"winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
		"winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
		"winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
		"winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
		"winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
		"winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
		"winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
		"winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
	}
	return s
}
|
|
@ -0,0 +1,10 @@
|
|||
/*
|
||||
Package communicator provides common steps for connecting to an instance
|
||||
using the Packer communicator. These steps can be implemented by builders.
|
||||
Normally, a builder will want to implement StepConnect, which is smart enough
|
||||
to then determine which kind of communicator, and therefore which kind of
|
||||
substep, it should implement.
|
||||
|
||||
Various helper functions are also supplied.
|
||||
*/
|
||||
package communicator
|
260
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/key_pair.go
generated
vendored
Normal file
260
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/key_pair.go
generated
vendored
Normal file
|
@ -0,0 +1,260 @@
|
|||
package ssh
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
const (
	// defaultRsaBits is the default bits of entropy for a new RSA
	// key pair. That's a lot of bits.
	defaultRsaBits = 4096

	// Markers for various SSH key pair types. Default is the empty
	// string; the concrete algorithm it maps to is chosen by the
	// consumer — TODO confirm against key-generation callers.
	Default KeyPairType = ""
	Rsa     KeyPairType = "RSA"
	Ecdsa   KeyPairType = "ECDSA"
	Dsa     KeyPairType = "DSA"
	Ed25519 KeyPairType = "ED25519"
)
|
||||
|
||||
// KeyPairType represents different types of SSH key pairs.
|
||||
// See the 'const' block for details.
|
||||
type KeyPairType string
|
||||
|
||||
func (o KeyPairType) String() string {
|
||||
return string(o)
|
||||
}
|
||||
|
||||
// KeyPair represents an SSH key pair.
type KeyPair struct {
	// PrivateKeyPemBlock represents the key pair's private key in
	// ASN.1 Distinguished Encoding Rules (DER) format in a
	// Privacy-Enhanced Mail (PEM) block.
	PrivateKeyPemBlock []byte

	// PublicKeyAuthorizedKeysLine represents the key pair's public key
	// as a line in OpenSSH authorized_keys. It does not end in a
	// newline (see authorizedKeysLine).
	PublicKeyAuthorizedKeysLine []byte

	// Comment is the key pair's comment. This is typically used
	// to identify the key pair's owner in the SSH user's
	// 'authorized_keys' file.
	Comment string
}
|
||||
|
||||
// KeyPairFromPrivateKey returns a KeyPair loaded from an existing private key.
|
||||
//
|
||||
// Supported key pair types include:
|
||||
// - DSA
|
||||
// - ECDSA
|
||||
// - ED25519
|
||||
// - RSA
|
||||
func KeyPairFromPrivateKey(config FromPrivateKeyConfig) (KeyPair, error) {
|
||||
privateKey, err := gossh.ParseRawPrivateKey(config.RawPrivateKeyPemBlock)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
switch pk := privateKey.(type) {
|
||||
case crypto.Signer:
|
||||
// crypto.Signer is implemented by ecdsa.PrivateKey,
|
||||
// ed25519.PrivateKey, and rsa.PrivateKey - separate cases
|
||||
// for each PrivateKey type would be redundant.
|
||||
publicKey, err := gossh.NewPublicKey(pk.Public())
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
return KeyPair{
|
||||
Comment: config.Comment,
|
||||
PrivateKeyPemBlock: config.RawPrivateKeyPemBlock,
|
||||
PublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),
|
||||
}, nil
|
||||
case *dsa.PrivateKey:
|
||||
publicKey, err := gossh.NewPublicKey(&pk.PublicKey)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
return KeyPair{
|
||||
Comment: config.Comment,
|
||||
PrivateKeyPemBlock: config.RawPrivateKeyPemBlock,
|
||||
PublicKeyAuthorizedKeysLine: authorizedKeysLine(publicKey, config.Comment),
|
||||
}, nil
|
||||
}
|
||||
|
||||
return KeyPair{}, fmt.Errorf("Cannot parse existing SSH key pair - unknown key pair type")
|
||||
}
|
||||
|
||||
// FromPrivateKeyConfig describes how an SSH key pair should be loaded from an
// existing private key.
type FromPrivateKeyConfig struct {
	// RawPrivateKeyPemBlock is the raw private key that the key pair
	// should be loaded from.
	RawPrivateKeyPemBlock []byte

	// Comment is the key pair's comment. This is typically used
	// to identify the key pair's owner in the SSH user's
	// 'authorized_keys' file.
	Comment string
}
|
||||
|
||||
// NewKeyPair generates a new SSH key pair using the specified
|
||||
// CreateKeyPairConfig.
|
||||
func NewKeyPair(config CreateKeyPairConfig) (KeyPair, error) {
|
||||
if config.Type == Default {
|
||||
config.Type = Ecdsa
|
||||
}
|
||||
|
||||
switch config.Type {
|
||||
case Ecdsa:
|
||||
return newEcdsaKeyPair(config)
|
||||
case Rsa:
|
||||
return newRsaKeyPair(config)
|
||||
}
|
||||
|
||||
return KeyPair{}, fmt.Errorf("Unable to generate new key pair, type %s is not supported",
|
||||
config.Type.String())
|
||||
}
|
||||
|
||||
// newEcdsaKeyPair returns a new ECDSA SSH key pair.
|
||||
func newEcdsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {
|
||||
var curve elliptic.Curve
|
||||
|
||||
switch config.Bits {
|
||||
case 0:
|
||||
config.Bits = 521
|
||||
fallthrough
|
||||
case 521:
|
||||
curve = elliptic.P521()
|
||||
case 384:
|
||||
curve = elliptic.P384()
|
||||
case 256:
|
||||
curve = elliptic.P256()
|
||||
case 224:
|
||||
// Not supported by "golang.org/x/crypto/ssh".
|
||||
return KeyPair{}, fmt.Errorf("golang.org/x/crypto/ssh does not support %d bits", config.Bits)
|
||||
default:
|
||||
return KeyPair{}, fmt.Errorf("crypto/elliptic does not support %d bits", config.Bits)
|
||||
}
|
||||
|
||||
privateKey, err := ecdsa.GenerateKey(curve, rand.Reader)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
sshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
privateRaw, err := x509.MarshalECPrivateKey(privateKey)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
privatePem, err := rawPemBlock(&pem.Block{
|
||||
Type: "EC PRIVATE KEY",
|
||||
Headers: nil,
|
||||
Bytes: privateRaw,
|
||||
})
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
return KeyPair{
|
||||
PrivateKeyPemBlock: privatePem,
|
||||
PublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),
|
||||
Comment: config.Comment,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// newRsaKeyPair returns a new RSA SSH key pair.
|
||||
func newRsaKeyPair(config CreateKeyPairConfig) (KeyPair, error) {
|
||||
if config.Bits == 0 {
|
||||
config.Bits = defaultRsaBits
|
||||
}
|
||||
|
||||
privateKey, err := rsa.GenerateKey(rand.Reader, config.Bits)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
sshPublicKey, err := gossh.NewPublicKey(&privateKey.PublicKey)
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
privatePemBlock, err := rawPemBlock(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Headers: nil,
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
|
||||
})
|
||||
if err != nil {
|
||||
return KeyPair{}, err
|
||||
}
|
||||
|
||||
return KeyPair{
|
||||
PrivateKeyPemBlock: privatePemBlock,
|
||||
PublicKeyAuthorizedKeysLine: authorizedKeysLine(sshPublicKey, config.Comment),
|
||||
Comment: config.Comment,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreateKeyPairConfig describes how an SSH key pair should be created.
type CreateKeyPairConfig struct {
	// Type describes the key pair's type.
	Type KeyPairType

	// Bits represents the key pair's bits of entropy. E.g., 4096 for
	// a 4096 bit RSA key pair, or 521 for a ECDSA key pair with a
	// 521-bit curve. Zero selects a per-type default (see
	// newRsaKeyPair and newEcdsaKeyPair).
	Bits int

	// Comment is the resulting key pair's comment. This is typically
	// used to identify the key pair's owner in the SSH user's
	// 'authorized_keys' file.
	Comment string
}
|
||||
|
||||
// rawPemBlock encodes a pem.Block to a slice of bytes.
|
||||
func rawPemBlock(block *pem.Block) ([]byte, error) {
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
|
||||
err := pem.Encode(buffer, block)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// authorizedKeysLine serializes key for inclusion in an OpenSSH
|
||||
// authorized_keys file. The return value ends without newline so
|
||||
// a comment can be appended to the end.
|
||||
func authorizedKeysLine(key gossh.PublicKey, comment string) []byte {
|
||||
marshaledPublicKey := gossh.MarshalAuthorizedKey(key)
|
||||
|
||||
// Remove the mandatory unix new line. Awful, but the go
|
||||
// ssh library automatically appends a unix new line.
|
||||
// We remove it so a key comment can be safely appended to the
|
||||
// end of the string.
|
||||
marshaledPublicKey = bytes.TrimSpace(marshaledPublicKey)
|
||||
|
||||
if len(strings.TrimSpace(comment)) > 0 {
|
||||
marshaledPublicKey = append(marshaledPublicKey, ' ')
|
||||
marshaledPublicKey = append(marshaledPublicKey, comment...)
|
||||
}
|
||||
|
||||
return marshaledPublicKey
|
||||
}
|
117
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/ssh.go
generated
vendored
Normal file
117
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/ssh.go
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
|||
// Package SSH provides tooling for generating a temporary SSH keypair, and
|
||||
// provides tooling for connecting to an instance via a tunnel.
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
func parseKeyFile(path string) ([]byte, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
keyBytes, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We parse the private key on our own first so that we can
|
||||
// show a nicer error if the private key has a password.
|
||||
block, _ := pem.Decode(keyBytes)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"Failed to read key '%s': no key found", path)
|
||||
}
|
||||
if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
|
||||
return nil, fmt.Errorf(
|
||||
"Failed to read key '%s': password protected keys are\n"+
|
||||
"not supported. Please decrypt the key prior to use.", path)
|
||||
}
|
||||
return keyBytes, nil
|
||||
}
|
||||
|
||||
// FileSigner returns an ssh.Signer for a key file.
|
||||
func FileSigner(path string) (ssh.Signer, error) {
|
||||
|
||||
keyBytes, err := parseKeyFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
|
||||
}
|
||||
|
||||
signer, err := ssh.ParsePrivateKey(keyBytes)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error setting up SSH config: %s", err)
|
||||
}
|
||||
|
||||
return signer, nil
|
||||
}
|
||||
|
||||
// ReadCertificate loads the SSH certificate stored at certificatePath
// (authorized_keys format), checks that it is currently within its validity
// window, and returns a signer that presents the certificate backed by
// keySigner. Note the quirk that an empty certificatePath returns keySigner
// together with a non-nil error.
func ReadCertificate(certificatePath string, keySigner ssh.Signer) (ssh.Signer, error) {

	if certificatePath == "" {
		return keySigner, fmt.Errorf("no certificate file provided")
	}

	// Load the certificate
	cert, err := ioutil.ReadFile(certificatePath)
	if err != nil {
		return nil, fmt.Errorf("unable to read certificate file: %v", err)
	}

	// Certificates on disk use the authorized_keys line format.
	pk, _, _, _, err := ssh.ParseAuthorizedKey(cert)
	if err != nil {
		return nil, fmt.Errorf("unable to parse public key: %v", err)
	}

	certificate, ok := pk.(*ssh.Certificate)

	if !ok {
		return nil, fmt.Errorf("Error loading certificate")
	}

	// Reject certificates outside their validity window before use.
	err = checkValidCert(certificate)

	if err != nil {
		return nil, fmt.Errorf("%s not a valid cert: %v", certificatePath, err)
	}

	// Combine the certificate with the private-key signer.
	certSigner, err := ssh.NewCertSigner(certificate, keySigner)
	if err != nil {
		return nil, fmt.Errorf("failed to create cert signer: %v", err)
	}

	return certSigner, nil
}
|
||||
|
||||
// FileSignerWithCert returns an ssh.Signer built from the private key at
// path, wrapped so that it presents the SSH certificate stored at
// certificatePath (see ReadCertificate).
func FileSignerWithCert(path string, certificatePath string) (ssh.Signer, error) {

	keySigner, err := FileSigner(path)

	if err != nil {
		return nil, err
	}
	return ReadCertificate(certificatePath, keySigner)
}
|
||||
|
||||
func checkValidCert(cert *ssh.Certificate) error {
|
||||
const CertTimeInfinity = 1<<64 - 1
|
||||
unixNow := time.Now().Unix()
|
||||
|
||||
if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) {
|
||||
return fmt.Errorf("ssh: cert is not yet valid")
|
||||
}
|
||||
if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) {
|
||||
return fmt.Errorf("ssh: cert has expired")
|
||||
}
|
||||
return nil
|
||||
}
|
45
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/tunnel.go
generated
vendored
Normal file
45
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/ssh/tunnel.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
package ssh
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh"
|
||||
)
|
||||
|
||||
// ParseTunnelArgument parses an SSH tunneling argument compatible with the openssh client form.
|
||||
// Valid formats:
|
||||
// `port:host:hostport`
|
||||
// NYI `[bind_address:]port:host:hostport`
|
||||
func ParseTunnelArgument(forward string, direction ssh.TunnelDirection) (ssh.TunnelSpec, error) {
|
||||
parts := strings.SplitN(forward, ":", 2)
|
||||
if len(parts) != 2 {
|
||||
return ssh.TunnelSpec{}, fmt.Errorf("Error parsing tunnel '%s': %v", forward, parts)
|
||||
}
|
||||
listeningPort, forwardingAddr := parts[0], parts[1]
|
||||
|
||||
_, sPort, err := net.SplitHostPort(forwardingAddr)
|
||||
if err != nil {
|
||||
return ssh.TunnelSpec{}, fmt.Errorf("Error parsing forwarding, must be a tcp address: %s", err)
|
||||
}
|
||||
_, err = strconv.Atoi(sPort)
|
||||
if err != nil {
|
||||
return ssh.TunnelSpec{}, fmt.Errorf("Error parsing forwarding port, must be a valid port: %s", err)
|
||||
}
|
||||
_, err = strconv.Atoi(listeningPort)
|
||||
if err != nil {
|
||||
return ssh.TunnelSpec{}, fmt.Errorf("Error parsing listening port, must be a valid port: %s", err)
|
||||
}
|
||||
|
||||
return ssh.TunnelSpec{
|
||||
Direction: direction,
|
||||
ForwardAddr: forwardingAddr,
|
||||
ForwardType: "tcp",
|
||||
ListenAddr: fmt.Sprintf("localhost:%s", listeningPort),
|
||||
ListenType: "tcp",
|
||||
}, nil
|
||||
// So we parsed all that, and are just going to ignore it now. We would
|
||||
// have used the information to set the type here.
|
||||
}
|
52
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/algorithm_enumer.go
generated
vendored
Normal file
52
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/algorithm_enumer.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
// Code generated by "enumer -type Algorithm -transform snake"; DO NOT EDIT.
|
||||
|
||||
//
|
||||
package sshkey
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// _AlgorithmName concatenates all enum names; _AlgorithmIndex holds the
// start offsets of each name within it (last entry is the total length).
const _AlgorithmName = "rsadsaecdsaed25519"

var _AlgorithmIndex = [...]uint8{0, 3, 6, 11, 18}

// String returns the snake_case name of the algorithm, or "Algorithm(%d)"
// for out-of-range values.
func (i Algorithm) String() string {
	if i < 0 || i >= Algorithm(len(_AlgorithmIndex)-1) {
		return fmt.Sprintf("Algorithm(%d)", i)
	}
	return _AlgorithmName[_AlgorithmIndex[i]:_AlgorithmIndex[i+1]]
}

var _AlgorithmValues = []Algorithm{0, 1, 2, 3}

var _AlgorithmNameToValueMap = map[string]Algorithm{
	_AlgorithmName[0:3]:   0,
	_AlgorithmName[3:6]:   1,
	_AlgorithmName[6:11]:  2,
	_AlgorithmName[11:18]: 3,
}

// AlgorithmString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
func AlgorithmString(s string) (Algorithm, error) {
	if val, ok := _AlgorithmNameToValueMap[s]; ok {
		return val, nil
	}
	return 0, fmt.Errorf("%s does not belong to Algorithm values", s)
}

// AlgorithmValues returns all values of the enum
func AlgorithmValues() []Algorithm {
	return _AlgorithmValues
}

// IsAAlgorithm returns "true" if the value is listed in the enum definition. "false" otherwise
func (i Algorithm) IsAAlgorithm() bool {
	for _, v := range _AlgorithmValues {
		if i == v {
			return true
		}
	}
	return false
}
|
255
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/generate.go
generated
vendored
Normal file
255
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/sshkey/generate.go
generated
vendored
Normal file
|
@ -0,0 +1,255 @@
|
|||
package sshkey
|
||||
|
||||
import (
|
||||
"crypto/dsa"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/asn1"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// Algorithm enumerates the SSH key algorithms that GeneratePair supports.
type Algorithm int

//go:generate enumer -type Algorithm -transform snake
const (
	RSA Algorithm = iota
	DSA
	ECDSA
	ED25519
)

// Sentinel errors returned by GeneratePair for unsupported input.
var (
	ErrUnknownAlgorithm    = fmt.Errorf("sshkey: unknown private key algorithm")
	ErrInvalidRSAKeySize   = fmt.Errorf("sshkey: invalid private key rsa size: must be more than 1024")
	ErrInvalidECDSAKeySize = fmt.Errorf("sshkey: invalid private key ecdsa size, must be one of 256, 384 or 521")
	ErrInvalidDSAKeySize   = fmt.Errorf("sshkey: invalid private key dsa size, must be one of 1024, 2048 or 3072")
)

// Pair represents an ssh key pair: the PEM-encoded private key and the
// public key in OpenSSH authorized_keys line format.
type Pair struct {
	Private []byte
	Public  []byte
}
|
||||
|
||||
func NewPair(public, private interface{}) (*Pair, error) {
|
||||
kb, err := x509.MarshalPKCS8PrivateKey(private)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
privBlk := &pem.Block{
|
||||
Type: "PRIVATE KEY",
|
||||
Headers: nil,
|
||||
Bytes: kb,
|
||||
}
|
||||
|
||||
publicKey, err := ssh.NewPublicKey(public)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Pair{
|
||||
Private: pem.EncodeToMemory(privBlk),
|
||||
Public: ssh.MarshalAuthorizedKey(publicKey),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// PairFromED25519 marshalls a valid pair of openssh pem for ED25519 keypairs.
// NewPair can handle ed25519 pairs but generates the wrong format apparently:
// `Load key "id_ed25519": invalid format` is the error that happens when I try
// to ssh with such a key.
//
// The private key is emitted as an unencrypted "OPENSSH PRIVATE KEY" block in
// the openssh-key-v1 container format; the wire structures below mirror the
// ones used by golang.org/x/crypto/ssh (see link).
func PairFromED25519(public ed25519.PublicKey, private ed25519.PrivateKey) (*Pair, error) {
	// see https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L1273-L1443
	// Innermost payload: the key material itself.
	key := struct {
		Pub     []byte
		Priv    []byte
		Comment string
		Pad     []byte `ssh:"rest"`
	}{
		Pub:  public,
		Priv: private,
	}
	keyBytes := ssh.Marshal(key)

	// Private-key block: check values (zero here, since unencrypted),
	// key type, then the payload above.
	pk1 := struct {
		Check1  uint32
		Check2  uint32
		Keytype string
		Rest    []byte `ssh:"rest"`
	}{
		Keytype: ssh.KeyAlgoED25519,
		Rest:    keyBytes,
	}
	pk1Bytes := ssh.Marshal(pk1)

	// Outer openssh-key-v1 container; "none" cipher/KDF means the
	// private key block is stored unencrypted.
	k := struct {
		CipherName   string
		KdfName      string
		KdfOpts      string
		NumKeys      uint32
		PubKey       []byte
		PrivKeyBlock []byte
	}{
		CipherName:   "none",
		KdfName:      "none",
		KdfOpts:      "",
		NumKeys:      1,
		PrivKeyBlock: pk1Bytes,
	}

	// Magic prefix that identifies the openssh-key-v1 format.
	const opensshV1Magic = "openssh-key-v1\x00"

	privBlk := &pem.Block{
		Type:    "OPENSSH PRIVATE KEY",
		Headers: nil,
		Bytes:   append([]byte(opensshV1Magic), ssh.Marshal(k)...),
	}
	publicKey, err := ssh.NewPublicKey(public)
	if err != nil {
		return nil, err
	}
	return &Pair{
		Private: pem.EncodeToMemory(privBlk),
		Public:  ssh.MarshalAuthorizedKey(publicKey),
	}, nil
}
|
||||
|
||||
// PairFromDSA marshalls a valid pair of openssh pem for dsa keypairs.
// x509.MarshalPKCS8PrivateKey does not know how to deal with dsa keys.
func PairFromDSA(key *dsa.PrivateKey) (*Pair, error) {
	// see https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L1186-L1195
	// and https://linux.die.net/man/1/dsa
	// The ASN.1 field order below is fixed by the OpenSSL DSA key
	// format; do not reorder.
	k := struct {
		Version int
		P       *big.Int
		Q       *big.Int
		G       *big.Int
		Pub     *big.Int
		Priv    *big.Int
	}{
		Version: 0,
		P:       key.P,
		Q:       key.Q,
		G:       key.G,
		Pub:     key.Y,
		Priv:    key.X,
	}
	kb, err := asn1.Marshal(k)
	if err != nil {
		return nil, err
	}
	privBlk := &pem.Block{
		Type:    "DSA PRIVATE KEY",
		Headers: nil,
		Bytes:   kb,
	}
	publicKey, err := ssh.NewPublicKey(&key.PublicKey)
	if err != nil {
		return nil, err
	}
	return &Pair{
		Private: pem.EncodeToMemory(privBlk),
		Public:  ssh.MarshalAuthorizedKey(publicKey),
	}, nil
}
|
||||
|
||||
// GeneratePair generates a Private/Public key pair using algorithm t.
//
// When rand is nil "crypto/rand".Reader will be used.
//
// bits specifies the number of bits in the key to create. For RSA keys, the
// minimum size is 1024 bits and the default is 3072 bits. Generally, 3072 bits
// is considered sufficient. DSA keys must be exactly 1024 bits - or 2 or 3
// times that - as specified by FIPS 186-2. For ECDSA keys, bits determines the
// key length by selecting from one of three elliptic curve sizes: 256, 384 or
// 521 bits. Attempting to use bit lengths other than these three values for
// ECDSA keys will fail. Ed25519 keys have a fixed length and the bits will
// be ignored.
func GeneratePair(t Algorithm, rand io.Reader, bits int) (*Pair, error) {
	if rand == nil {
		rand = cryptorand.Reader
	}
	switch t {
	case DSA:
		if bits == 0 {
			// currently the ssh package can only decode 1024 bits dsa keys, so
			// that's going be the default for now see
			// https://github.com/golang/crypto/blob/7f63de1d35b0f77fa2b9faea3e7deb402a2383c8/ssh/keys.go#L411-L420
			bits = 1024
		}
		var sizes dsa.ParameterSizes
		switch bits {
		case 1024:
			sizes = dsa.L1024N160
		case 2048:
			sizes = dsa.L2048N256
		case 3072:
			sizes = dsa.L3072N256
		default:
			return nil, ErrInvalidDSAKeySize
		}

		// DSA requires generating domain parameters before the key
		// itself.
		params := dsa.Parameters{}
		if err := dsa.GenerateParameters(&params, rand, sizes); err != nil {
			return nil, err
		}

		dsakey := &dsa.PrivateKey{
			PublicKey: dsa.PublicKey{
				Parameters: params,
			},
		}
		if err := dsa.GenerateKey(dsakey, rand); err != nil {
			return nil, err
		}
		return PairFromDSA(dsakey)
	case ECDSA:
		// Default to the strongest supported curve.
		if bits == 0 {
			bits = 521
		}
		var ecdsakey *ecdsa.PrivateKey
		var err error
		switch bits {
		case 256:
			ecdsakey, err = ecdsa.GenerateKey(elliptic.P256(), rand)
		case 384:
			ecdsakey, err = ecdsa.GenerateKey(elliptic.P384(), rand)
		case 521:
			ecdsakey, err = ecdsa.GenerateKey(elliptic.P521(), rand)
		default:
			ecdsakey, err = nil, ErrInvalidECDSAKeySize
		}
		if err != nil {
			return nil, err
		}
		return NewPair(&ecdsakey.PublicKey, ecdsakey)
	case ED25519:
		publicKey, privateKey, err := ed25519.GenerateKey(rand)
		if err != nil {
			return nil, err
		}
		// ed25519 needs the dedicated openssh marshalling path; see
		// PairFromED25519.
		return PairFromED25519(publicKey, privateKey)
	case RSA:
		if bits == 0 {
			bits = 4096
		}
		if bits < 1024 {
			return nil, ErrInvalidRSAKeySize
		}
		rsakey, err := rsa.GenerateKey(rand, bits)
		if err != nil {
			return nil, err
		}
		return NewPair(&rsakey.PublicKey, rsakey)
	default:
		return nil, ErrUnknownAlgorithm
	}
}
|
139
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect.go
generated
vendored
Normal file
139
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect.go
generated
vendored
Normal file
|
@ -0,0 +1,139 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/none"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// StepConnect is a multistep Step implementation that connects to
// the proper communicator and stores it in the "communicator" key in the
// state bag.
type StepConnect struct {
	// Config is the communicator config struct
	Config *Config

	// Host should return a host that can be connected to for communicator
	// connections.
	Host func(multistep.StateBag) (string, error)

	// The fields below are callbacks to assist with connecting to SSH.
	//
	// SSHConfig should return the default configuration for
	// connecting via SSH.
	SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error)
	SSHPort   func(multistep.StateBag) (int, error)

	// The fields below are callbacks to assist with connecting to WinRM.
	//
	// WinRMConfig should return the default configuration for
	// connecting via WinRM.
	WinRMConfig func(multistep.StateBag) (*WinRMConfig, error)
	WinRMPort   func(multistep.StateBag) (int, error)

	// CustomConnect can be set to have custom connectors for specific
	// types. These take highest precedence so you can also override
	// existing types.
	CustomConnect map[string]multistep.Step

	// substep holds the communicator-specific step chosen by Run so
	// that Cleanup can delegate to it.
	substep multistep.Step
}
|
||||
|
||||
func (s *StepConnect) pause(pauseLen time.Duration, ctx context.Context) bool {
|
||||
// Use a select to determine if we get cancelled during the wait
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
case <-time.After(pauseLen):
|
||||
}
|
||||
log.Printf("Pause over; connecting...")
|
||||
return false
|
||||
}
|
||||
|
||||
// Run selects the communicator-specific substep matching s.Config.Type
// (honoring CustomConnect overrides), runs it, optionally pauses and re-runs
// it per PauseBeforeConnect, and stores the communicator config in state
// under "communicator_config".
func (s *StepConnect) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)

	// Built-in connectors; a nil entry means "no communicator".
	typeMap := map[string]multistep.Step{
		"none": nil,
		"ssh": &StepConnectSSH{
			Config:    s.Config,
			Host:      s.Host,
			SSHConfig: s.SSHConfig,
			SSHPort:   s.SSHPort,
		},
		"winrm": &StepConnectWinRM{
			Config:      s.Config,
			Host:        s.Host,
			WinRMConfig: s.WinRMConfig,
			WinRMPort:   s.WinRMPort,
		},
	}
	// CustomConnect entries take precedence and may override built-ins.
	for k, v := range s.CustomConnect {
		typeMap[k] = v
	}

	step, ok := typeMap[s.Config.Type]
	if !ok {
		state.Put("error", fmt.Errorf("unknown communicator type: %s", s.Config.Type))
		return multistep.ActionHalt
	}

	// "none": install the no-op communicator and finish immediately.
	if step == nil {
		if comm, err := none.New("none"); err != nil {
			err := fmt.Errorf("Failed to set communicator 'none': %s", err)
			state.Put("error", err)
			ui.Error(err.Error())
			return multistep.ActionHalt

		} else {
			state.Put("communicator", comm)
			log.Printf("[INFO] communicator disabled, will not connect")
		}
		return multistep.ActionContinue
	}

	// Host lookup is informational only here; failure is not fatal.
	if host, err := s.Host(state); err == nil {
		ui.Say(fmt.Sprintf("Using %s communicator to connect: %s", s.Config.Type, host))
	} else {
		log.Printf("[DEBUG] Unable to get address during connection step: %s", err)
	}

	s.substep = step
	action := s.substep.Run(ctx, state)
	if action == multistep.ActionHalt {
		return action
	}

	if s.Config.PauseBeforeConnect > 0 {
		ui.Say(fmt.Sprintf("Pausing %s before connecting...",
			s.Config.PauseBeforeConnect.String()))
		cancelled := s.pause(s.Config.PauseBeforeConnect, ctx)
		if cancelled {
			return multistep.ActionHalt
		}
		// After pause is complete, re-run the connect substep to make sure
		// you've connected properly
		action := s.substep.Run(ctx, state)
		if action == multistep.ActionHalt {
			return action
		}
	}

	// Put communicator config into state so we can pass it to provisioners
	// for specialized interpolation later
	state.Put("communicator_config", s.Config)

	return multistep.ActionContinue
}
|
||||
|
||||
func (s *StepConnect) Cleanup(state multistep.StateBag) {
|
||||
if s.substep != nil {
|
||||
s.substep.Cleanup(state)
|
||||
}
|
||||
}
|
320
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_ssh.go
generated
vendored
Normal file
320
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_ssh.go
generated
vendored
Normal file
|
@ -0,0 +1,320 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ssh/terminal"
|
||||
|
||||
helperssh "github.com/hashicorp/packer-plugin-sdk/communicator/ssh"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/pathing"
|
||||
"github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/ssh"
|
||||
gossh "golang.org/x/crypto/ssh"
|
||||
"golang.org/x/crypto/ssh/agent"
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
// StepConnectSSH is a step that only connects to SSH.
//
// In general, you should use StepConnect.
type StepConnectSSH struct {
	// All the fields below are documented on StepConnect
	Config    *Config
	Host      func(multistep.StateBag) (string, error)
	SSHConfig func(multistep.StateBag) (*gossh.ClientConfig, error)
	SSHPort   func(multistep.StateBag) (int, error)
}
|
||||
|
||||
// Run waits (in a background goroutine) for SSH to become reachable, storing
// the resulting communicator in state under "communicator". It halts on wait
// error, on s.Config.SSHTimeout expiring, or on ctx cancellation.
func (s *StepConnectSSH) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)

	// comm/err are written by the goroutine below and only read after
	// it signals completion via waitDone.
	var comm packersdk.Communicator
	var err error

	subCtx, cancel := context.WithCancel(ctx)
	waitDone := make(chan bool, 1)
	go func() {
		ui.Say("Waiting for SSH to become available...")
		comm, err = s.waitForSSH(state, subCtx)
		cancel() // just to make 'possible context leak' analysis happy
		waitDone <- true
	}()

	log.Printf("[INFO] Waiting for SSH, up to timeout: %s", s.Config.SSHTimeout)
	timeout := time.After(s.Config.SSHTimeout)
	for {
		// Wait for either SSH to become available, a timeout to occur,
		// or an interrupt to come through.
		select {
		case <-waitDone:
			if err != nil {
				ui.Error(fmt.Sprintf("Error waiting for SSH: %s", err))
				state.Put("error", err)
				return multistep.ActionHalt
			}

			ui.Say("Connected to SSH!")
			state.Put("communicator", comm)
			return multistep.ActionContinue
		case <-timeout:
			err := fmt.Errorf("Timeout waiting for SSH.")
			state.Put("error", err)
			ui.Error(err.Error())
			cancel()
			return multistep.ActionHalt
		case <-ctx.Done():
			// The step sequence was cancelled, so cancel waiting for SSH
			// and just start the halting process.
			cancel()
			log.Println("[WARN] Interrupt detected, quitting waiting for SSH.")
			return multistep.ActionHalt
		case <-time.After(1 * time.Second):
			// Wake up periodically so the loop re-evaluates its cases.
		}
	}
}
||||
|
||||
// Cleanup is a no-op: connecting leaves nothing to tear down.
func (s *StepConnectSSH) Cleanup(multistep.StateBag) {
}
||||
|
||||
// waitForSSH blocks until an SSH connection to the target is established or
// ctx is cancelled, retrying every 5 seconds. Host, port, and SSH config are
// re-resolved from state on every attempt so builders whose instances change
// address can still connect. On success the connected communicator is
// returned. Bastion/proxy settings are resolved once, before the loop,
// because they come from static configuration.
func (s *StepConnectSSH) waitForSSH(state multistep.StateBag, ctx context.Context) (packersdk.Communicator, error) {
	// Determine if we're using a bastion host, and if so, retrieve
	// that configuration. This configuration doesn't change so we
	// do this one before entering the retry loop.
	var bProto, bAddr string
	var bConf *gossh.ClientConfig
	var pAddr string
	var pAuth *proxy.Auth
	if s.Config.SSHBastionHost != "" {
		// The protocol is hardcoded for now, but may be configurable one day
		bProto = "tcp"
		bAddr = fmt.Sprintf(
			"%s:%d", s.Config.SSHBastionHost, s.Config.SSHBastionPort)

		conf, err := sshBastionConfig(s.Config)
		if err != nil {
			return nil, fmt.Errorf("Error configuring bastion: %s", err)
		}
		bConf = conf
	}

	if s.Config.SSHProxyHost != "" {
		pAddr = fmt.Sprintf("%s:%d", s.Config.SSHProxyHost, s.Config.SSHProxyPort)
		if s.Config.SSHProxyUsername != "" {
			pAuth = new(proxy.Auth)
			pAuth.User = s.Config.SSHProxyUsername
			pAuth.Password = s.Config.SSHProxyPassword
		}

	}

	// Counts only attempts that reached authentication (see below); plain
	// TCP/handshake failures retry forever until the caller's timeout fires.
	handshakeAttempts := 0

	var comm packersdk.Communicator
	first := true
	for {
		// Don't check for cancel or wait on first iteration
		if !first {
			select {
			case <-ctx.Done():
				log.Println("[DEBUG] SSH wait cancelled. Exiting loop.")
				return nil, errors.New("SSH wait cancelled")
			case <-time.After(5 * time.Second):
			}
		}
		first = false

		// First we request the TCP connection information
		host, err := s.Host(state)
		if err != nil {
			log.Printf("[DEBUG] Error getting SSH address: %s", err)
			continue
		}
		// store host and port in config so we can access them from provisioners
		s.Config.SSHHost = host
		port := s.Config.SSHPort
		if s.SSHPort != nil {
			port, err = s.SSHPort(state)
			if err != nil {
				log.Printf("[DEBUG] Error getting SSH port: %s", err)
				continue
			}
			s.Config.SSHPort = port
		}
		state.Put("communicator_config", s.Config)

		// Retrieve the SSH configuration
		sshConfig, err := s.SSHConfig(state)
		if err != nil {
			log.Printf("[DEBUG] Error getting SSH config: %s", err)
			continue
		}

		// Attempt to connect to SSH port
		var connFunc func() (net.Conn, error)
		address := fmt.Sprintf("%s:%d", host, port)
		if bAddr != "" {
			// We're using a bastion host, so use the bastion connfunc
			connFunc = ssh.BastionConnectFunc(
				bProto, bAddr, bConf, "tcp", address)
		} else if pAddr != "" {
			// Connect via SOCKS5 proxy
			connFunc = ssh.ProxyConnectFunc(pAddr, pAuth, "tcp", address)
		} else {
			// No bastion host, connect directly
			connFunc = ssh.ConnectFunc("tcp", address)
		}

		// Probe reachability first; the probe connection is discarded and
		// connFunc is dialed again by ssh.New below.
		nc, err := connFunc()
		if err != nil {
			log.Printf("[DEBUG] TCP connection to SSH ip/port failed: %s", err)
			continue
		}
		nc.Close()

		// Parse out all the requested Port Tunnels that will go over our SSH connection
		var tunnels []ssh.TunnelSpec
		for _, v := range s.Config.SSHLocalTunnels {
			t, err := helperssh.ParseTunnelArgument(v, ssh.LocalTunnel)
			if err != nil {
				return nil, fmt.Errorf(
					"Error parsing port forwarding: %s", err)
			}
			tunnels = append(tunnels, t)
		}
		for _, v := range s.Config.SSHRemoteTunnels {
			t, err := helperssh.ParseTunnelArgument(v, ssh.RemoteTunnel)
			if err != nil {
				return nil, fmt.Errorf(
					"Error parsing port forwarding: %s", err)
			}
			tunnels = append(tunnels, t)
		}

		// Then we attempt to connect via SSH
		config := &ssh.Config{
			Connection:             connFunc,
			SSHConfig:              sshConfig,
			Pty:                    s.Config.SSHPty,
			DisableAgentForwarding: s.Config.SSHDisableAgentForwarding,
			UseSftp:                s.Config.SSHFileTransferMethod == "sftp",
			KeepAliveInterval:      s.Config.SSHKeepAliveInterval,
			Timeout:                s.Config.SSHReadWriteTimeout,
			Tunnels:                tunnels,
		}

		log.Printf("[INFO] Attempting SSH connection to %s...", address)
		comm, err = ssh.New(address, config)
		if err != nil {
			log.Printf("[DEBUG] SSH handshake err: %s", err)

			// Only count this as an attempt if we were able to attempt
			// to authenticate. Note this is very brittle since it depends
			// on the string of the error... but I don't see any other way.
			if strings.Contains(err.Error(), "authenticate") {
				log.Printf(
					"[DEBUG] Detected authentication error. Increasing handshake attempts.")
				err = fmt.Errorf("Packer experienced an authentication error "+
					"when trying to connect via SSH. This can happen if your "+
					"username/password are wrong. You may want to double-check"+
					" your credentials as part of your debugging process. "+
					"original error: %s",
					err)
				handshakeAttempts += 1
			}

			if handshakeAttempts < s.Config.SSHHandshakeAttempts {
				// Try to connect via SSH a handful of times. We sleep here
				// so we don't get a ton of authentication errors back to back.
				time.Sleep(2 * time.Second)
				continue
			}

			return nil, err
		}

		break
	}

	return comm, nil
}
|
||||
|
||||
// sshBastionConfig builds the gossh client configuration for the bastion
// host, assembling auth methods in order: keyboard-interactive (when
// SSHBastionInteractive), password, private key (optionally with a
// certificate), and finally the local SSH agent via $SSH_AUTH_SOCK.
// Bastion host keys are deliberately not verified (InsecureIgnoreHostKey).
func sshBastionConfig(config *Config) (*gossh.ClientConfig, error) {
	auth := make([]gossh.AuthMethod, 0, 2)

	if config.SSHBastionInteractive {
		var c io.ReadWriteCloser
		if terminal.IsTerminal(int(os.Stdin.Fd())) {
			c = os.Stdin
		} else {
			// stdin is not a terminal; fall back to the controlling TTY.
			tty, err := os.Open("/dev/tty")
			if err != nil {
				return nil, err
			}
			// NOTE(review): this defer closes the tty when config building
			// returns, but the keyboard-interactive callback reads from c
			// later, during the actual handshake — verify upstream intent.
			defer tty.Close()
			c = tty
		}
		auth = append(auth, gossh.KeyboardInteractive(ssh.KeyboardInteractive(c)))
	}

	if config.SSHBastionPassword != "" {
		// Offer the password both directly and via keyboard-interactive,
		// since servers differ in which method they accept.
		auth = append(auth,
			gossh.Password(config.SSHBastionPassword),
			gossh.KeyboardInteractive(
				ssh.PasswordKeyboardInteractive(config.SSHBastionPassword)))
	}

	if config.SSHBastionPrivateKeyFile != "" {
		path, err := pathing.ExpandUser(config.SSHBastionPrivateKeyFile)
		if err != nil {
			return nil, fmt.Errorf(
				"Error expanding path for SSH bastion private key: %s", err)
		}

		if config.SSHBastionCertificateFile != "" {
			identityPath, err := pathing.ExpandUser(config.SSHBastionCertificateFile)
			if err != nil {
				return nil, fmt.Errorf("Error expanding path for SSH bastion identity certificate: %s", err)
			}
			signer, err := helperssh.FileSignerWithCert(path, identityPath)
			if err != nil {
				return nil, err
			}
			auth = append(auth, gossh.PublicKeys(signer))
		} else {
			signer, err := helperssh.FileSigner(path)
			if err != nil {
				return nil, err
			}
			auth = append(auth, gossh.PublicKeys(signer))
		}
	}

	if config.SSHBastionAgentAuth {
		authSock := os.Getenv("SSH_AUTH_SOCK")
		if authSock == "" {
			return nil, fmt.Errorf("SSH_AUTH_SOCK is not set")
		}

		sshAgent, err := net.Dial("unix", authSock)
		if err != nil {
			return nil, fmt.Errorf("Cannot connect to SSH Agent socket %q: %s", authSock, err)
		}

		auth = append(auth, gossh.PublicKeysCallback(agent.NewClient(sshAgent).Signers))
	}

	return &gossh.ClientConfig{
		User:            config.SSHBastionUsername,
		Auth:            auth,
		HostKeyCallback: gossh.InsecureIgnoreHostKey(),
	}, nil
}
|
239
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_winrm.go
generated
vendored
Normal file
239
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_connect_winrm.go
generated
vendored
Normal file
|
@ -0,0 +1,239 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/sdk-internals/communicator/winrm"
|
||||
winrmcmd "github.com/masterzen/winrm"
|
||||
"golang.org/x/net/http/httpproxy"
|
||||
)
|
||||
|
||||
// StepConnectWinRM is a multistep Step implementation that waits for WinRM
// to become available. It gets the connection information from a single
// configuration when creating the step. It is the WinRM counterpart of
// StepConnectSSH.
//
// Uses:
// ui packersdk.Ui
//
// Produces:
// communicator packersdk.Communicator
type StepConnectWinRM struct {
	// All the fields below are documented on StepConnect
	Config      *Config
	Host        func(multistep.StateBag) (string, error)
	WinRMConfig func(multistep.StateBag) (*WinRMConfig, error)
	WinRMPort   func(multistep.StateBag) (int, error)
}
|
||||
|
||||
// Run starts waitForWinRM in a goroutine, then polls until the wait
// finishes, the configured WinRMTimeout elapses, or the step context is
// cancelled. On success the connected communicator is stored in state under
// "communicator".
func (s *StepConnectWinRM) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)

	// comm/err are written by the goroutine below and only read after a
	// receive on waitDone, which provides the happens-before ordering.
	var comm packersdk.Communicator
	var err error

	subCtx, cancel := context.WithCancel(ctx)
	waitDone := make(chan bool, 1)
	go func() {
		ui.Say("Waiting for WinRM to become available...")
		comm, err = s.waitForWinRM(state, subCtx)
		cancel() // just to make 'possible context leak' analysis happy
		waitDone <- true
	}()

	log.Printf("Waiting for WinRM, up to timeout: %s", s.Config.WinRMTimeout)
	timeout := time.After(s.Config.WinRMTimeout)
	for {
		// Wait for either WinRM to become available, a timeout to occur,
		// or an interrupt to come through.
		select {
		case <-waitDone:
			if err != nil {
				ui.Error(fmt.Sprintf("Error waiting for WinRM: %s", err))
				return multistep.ActionHalt
			}

			ui.Say("Connected to WinRM!")
			state.Put("communicator", comm)
			return multistep.ActionContinue
		case <-timeout:
			err := fmt.Errorf("Timeout waiting for WinRM.")
			state.Put("error", err)
			ui.Error(err.Error())
			cancel()
			return multistep.ActionHalt
		case <-ctx.Done():
			// The step sequence was cancelled, so cancel waiting for WinRM
			// and just start the halting process.
			cancel()
			log.Println("Interrupt detected, quitting waiting for WinRM.")
			return multistep.ActionHalt
		case <-time.After(1 * time.Second):
			// Periodic wakeup; re-enters the select.
		}
	}
}
|
||||
|
||||
// Cleanup does nothing: this step holds no resources that outlive Run.
func (s *StepConnectWinRM) Cleanup(multistep.StateBag) {
}
|
||||
|
||||
// waitForWinRM retries every 5 seconds until a WinRM connection is
// established or ctx is cancelled, then verifies the connection by running a
// PowerShell echo command and checking its output before returning the
// communicator. Host/port/credentials are re-resolved from state on every
// attempt.
func (s *StepConnectWinRM) waitForWinRM(state multistep.StateBag, ctx context.Context) (packersdk.Communicator, error) {
	var comm packersdk.Communicator
	first := true
	for {
		// Don't check for cancel or wait on first iteration
		if !first {
			select {
			case <-ctx.Done():
				log.Println("[INFO] WinRM wait cancelled. Exiting loop.")
				return nil, errors.New("WinRM wait cancelled")
			case <-time.After(5 * time.Second):
			}
		}
		first = false

		host, err := s.Host(state)
		if err != nil {
			log.Printf("[DEBUG] Error getting WinRM host: %s", err)
			continue
		}
		// Store host/port back into config so provisioners can read them.
		s.Config.WinRMHost = host

		port := s.Config.WinRMPort
		if s.WinRMPort != nil {
			port, err = s.WinRMPort(state)
			if err != nil {
				log.Printf("[DEBUG] Error getting WinRM port: %s", err)
				continue
			}
			s.Config.WinRMPort = port
		}

		state.Put("communicator_config", s.Config)

		user := s.Config.WinRMUser
		password := s.Config.WinRMPassword
		if s.WinRMConfig != nil {
			config, err := s.WinRMConfig(state)
			if err != nil {
				log.Printf("[DEBUG] Error getting WinRM config: %s", err)
				continue
			}

			// Runtime config overrides static values only when non-empty.
			if config.Username != "" {
				user = config.Username
			}
			if config.Password != "" {
				password = config.Password
				s.Config.WinRMPassword = password
			}
		}

		if s.Config.WinRMNoProxy {
			if err := setNoProxy(host, port); err != nil {
				return nil, fmt.Errorf("Error setting no_proxy: %s", err)
			}
			s.Config.WinRMTransportDecorator = ProxyTransportDecorator
		}

		log.Println("[INFO] Attempting WinRM connection...")
		comm, err = winrm.New(&winrm.Config{
			Host:               host,
			Port:               port,
			Username:           user,
			Password:           password,
			Timeout:            s.Config.WinRMTimeout,
			Https:              s.Config.WinRMUseSSL,
			Insecure:           s.Config.WinRMInsecure,
			TransportDecorator: s.Config.WinRMTransportDecorator,
		})
		if err != nil {
			log.Printf("[ERROR] WinRM connection err: %s", err)
			continue
		}

		break
	}
	// run an "echo" command to make sure winrm is actually connected before moving on.
	var connectCheckCommand = winrmcmd.Powershell(`if (Test-Path variable:global:ProgressPreference){$ProgressPreference='SilentlyContinue'}; echo "WinRM connected."`)
	var retryableSleep = 5 * time.Second
	// run an "echo" command to make sure that the winrm is connected
	for {
		cmd := &packersdk.RemoteCmd{Command: connectCheckCommand}
		var buf, buf2 bytes.Buffer
		// NOTE(review): the first assignment is immediately superseded; the
		// MultiWriter tees output into both buf and buf2, and only buf2 is
		// read below. Equivalent to io.MultiWriter(&buf, &buf2).
		cmd.Stdout = &buf
		cmd.Stdout = io.MultiWriter(cmd.Stdout, &buf2)
		select {
		case <-ctx.Done():
			log.Println("WinRM wait canceled, exiting loop")
			return comm, fmt.Errorf("WinRM wait canceled")
		case <-time.After(retryableSleep):
		}

		log.Printf("Checking that WinRM is connected with: '%s'", connectCheckCommand)
		ui := state.Get("ui").(packersdk.Ui)
		err := cmd.RunWithUi(ctx, comm, ui)

		if err != nil {
			log.Printf("Communication connection err: %s", err)
			continue
		}

		log.Printf("Connected to machine")
		stdoutToRead := buf2.String()
		if !strings.Contains(stdoutToRead, "WinRM connected.") {
			log.Printf("echo didn't succeed; retrying...")
			continue
		}
		break
	}

	return comm, nil
}
|
||||
|
||||
// setNoProxy ensures "host:port" appears in the NO_PROXY environment
// variable, appending it comma-separated when missing. An existing value
// already containing the host:port substring is left untouched.
func setNoProxy(host string, port int) error {
	entry := fmt.Sprintf("%s:%d", host, port)
	existing := os.Getenv("NO_PROXY")
	switch {
	case existing == "":
		return os.Setenv("NO_PROXY", entry)
	case strings.Contains(existing, entry):
		// Already listed; nothing to do.
		return nil
	default:
		return os.Setenv("NO_PROXY", existing+","+entry)
	}
}
|
||||
|
||||
// RefreshProxyFromEnvironment returns the proxy URL (if any) to use for req,
// re-reading the proxy environment variables on every call.
//
// The net/http ProxyFromEnvironment only loads the environment once, when the
// code is initialized rather than when it's executed. This means that if your
// wrapping code sets the NO_PROXY env var (as Packer does!), it will be
// ignored. Re-loading the environment vars is more expensive, but it is the
// easiest way to work around this limitation.
func RefreshProxyFromEnvironment(req *http.Request) (*url.URL, error) {
	return envProxyFunc()(req.URL)
}
|
||||
|
||||
func envProxyFunc() func(*url.URL) (*url.URL, error) {
|
||||
envProxyFuncValue := httpproxy.FromEnvironment().ProxyFunc()
|
||||
return envProxyFuncValue
|
||||
}
|
||||
|
||||
// ProxyTransportDecorator is a custom Transporter that reloads HTTP Proxy settings at client runtime.
// The net/http ProxyFromEnvironment only loads the environment once, when the
// code is initialized rather than when it's executed. This means that if your
// wrapping code sets the NO_PROXY env var (as Packer does!), it will be
// ignored. Re-loading the environment vars is more expensive, but it is the
// easiest way to work around this limitation.
//
// The returned transporter resolves the proxy for each request via
// RefreshProxyFromEnvironment.
func ProxyTransportDecorator() winrmcmd.Transporter {
	return winrmcmd.NewClientWithProxyFunc(RefreshProxyFromEnvironment)
}
|
33
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_debug_ssh_keys.go
generated
vendored
Normal file
33
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_debug_ssh_keys.go
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepDumpSSHKey is a multistep Step implementation that writes the ssh
// keypair somewhere.
type StepDumpSSHKey struct {
	// Path is the file the private key is written to.
	Path string
	// SSH supplies the private key bytes to dump.
	SSH *SSH
}
|
||||
|
||||
func (s *StepDumpSSHKey) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.Path))
|
||||
|
||||
err := ioutil.WriteFile(s.Path, s.SSH.SSHPrivateKey, 0700)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error saving debug key: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup does nothing; the dumped key file is left in place.
func (s *StepDumpSSHKey) Cleanup(state multistep.StateBag) {}
|
65
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_ssh_keygen.go
generated
vendored
Normal file
65
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/step_ssh_keygen.go
generated
vendored
Normal file
|
@ -0,0 +1,65 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator/sshkey"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepSSHKeyGen is a Packer build step that generates SSH key pairs.
|
||||
type StepSSHKeyGen struct {
|
||||
CommConf *Config
|
||||
SSHTemporaryKeyPair
|
||||
}
|
||||
|
||||
// Run executes the Packer build step that generates SSH key pairs.
|
||||
// The key pairs are added to the ssh config
|
||||
func (s *StepSSHKeyGen) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
comm := s.CommConf
|
||||
|
||||
if comm.SSHPrivateKeyFile != "" {
|
||||
ui.Say("Using existing SSH private key")
|
||||
privateKeyBytes, err := comm.ReadSSHPrivateKeyFile()
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
comm.SSHPrivateKey = privateKeyBytes
|
||||
comm.SSHPublicKey = nil
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
algorithm := s.SSHTemporaryKeyPair.SSHTemporaryKeyPairType
|
||||
if algorithm == "" {
|
||||
algorithm = sshkey.RSA.String()
|
||||
}
|
||||
a, err := sshkey.AlgorithmString(algorithm)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("%w: possible algorithm types are `dsa` | `ecdsa` | `ed25519` | `rsa` ( the default )", err)
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Creating temporary %s SSH key for instance...", a.String()))
|
||||
pair, err := sshkey.GeneratePair(a, nil, s.SSHTemporaryKeyPairBits)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error creating temporary ssh key: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
comm.SSHPrivateKey = pair.Private
|
||||
comm.SSHPublicKey = pair.Public
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Nothing to clean up. SSH keys are associated with a single GCE instance.
|
||||
func (s *StepSSHKeyGen) Cleanup(state multistep.StateBag) {}
|
48
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/testing.go
generated
vendored
Normal file
48
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/testing.go
generated
vendored
Normal file
|
@ -0,0 +1,48 @@
|
|||
package communicator
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/tmp"
|
||||
)
|
||||
|
||||
func TestPEM(t *testing.T) string {
|
||||
tf, err := tmp.File("packer")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
tf.Write([]byte(TestPEMContents))
|
||||
tf.Close()
|
||||
|
||||
return tf.Name()
|
||||
}
|
||||
|
||||
// TestPEMContents is an RSA private key used as fixture data by TestPEM.
// NOTE(review): test-only material — do not use this key outside tests.
const TestPEMContents = `
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAxd4iamvrwRJvtNDGQSIbNvvIQN8imXTRWlRY62EvKov60vqu
hh+rDzFYAIIzlmrJopvOe0clqmi3mIP9dtkjPFrYflq52a2CF5q+BdwsJXuRHbJW
LmStZUwW1khSz93DhvhmK50nIaczW63u4EO/jJb3xj+wxR1Nkk9bxi3DDsYFt8SN
AzYx9kjlEYQ/+sI4/ATfmdV9h78SVotjScupd9KFzzi76gWq9gwyCBLRynTUWlyD
2UOfJRkOvhN6/jKzvYfVVwjPSfA9IMuooHdScmC4F6KBKJl/zf/zETM0XyzIDNmH
uOPbCiljq2WoRM+rY6ET84EO0kVXbfx8uxUsqQIDAQABAoIBAQCkPj9TF0IagbM3
5BSs/CKbAWS4dH/D4bPlxx4IRCNirc8GUg+MRb04Xz0tLuajdQDqeWpr6iLZ0RKV
BvreLF+TOdV7DNQ4XE4gSdJyCtCaTHeort/aordL3l0WgfI7mVk0L/yfN1PEG4YG
E9q1TYcyrB3/8d5JwIkjabxERLglCcP+geOEJp+QijbvFIaZR/n2irlKW4gSy6ko
9B0fgUnhkHysSg49ChHQBPQ+o5BbpuLrPDFMiTPTPhdfsvGGcyCGeqfBA56oHcSF
K02Fg8OM+Bd1lb48LAN9nWWY4WbwV+9bkN3Ym8hO4c3a/Dxf2N7LtAQqWZzFjvM3
/AaDvAgBAoGBAPLD+Xn1IYQPMB2XXCXfOuJewRY7RzoVWvMffJPDfm16O7wOiW5+
2FmvxUDayk4PZy6wQMzGeGKnhcMMZTyaq2g/QtGfrvy7q1Lw2fB1VFlVblvqhoJa
nMJojjC4zgjBkXMHsRLeTmgUKyGs+fdFbfI6uejBnnf+eMVUMIdJ+6I9AoGBANCn
kWO9640dttyXURxNJ3lBr2H3dJOkmD6XS+u+LWqCSKQe691Y/fZ/ZL0Oc4Mhy7I6
hsy3kDQ5k2V0fkaNODQIFJvUqXw2pMewUk8hHc9403f4fe9cPrL12rQ8WlQw4yoC
v2B61vNczCCUDtGxlAaw8jzSRaSI5s6ax3K7enbdAoGBAJB1WYDfA2CoAQO6y9Sl
b07A/7kQ8SN5DbPaqrDrBdJziBQxukoMJQXJeGFNUFD/DXFU5Fp2R7C86vXT7HIR
v6m66zH+CYzOx/YE6EsUJms6UP9VIVF0Rg/RU7teXQwM01ZV32LQ8mswhTH20o/3
uqMHmxUMEhZpUMhrfq0isyApAoGAe1UxGTXfj9AqkIVYylPIq2HqGww7+jFmVEj1
9Wi6S6Sq72ffnzzFEPkIQL/UA4TsdHMnzsYKFPSbbXLIWUeMGyVTmTDA5c0e5XIR
lPhMOKCAzv8w4VUzMnEkTzkFY5JqFCD/ojW57KvDdNZPVB+VEcdxyAW6aKELXMAc
eHLc1nkCgYEApm/motCTPN32nINZ+Vvywbv64ZD+gtpeMNP3CLrbe1X9O+H52AXa
1jCoOldWR8i2bs2NVPcKZgdo6fFULqE4dBX7Te/uYEIuuZhYLNzRO1IKU/YaqsXG
3bfQ8hKYcSnTfE0gPtLDnqCIxTocaGLSHeG3TH9fTw+dA8FvWpUztI4=
-----END RSA PRIVATE KEY-----
`
|
8
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/winrm.go
generated
vendored
Normal file
8
vendor/github.com/hashicorp/packer-plugin-sdk/communicator/winrm.go
generated
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
package communicator
|
||||
|
||||
// WinRMConfig is configuration that can be returned at runtime to
// dynamically configure WinRM.
type WinRMConfig struct {
	// Username overrides the statically configured WinRM user when non-empty.
	Username string
	// Password overrides the statically configured WinRM password when non-empty.
	Password string
}
|
|
@ -0,0 +1,5 @@
|
|||
/*
|
||||
Package filelock makes it easy to create and check file locks for concurrent
|
||||
processes.
|
||||
*/
|
||||
package filelock
|
11
vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
// +build !solaris

package filelock

import "github.com/gofrs/flock"

// Flock is the cross-platform file lock, aliased from github.com/gofrs/flock.
type Flock = flock.Flock

// New creates a Flock for the given path.
func New(path string) *Flock {
	return flock.New(path)
}
|
11
vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock_solaris.go
generated
vendored
Normal file
11
vendor/github.com/hashicorp/packer-plugin-sdk/filelock/filelock_solaris.go
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
|||
// build solaris
|
||||
|
||||
package filelock
|
||||
|
||||
// Flock is a noop on solaris for now.
|
||||
// TODO(azr): PR github.com/gofrs/flock for this.
|
||||
type Flock = Noop
|
||||
|
||||
func New(string) *Flock {
|
||||
return &Flock{}
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
package filelock
|
||||
|
||||
// Noop is a lock implementation that performs no locking at all: every
// operation succeeds immediately.
type Noop struct{}

// Lock reports success without acquiring anything.
func (*Noop) Lock() (bool, error) { return true, nil }

// TryLock reports success without acquiring anything.
func (*Noop) TryLock() (bool, error) { return true, nil }

// Unlock succeeds without releasing anything.
func (*Noop) Unlock() error { return nil }
|
|
@ -0,0 +1,11 @@
|
|||
/*
|
||||
Package guestexec provides a shim for running common operating system commands
|
||||
on the guest/remote instance that is being provisioned. It helps provisioners
|
||||
which need to perform operating-system specific calls do so in a way that is
|
||||
simple and repeatable.
|
||||
|
||||
Note that to successfully use this package your provisioner must have knowledge
|
||||
of the guest type, which is not information that builders generally collect --
|
||||
your provisioner will have to require guest information in its config.
|
||||
*/
|
||||
package guestexec
|
203
vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/elevated.go
generated
vendored
Normal file
203
vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/elevated.go
generated
vendored
Normal file
|
@ -0,0 +1,203 @@
|
|||
package guestexec
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
)
|
||||
|
||||
// ElevatedProvisioner is what a provisioner must implement for its commands
// to be wrapped by GenerateElevatedRunner: it supplies the communicator used
// to upload the wrapper script plus the Windows credentials the scheduled
// task runs under.
type ElevatedProvisioner interface {
	Communicator() packersdk.Communicator
	ElevatedUser() string
	ElevatedPassword() string
}
|
||||
|
||||
// elevatedOptions is the data set rendered into elevatedTemplate to produce
// the PowerShell wrapper for an elevated command.
type elevatedOptions struct {
	User              string
	Password          string
	TaskName          string
	TaskDescription   string
	LogFile           string
	XMLEscapedCommand string
	ScriptFile        string
}
|
||||
|
||||
// psEscape backtick-escapes the characters that are special inside
// PowerShell double-quoted strings ($, ", `, ') so that user and password
// values can be embedded safely in the generated script.
var psEscape = strings.NewReplacer(
	"$", "`$",
	"\"", "`\"",
	"`", "``",
	"'", "`'",
)
|
||||
|
||||
// elevatedTemplate renders the PowerShell script that registers a Windows
// Scheduled Task running the wrapped command with elevated privileges,
// starts it, tails its log file back to the console while it runs, and
// finally removes the log, the script, and the task itself, exiting with
// the task's result code.
var elevatedTemplate = template.Must(template.New("ElevatedCommand").Parse(`
$name = "{{.TaskName}}"
$log = [System.Environment]::ExpandEnvironmentVariables("{{.LogFile}}")
$s = New-Object -ComObject "Schedule.Service"
$s.Connect()
$t = $s.NewTask($null)
$xml = [xml]@'
<?xml version="1.0" encoding="UTF-16"?>
<Task version="1.2" xmlns="http://schemas.microsoft.com/windows/2004/02/mit/task">
<RegistrationInfo>
<Description>{{.TaskDescription}}</Description>
</RegistrationInfo>
<Principals>
<Principal id="Author">
<UserId>{{.User}}</UserId>
<LogonType>Password</LogonType>
<RunLevel>HighestAvailable</RunLevel>
</Principal>
</Principals>
<Settings>
<MultipleInstancesPolicy>IgnoreNew</MultipleInstancesPolicy>
<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>
<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>
<AllowHardTerminate>true</AllowHardTerminate>
<StartWhenAvailable>false</StartWhenAvailable>
<RunOnlyIfNetworkAvailable>false</RunOnlyIfNetworkAvailable>
<IdleSettings>
<StopOnIdleEnd>false</StopOnIdleEnd>
<RestartOnIdle>false</RestartOnIdle>
</IdleSettings>
<AllowStartOnDemand>true</AllowStartOnDemand>
<Enabled>true</Enabled>
<Hidden>false</Hidden>
<RunOnlyIfIdle>false</RunOnlyIfIdle>
<WakeToRun>false</WakeToRun>
<ExecutionTimeLimit>PT0S</ExecutionTimeLimit>
<Priority>4</Priority>
</Settings>
<Actions Context="Author">
<Exec>
<Command>cmd</Command>
<Arguments>/c {{.XMLEscapedCommand}}</Arguments>
</Exec>
</Actions>
</Task>
'@
$logon_type = 1
$password = "{{.Password}}"
if ($password.Length -eq 0) {
$logon_type = 5
$password = $null
$ns = New-Object System.Xml.XmlNamespaceManager($xml.NameTable)
$ns.AddNamespace("ns", $xml.DocumentElement.NamespaceURI)
$node = $xml.SelectSingleNode("/ns:Task/ns:Principals/ns:Principal/ns:LogonType", $ns)
$node.ParentNode.RemoveChild($node) | Out-Null
}
$t.XmlText = $xml.OuterXml
if (Test-Path variable:global:ProgressPreference){$ProgressPreference="SilentlyContinue"}
$f = $s.GetFolder("\")
$f.RegisterTaskDefinition($name, $t, 6, "{{.User}}", $password, $logon_type, $null) | Out-Null
$t = $f.GetTask("\$name")
$t.Run($null) | Out-Null
$timeout = 10
$sec = 0
while ((!($t.state -eq 4)) -and ($sec -lt $timeout)) {
Start-Sleep -s 1
$sec++
}

$line = 0
do {
Start-Sleep -m 100
if (Test-Path $log) {
Get-Content $log | select -skip $line | ForEach {
$line += 1
Write-Output "$_"
}
}
} while (!($t.state -eq 3))
$result = $t.LastTaskResult
if (Test-Path $log) {
Remove-Item $log -Force -ErrorAction SilentlyContinue | Out-Null
}

$script = [System.Environment]::ExpandEnvironmentVariables("{{.ScriptFile}}")
if (Test-Path $script) {
Remove-Item $script -Force -ErrorAction SilentlyContinue | Out-Null
}
$f = $s.GetFolder("\")
$f.DeleteTask("\$name", "")

[System.Runtime.Interopservices.Marshal]::ReleaseComObject($s) | Out-Null
exit $result`))
|
||||
|
||||
func GenerateElevatedRunner(command string, p ElevatedProvisioner) (uploadedPath string, err error) {
|
||||
log.Printf("Building elevated command wrapper for: %s", command)
|
||||
|
||||
var buffer bytes.Buffer
|
||||
|
||||
// Output from the elevated command cannot be returned directly to the
|
||||
// Packer console. In order to be able to view output from elevated
|
||||
// commands and scripts an indirect approach is used by which the commands
|
||||
// output is first redirected to file. The output file is then 'watched'
|
||||
// by Packer while the elevated command is running and any content
|
||||
// appearing in the file is written out to the console. Below the portion
|
||||
// of command required to redirect output from the command to file is
|
||||
// built and appended to the existing command string
|
||||
taskName := fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
// Only use %ENVVAR% format for environment variables when setting the log
|
||||
// file path; Do NOT use $env:ENVVAR format as it won't be expanded
|
||||
// correctly in the elevatedTemplate
|
||||
logFile := `%SYSTEMROOT%/Temp/` + taskName + ".out"
|
||||
command += fmt.Sprintf(" > %s 2>&1", logFile)
|
||||
|
||||
// elevatedTemplate wraps the command in a single quoted XML text string
|
||||
// so we need to escape characters considered 'special' in XML.
|
||||
err = xml.EscapeText(&buffer, []byte(command))
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error escaping characters special to XML in command %s: %s", command, err)
|
||||
}
|
||||
escapedCommand := buffer.String()
|
||||
log.Printf("Command [%s] converted to [%s] for use in XML string", command, escapedCommand)
|
||||
buffer.Reset()
|
||||
|
||||
// Escape chars special to PowerShell in the ElevatedUser string
|
||||
elevatedUser := p.ElevatedUser()
|
||||
escapedElevatedUser := psEscape.Replace(elevatedUser)
|
||||
if escapedElevatedUser != elevatedUser {
|
||||
log.Printf("Elevated user %s converted to %s after escaping chars special to PowerShell",
|
||||
elevatedUser, escapedElevatedUser)
|
||||
}
|
||||
|
||||
// Escape chars special to PowerShell in the ElevatedPassword string
|
||||
elevatedPassword := p.ElevatedPassword()
|
||||
escapedElevatedPassword := psEscape.Replace(elevatedPassword)
|
||||
if escapedElevatedPassword != elevatedPassword {
|
||||
log.Printf("Elevated password %s converted to %s after escaping chars special to PowerShell",
|
||||
elevatedPassword, escapedElevatedPassword)
|
||||
}
|
||||
|
||||
uuid := uuid.TimeOrderedUUID()
|
||||
path := fmt.Sprintf(`C:/Windows/Temp/packer-elevated-shell-%s.ps1`, uuid)
|
||||
|
||||
// Generate command
|
||||
err = elevatedTemplate.Execute(&buffer, elevatedOptions{
|
||||
User: escapedElevatedUser,
|
||||
Password: escapedElevatedPassword,
|
||||
TaskName: taskName,
|
||||
TaskDescription: "Packer elevated task",
|
||||
ScriptFile: path,
|
||||
LogFile: logFile,
|
||||
XMLEscapedCommand: escapedCommand,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
fmt.Printf("Error creating elevated template: %s", err)
|
||||
return "", err
|
||||
}
|
||||
log.Printf("Uploading elevated shell wrapper for command [%s] to [%s]", command, path)
|
||||
err = p.Communicator().Upload(path, &buffer, nil)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("Error preparing elevated powershell script: %s", err)
|
||||
}
|
||||
|
||||
return fmt.Sprintf("powershell -executionpolicy bypass -file \"%s\"", path), err
|
||||
}
|
86
vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/guest_commands.go
generated
vendored
Normal file
86
vendor/github.com/hashicorp/packer-plugin-sdk/guestexec/guest_commands.go
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
package guestexec
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const UnixOSType = "unix"
|
||||
const WindowsOSType = "windows"
|
||||
const DefaultOSType = UnixOSType
|
||||
|
||||
type guestOSTypeCommand struct {
|
||||
chmod string
|
||||
mkdir string
|
||||
removeDir string
|
||||
statPath string
|
||||
mv string
|
||||
}
|
||||
|
||||
var guestOSTypeCommands = map[string]guestOSTypeCommand{
|
||||
UnixOSType: {
|
||||
chmod: "chmod %s '%s'",
|
||||
mkdir: "mkdir -p '%s'",
|
||||
removeDir: "rm -rf '%s'",
|
||||
statPath: "stat '%s'",
|
||||
mv: "mv '%s' '%s'",
|
||||
},
|
||||
WindowsOSType: {
|
||||
chmod: "echo 'skipping chmod %s %s'", // no-op
|
||||
mkdir: "powershell.exe -Command \"New-Item -ItemType directory -Force -ErrorAction SilentlyContinue -Path %s\"",
|
||||
removeDir: "powershell.exe -Command \"rm %s -recurse -force\"",
|
||||
statPath: "powershell.exe -Command { if (test-path %s) { exit 0 } else { exit 1 } }",
|
||||
mv: "powershell.exe -Command \"mv %s %s -force\"",
|
||||
},
|
||||
}
|
||||
|
||||
type GuestCommands struct {
|
||||
GuestOSType string
|
||||
Sudo bool
|
||||
}
|
||||
|
||||
func NewGuestCommands(osType string, sudo bool) (*GuestCommands, error) {
|
||||
_, ok := guestOSTypeCommands[osType]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Invalid osType: \"%s\"", osType)
|
||||
}
|
||||
return &GuestCommands{GuestOSType: osType, Sudo: sudo}, nil
|
||||
}
|
||||
|
||||
func (g *GuestCommands) Chmod(path string, mode string) string {
|
||||
return g.sudo(fmt.Sprintf(g.commands().chmod, mode, g.escapePath(path)))
|
||||
}
|
||||
|
||||
func (g *GuestCommands) CreateDir(path string) string {
|
||||
return g.sudo(fmt.Sprintf(g.commands().mkdir, g.escapePath(path)))
|
||||
}
|
||||
|
||||
func (g *GuestCommands) RemoveDir(path string) string {
|
||||
return g.sudo(fmt.Sprintf(g.commands().removeDir, g.escapePath(path)))
|
||||
}
|
||||
|
||||
func (g *GuestCommands) commands() guestOSTypeCommand {
|
||||
return guestOSTypeCommands[g.GuestOSType]
|
||||
}
|
||||
|
||||
func (g *GuestCommands) escapePath(path string) string {
|
||||
if g.GuestOSType == WindowsOSType {
|
||||
return strings.Replace(path, " ", "` ", -1)
|
||||
}
|
||||
return path
|
||||
}
|
||||
|
||||
func (g *GuestCommands) StatPath(path string) string {
|
||||
return g.sudo(fmt.Sprintf(g.commands().statPath, g.escapePath(path)))
|
||||
}
|
||||
|
||||
func (g *GuestCommands) MovePath(srcPath string, dstPath string) string {
|
||||
return g.sudo(fmt.Sprintf(g.commands().mv, g.escapePath(srcPath), g.escapePath(dstPath)))
|
||||
}
|
||||
|
||||
func (g *GuestCommands) sudo(cmd string) string {
|
||||
if g.GuestOSType == UnixOSType && g.Sudo {
|
||||
return "sudo " + cmd
|
||||
}
|
||||
return cmd
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Unmarshal is wrapper around json.Unmarshal that returns user-friendly
|
||||
// errors when there are syntax errors.
|
||||
func Unmarshal(data []byte, i interface{}) error {
|
||||
err := json.Unmarshal(data, i)
|
||||
if err != nil {
|
||||
syntaxErr, ok := err.(*json.SyntaxError)
|
||||
if !ok {
|
||||
return err
|
||||
}
|
||||
|
||||
// We have a syntax error. Extract out the line number and friends.
|
||||
// https://groups.google.com/forum/#!topic/golang-nuts/fizimmXtVfc
|
||||
newline := []byte{'\x0a'}
|
||||
|
||||
// Calculate the start/end position of the line where the error is
|
||||
start := bytes.LastIndex(data[:syntaxErr.Offset], newline) + 1
|
||||
end := len(data)
|
||||
if idx := bytes.Index(data[start:], newline); idx >= 0 {
|
||||
end = start + idx
|
||||
}
|
||||
|
||||
// Count the line number we're on plus the offset in the line
|
||||
line := bytes.Count(data[:start], newline) + 1
|
||||
pos := int(syntaxErr.Offset) - start - 1
|
||||
|
||||
err = fmt.Errorf("Error in line %d, char %d: %s\n%s",
|
||||
line, pos, syntaxErr, data[start:end])
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2014-2015 Dylan Meissner
|
||||
Copyright (c) 2013 Mitchell Hashimoto
|
||||
|
||||
MIT License
|
||||
|
82
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/basic_runner.go
generated
vendored
Normal file
82
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/basic_runner.go
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
package multistep
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type runState int32
|
||||
|
||||
const (
|
||||
stateIdle runState = iota
|
||||
stateRunning
|
||||
stateCancelling
|
||||
)
|
||||
|
||||
// BasicRunner is a Runner that just runs the given slice of steps.
|
||||
type BasicRunner struct {
|
||||
// Steps is a slice of steps to run. Once set, this should _not_ be
|
||||
// modified.
|
||||
Steps []Step
|
||||
|
||||
l sync.Mutex
|
||||
state runState
|
||||
}
|
||||
|
||||
func (b *BasicRunner) Run(ctx context.Context, state StateBag) {
|
||||
|
||||
b.l.Lock()
|
||||
if b.state != stateIdle {
|
||||
panic("already running")
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
b.state = stateRunning
|
||||
b.l.Unlock()
|
||||
|
||||
defer func() {
|
||||
b.l.Lock()
|
||||
b.state = stateIdle
|
||||
close(doneCh)
|
||||
b.l.Unlock()
|
||||
}()
|
||||
|
||||
// This goroutine listens for cancels and puts the StateCancelled key
|
||||
// as quickly as possible into the state bag to mark it.
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
state.Put(StateCancelled, true)
|
||||
case <-doneCh:
|
||||
}
|
||||
}()
|
||||
|
||||
for _, step := range b.Steps {
|
||||
if step == nil {
|
||||
continue
|
||||
}
|
||||
if err := ctx.Err(); err != nil {
|
||||
state.Put(StateCancelled, true)
|
||||
break
|
||||
}
|
||||
// We also check for cancellation here since we can't be sure
|
||||
// the goroutine that is running to set it actually ran.
|
||||
if runState(atomic.LoadInt32((*int32)(&b.state))) == stateCancelling {
|
||||
state.Put(StateCancelled, true)
|
||||
break
|
||||
}
|
||||
|
||||
action := step.Run(ctx, state)
|
||||
defer step.Cleanup(state)
|
||||
|
||||
if _, ok := state.GetOk(StateCancelled); ok {
|
||||
break
|
||||
}
|
||||
|
||||
if action == ActionHalt {
|
||||
state.Put(StateHalted, true)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
15
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/doc.go
generated
vendored
Normal file
15
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
/*
|
||||
The commonsteps package contains the multistep runner that comprises the main
|
||||
architectural convention of Packer builder plugins. It enables builders
|
||||
to respect global Packer flags like "on-error" and "debug". It also contains
|
||||
a selection of convenience "multistep" steps that perform globally relevant
|
||||
tasks that many or most builders will want to implement -- for example,
|
||||
launching Packer's internal HTTP server for serving files to the instance.
|
||||
|
||||
It also provides step_provision, which contains the hooks necessary for allowing
|
||||
provisioners to run inside your builder.
|
||||
|
||||
While it is possible to create a simple builder without using the multistep
|
||||
runner or step_provision, your builder will lack core Packer functionality.
|
||||
*/
|
||||
package commonsteps
|
99
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/extra_iso_config.go
generated
vendored
Normal file
99
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/extra_iso_config.go
generated
vendored
Normal file
|
@ -0,0 +1,99 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package commonsteps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// An iso (CD) containing custom files can be made available for your build.
|
||||
//
|
||||
// By default, no extra CD will be attached. All files listed in this setting
|
||||
// get placed into the root directory of the CD and the CD is attached as the
|
||||
// second CD device.
|
||||
//
|
||||
// This config exists to work around modern operating systems that have no
|
||||
// way to mount floppy disks, which was our previous go-to for adding files at
|
||||
// boot time.
|
||||
type CDConfig struct {
|
||||
// A list of files to place onto a CD that is attached when the VM is
|
||||
// booted. This can include either files or directories; any directories
|
||||
// will be copied onto the CD recursively, preserving directory structure
|
||||
// hierarchy. Symlinks will have the link's target copied into the directory
|
||||
// tree on the CD where the symlink was. File globbing is allowed.
|
||||
//
|
||||
// Usage example (JSON):
|
||||
//
|
||||
// ```json
|
||||
// "cd_files": ["./somedirectory/meta-data", "./somedirectory/user-data"],
|
||||
// "cd_label": "cidata",
|
||||
// ```
|
||||
//
|
||||
// Usage example (HCL):
|
||||
//
|
||||
// ```hcl
|
||||
// cd_files = ["./somedirectory/meta-data", "./somedirectory/user-data"]
|
||||
// cd_label = "cidata"
|
||||
// ```
|
||||
//
|
||||
// The above will create a CD with two files, user-data and meta-data in the
|
||||
// CD root. This specific example is how you would create a CD that can be
|
||||
// used for an Ubuntu 20.04 autoinstall.
|
||||
//
|
||||
// Since globbing is also supported,
|
||||
//
|
||||
// ```hcl
|
||||
// cd_files = ["./somedirectory/*"]
|
||||
// cd_label = "cidata"
|
||||
// ```
|
||||
//
|
||||
// Would also be an acceptable way to define the above cd. The difference
|
||||
// between providing the directory with or without the glob is whether the
|
||||
// directory itself or its contents will be at the CD root.
|
||||
//
|
||||
// Use of this option assumes that you have a command line tool installed
|
||||
// that can handle the iso creation. Packer will use one of the following
|
||||
// tools:
|
||||
//
|
||||
// * xorriso
|
||||
// * mkisofs
|
||||
// * hdiutil (normally found in macOS)
|
||||
// * oscdimg (normally found in Windows as part of the Windows ADK)
|
||||
CDFiles []string `mapstructure:"cd_files"`
|
||||
CDLabel string `mapstructure:"cd_label"`
|
||||
}
|
||||
|
||||
func (c *CDConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
var err error
|
||||
|
||||
if c.CDFiles == nil {
|
||||
c.CDFiles = make([]string, 0)
|
||||
}
|
||||
|
||||
// Create new file list based on globbing.
|
||||
var files []string
|
||||
for _, path := range c.CDFiles {
|
||||
if strings.ContainsAny(path, "*?[") {
|
||||
var globbedFiles []string
|
||||
globbedFiles, err = filepath.Glob(path)
|
||||
if len(globbedFiles) > 0 {
|
||||
files = append(files, globbedFiles...)
|
||||
}
|
||||
} else {
|
||||
_, err = os.Stat(path)
|
||||
files = append(files, path)
|
||||
}
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("Bad CD disk file '%s': %s", path, err))
|
||||
}
|
||||
c.CDFiles = files
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
74
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/floppy_config.go
generated
vendored
Normal file
74
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/floppy_config.go
generated
vendored
Normal file
|
@ -0,0 +1,74 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package commonsteps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// A floppy can be made available for your build. This is most useful for
|
||||
// unattended Windows installs, which look for an Autounattend.xml file on
|
||||
// removable media. By default, no floppy will be attached. All files listed in
|
||||
// this setting get placed into the root directory of the floppy and the floppy
|
||||
// is attached as the first floppy device. The summary size of the listed files
|
||||
// must not exceed 1.44 MB. The supported ways to move large files into the OS
|
||||
// are using `http_directory` or [the file
|
||||
// provisioner](/docs/provisioners/file).
|
||||
type FloppyConfig struct {
|
||||
// A list of files to place onto a floppy disk that is attached when the VM
|
||||
// is booted. Currently, no support exists for creating sub-directories on
|
||||
// the floppy. Wildcard characters (\\*, ?, and \[\]) are allowed. Directory
|
||||
// names are also allowed, which will add all the files found in the
|
||||
// directory to the floppy.
|
||||
FloppyFiles []string `mapstructure:"floppy_files"`
|
||||
// A list of directories to place onto the floppy disk recursively. This is
|
||||
// similar to the `floppy_files` option except that the directory structure
|
||||
// is preserved. This is useful for when your floppy disk includes drivers
|
||||
// or if you just want to organize it's contents as a hierarchy. Wildcard
|
||||
// characters (\\*, ?, and \[\]) are allowed. The maximum summary size of
|
||||
// all files in the listed directories are the same as in `floppy_files`.
|
||||
FloppyDirectories []string `mapstructure:"floppy_dirs"`
|
||||
FloppyLabel string `mapstructure:"floppy_label"`
|
||||
}
|
||||
|
||||
func (c *FloppyConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
var err error
|
||||
|
||||
if c.FloppyFiles == nil {
|
||||
c.FloppyFiles = make([]string, 0)
|
||||
}
|
||||
|
||||
for _, path := range c.FloppyFiles {
|
||||
if strings.ContainsAny(path, "*?[") {
|
||||
_, err = filepath.Glob(path)
|
||||
} else {
|
||||
_, err = os.Stat(path)
|
||||
}
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("Bad Floppy disk file '%s': %s", path, err))
|
||||
}
|
||||
}
|
||||
|
||||
if c.FloppyDirectories == nil {
|
||||
c.FloppyDirectories = make([]string, 0)
|
||||
}
|
||||
|
||||
for _, path := range c.FloppyDirectories {
|
||||
if strings.ContainsAny(path, "*?[") {
|
||||
_, err = filepath.Glob(path)
|
||||
} else {
|
||||
_, err = os.Stat(path)
|
||||
}
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("Bad Floppy disk directory '%s': %s", path, err))
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
70
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/http_config.go
generated
vendored
Normal file
70
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/http_config.go
generated
vendored
Normal file
|
@ -0,0 +1,70 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package commonsteps
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// Packer will create an http server serving `http_directory` when it is set, a
|
||||
// random free port will be selected and the architecture of the directory
|
||||
// referenced will be available in your builder.
|
||||
//
|
||||
// Example usage from a builder:
|
||||
//
|
||||
// `wget http://{{ .HTTPIP }}:{{ .HTTPPort }}/foo/bar/preseed.cfg`
|
||||
type HTTPConfig struct {
|
||||
// Path to a directory to serve using an HTTP server. The files in this
|
||||
// directory will be available over HTTP that will be requestable from the
|
||||
// virtual machine. This is useful for hosting kickstart files and so on.
|
||||
// By default this is an empty string, which means no HTTP server will be
|
||||
// started. The address and port of the HTTP server will be available as
|
||||
// variables in `boot_command`. This is covered in more detail below.
|
||||
HTTPDir string `mapstructure:"http_directory"`
|
||||
// These are the minimum and maximum port to use for the HTTP server
|
||||
// started to serve the `http_directory`. Because Packer often runs in
|
||||
// parallel, Packer will choose a randomly available port in this range to
|
||||
// run the HTTP server. If you want to force the HTTP server to be on one
|
||||
// port, make this minimum and maximum port the same. By default the values
|
||||
// are `8000` and `9000`, respectively.
|
||||
HTTPPortMin int `mapstructure:"http_port_min"`
|
||||
HTTPPortMax int `mapstructure:"http_port_max"`
|
||||
// This is the bind address for the HTTP server. Defaults to 0.0.0.0 so that
|
||||
// it will work with any network interface.
|
||||
HTTPAddress string `mapstructure:"http_bind_address"`
|
||||
// This is the bind interface for the HTTP server. Defaults to the first
|
||||
// interface with a non-loopback address. Either `http_bind_address` or
|
||||
// `http_interface` can be specified.
|
||||
HTTPInterface string `mapstructure:"http_interface" undocumented:"true"`
|
||||
}
|
||||
|
||||
func (c *HTTPConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
// Validation
|
||||
var errs []error
|
||||
|
||||
if c.HTTPPortMin == 0 {
|
||||
c.HTTPPortMin = 8000
|
||||
}
|
||||
|
||||
if c.HTTPPortMax == 0 {
|
||||
c.HTTPPortMax = 9000
|
||||
}
|
||||
|
||||
if c.HTTPAddress == "" {
|
||||
c.HTTPAddress = "0.0.0.0"
|
||||
}
|
||||
|
||||
if c.HTTPPortMin > c.HTTPPortMax {
|
||||
errs = append(errs,
|
||||
errors.New("http_port_min must be less than http_port_max"))
|
||||
}
|
||||
|
||||
if c.HTTPInterface != "" && c.HTTPAddress == "0.0.0.0" {
|
||||
errs = append(errs,
|
||||
errors.New("either http_interface of http_bind_address can be specified"))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
193
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/iso_config.go
generated
vendored
Normal file
193
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/iso_config.go
generated
vendored
Normal file
|
@ -0,0 +1,193 @@
|
|||
//go:generate struct-markdown
|
||||
|
||||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
getter "github.com/hashicorp/go-getter/v2"
|
||||
urlhelper "github.com/hashicorp/go-getter/v2/helper/url"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// By default, Packer will symlink, download or copy image files to the Packer
|
||||
// cache into a "`hash($iso_url+$iso_checksum).$iso_target_extension`" file.
|
||||
// Packer uses [hashicorp/go-getter](https://github.com/hashicorp/go-getter) in
|
||||
// file mode in order to perform a download.
|
||||
//
|
||||
// go-getter supports the following protocols:
|
||||
//
|
||||
// * Local files
|
||||
// * Git
|
||||
// * Mercurial
|
||||
// * HTTP
|
||||
// * Amazon S3
|
||||
//
|
||||
// Examples:
|
||||
// go-getter can guess the checksum type based on `iso_checksum` length, and it is
|
||||
// also possible to specify the checksum type.
|
||||
//
|
||||
// In JSON:
|
||||
//
|
||||
// ```json
|
||||
// "iso_checksum": "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2",
|
||||
// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```json
|
||||
// "iso_checksum": "file:ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum",
|
||||
// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```json
|
||||
// "iso_checksum": "file://./shasums.txt",
|
||||
// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```json
|
||||
// "iso_checksum": "file:./shasums.txt",
|
||||
// "iso_url": "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// In HCL2:
|
||||
//
|
||||
// ```hcl
|
||||
// iso_checksum = "946a6077af6f5f95a51f82fdc44051c7aa19f9cfc5f737954845a6050543d7c2"
|
||||
// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```hcl
|
||||
// iso_checksum = "file:ubuntu.org/..../ubuntu-14.04.1-server-amd64.iso.sum"
|
||||
// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```hcl
|
||||
// iso_checksum = "file://./shasums.txt"
|
||||
// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
// ```hcl
|
||||
// iso_checksum = "file:./shasums.txt",
|
||||
// iso_url = "ubuntu.org/.../ubuntu-14.04.1-server-amd64.iso"
|
||||
// ```
|
||||
//
|
||||
type ISOConfig struct {
|
||||
// The checksum for the ISO file or virtual hard drive file. The type of
|
||||
// the checksum is specified within the checksum field as a prefix, ex:
|
||||
// "md5:{$checksum}". The type of the checksum can also be omitted and
|
||||
// Packer will try to infer it based on string length. Valid values are
|
||||
// "none", "{$checksum}", "md5:{$checksum}", "sha1:{$checksum}",
|
||||
// "sha256:{$checksum}", "sha512:{$checksum}" or "file:{$path}". Here is a
|
||||
// list of valid checksum values:
|
||||
// * md5:090992ba9fd140077b0661cb75f7ce13
|
||||
// * 090992ba9fd140077b0661cb75f7ce13
|
||||
// * sha1:ebfb681885ddf1234c18094a45bbeafd91467911
|
||||
// * ebfb681885ddf1234c18094a45bbeafd91467911
|
||||
// * sha256:ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
|
||||
// * ed363350696a726b7932db864dda019bd2017365c9e299627830f06954643f93
|
||||
// * file:http://releases.ubuntu.com/20.04/MD5SUMS
|
||||
// * file:file://./local/path/file.sum
|
||||
// * file:./local/path/file.sum
|
||||
// * none
|
||||
// Although the checksum will not be verified when it is set to "none",
|
||||
// this is not recommended since these files can be very large and
|
||||
// corruption does happen from time to time.
|
||||
ISOChecksum string `mapstructure:"iso_checksum" required:"true"`
|
||||
// A URL to the ISO containing the installation image or virtual hard drive
|
||||
// (VHD or VHDX) file to clone.
|
||||
RawSingleISOUrl string `mapstructure:"iso_url" required:"true"`
|
||||
// Multiple URLs for the ISO to download. Packer will try these in order.
|
||||
// If anything goes wrong attempting to download or while downloading a
|
||||
// single URL, it will move on to the next. All URLs must point to the same
|
||||
// file (same checksum). By default this is empty and `iso_url` is used.
|
||||
// Only one of `iso_url` or `iso_urls` can be specified.
|
||||
ISOUrls []string `mapstructure:"iso_urls"`
|
||||
// The path where the iso should be saved after download. By default will
|
||||
// go in the packer cache, with a hash of the original filename and
|
||||
// checksum as its name.
|
||||
TargetPath string `mapstructure:"iso_target_path"`
|
||||
// The extension of the iso file after download. This defaults to `iso`.
|
||||
TargetExtension string `mapstructure:"iso_target_extension"`
|
||||
}
|
||||
|
||||
func (c *ISOConfig) Prepare(*interpolate.Context) (warnings []string, errs []error) {
|
||||
if len(c.ISOUrls) != 0 && c.RawSingleISOUrl != "" {
|
||||
errs = append(
|
||||
errs, errors.New("Only one of iso_url or iso_urls must be specified"))
|
||||
return
|
||||
}
|
||||
|
||||
if c.RawSingleISOUrl != "" {
|
||||
// make sure only array is set
|
||||
c.ISOUrls = append([]string{c.RawSingleISOUrl}, c.ISOUrls...)
|
||||
c.RawSingleISOUrl = ""
|
||||
}
|
||||
|
||||
if len(c.ISOUrls) == 0 {
|
||||
errs = append(
|
||||
errs, errors.New("One of iso_url or iso_urls must be specified"))
|
||||
return
|
||||
}
|
||||
if c.TargetExtension == "" {
|
||||
c.TargetExtension = "iso"
|
||||
}
|
||||
c.TargetExtension = strings.ToLower(c.TargetExtension)
|
||||
|
||||
// Warnings
|
||||
if c.ISOChecksum == "none" {
|
||||
warnings = append(warnings,
|
||||
"A checksum of 'none' was specified. Since ISO files are so big,\n"+
|
||||
"a checksum is highly recommended.")
|
||||
return warnings, errs
|
||||
} else if c.ISOChecksum == "" {
|
||||
errs = append(errs, fmt.Errorf("A checksum must be specified"))
|
||||
} else {
|
||||
// ESX5Driver.VerifyChecksum is ran remotely but should not download a
|
||||
// checksum file, therefore in case it is a file, we need to download
|
||||
// it now and compute the checksum now, we transform it back to a
|
||||
// checksum string so that it can be simply read in the VerifyChecksum.
|
||||
//
|
||||
// Doing this also has the added benefit of failing early if a checksum
|
||||
// is incorrect or if getting it should fail.
|
||||
u, err := urlhelper.Parse(c.ISOUrls[0])
|
||||
if err != nil {
|
||||
return warnings, append(errs, fmt.Errorf("url parse: %s", err))
|
||||
}
|
||||
|
||||
q := u.Query()
|
||||
if c.ISOChecksum != "" {
|
||||
q.Set("checksum", c.ISOChecksum)
|
||||
}
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Printf("Getwd: %v", err)
|
||||
// here we ignore the error in case the
|
||||
// working directory is not needed.
|
||||
}
|
||||
|
||||
req := &getter.Request{
|
||||
Src: u.String(),
|
||||
Pwd: wd,
|
||||
}
|
||||
cksum, err := defaultGetterClient.GetChecksum(context.TODO(), req)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("%v in %q", err, req.URL().Query().Get("checksum")))
|
||||
} else {
|
||||
c.ISOChecksum = cksum.String()
|
||||
}
|
||||
}
|
||||
|
||||
if strings.HasSuffix(strings.ToLower(c.ISOChecksum), ".iso") {
|
||||
errs = append(errs, fmt.Errorf("Error parsing checksum:"+
|
||||
" .iso is not a valid checksum ending"))
|
||||
}
|
||||
|
||||
return warnings, errs
|
||||
}
|
51
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_debug.go
generated
vendored
Normal file
51
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_debug.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// MultistepDebugFn will return a proper multistep.DebugPauseFn to
|
||||
// use for debugging if you're using multistep in your builder.
|
||||
func MultistepDebugFn(ui packersdk.Ui) multistep.DebugPauseFn {
|
||||
return func(loc multistep.DebugLocation, name string, state multistep.StateBag) {
|
||||
var locationString string
|
||||
switch loc {
|
||||
case multistep.DebugLocationAfterRun:
|
||||
locationString = "after run of"
|
||||
case multistep.DebugLocationBeforeCleanup:
|
||||
locationString = "before cleanup of"
|
||||
default:
|
||||
locationString = "at"
|
||||
}
|
||||
|
||||
message := fmt.Sprintf(
|
||||
"Pausing %s step '%s'. Press enter to continue.",
|
||||
locationString, name)
|
||||
|
||||
result := make(chan string, 1)
|
||||
go func() {
|
||||
line, err := ui.Ask(message)
|
||||
if err != nil {
|
||||
log.Printf("Error asking for input: %s", err)
|
||||
}
|
||||
|
||||
result <- line
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-result:
|
||||
return
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
219
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_runner.go
generated
vendored
Normal file
219
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/multistep_runner.go
generated
vendored
Normal file
|
@ -0,0 +1,219 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func newRunner(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui) (multistep.Runner, multistep.DebugPauseFn) {
|
||||
switch config.PackerOnError {
|
||||
case "", "cleanup":
|
||||
case "abort":
|
||||
for i, step := range steps {
|
||||
steps[i] = abortStep{
|
||||
step: step,
|
||||
cleanupProv: false,
|
||||
ui: ui,
|
||||
}
|
||||
}
|
||||
case "ask":
|
||||
for i, step := range steps {
|
||||
steps[i] = askStep{step, ui}
|
||||
}
|
||||
case "run-cleanup-provisioner":
|
||||
for i, step := range steps {
|
||||
steps[i] = abortStep{
|
||||
step: step,
|
||||
cleanupProv: true,
|
||||
ui: ui,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if config.PackerDebug {
|
||||
pauseFn := MultistepDebugFn(ui)
|
||||
return &multistep.DebugRunner{Steps: steps, PauseFn: pauseFn}, pauseFn
|
||||
} else {
|
||||
return &multistep.BasicRunner{Steps: steps}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewRunner returns a multistep.Runner that runs steps augmented with support
|
||||
// for -debug and -on-error command line arguments.
|
||||
func NewRunner(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui) multistep.Runner {
|
||||
runner, _ := newRunner(steps, config, ui)
|
||||
return runner
|
||||
}
|
||||
|
||||
// NewRunnerWithPauseFn returns a multistep.Runner that runs steps augmented
|
||||
// with support for -debug and -on-error command line arguments. With -debug it
|
||||
// puts the multistep.DebugPauseFn that will pause execution between steps into
|
||||
// the state under the key "pauseFn".
|
||||
func NewRunnerWithPauseFn(steps []multistep.Step, config common.PackerConfig, ui packersdk.Ui, state multistep.StateBag) multistep.Runner {
|
||||
runner, pauseFn := newRunner(steps, config, ui)
|
||||
if pauseFn != nil {
|
||||
state.Put("pauseFn", pauseFn)
|
||||
}
|
||||
return runner
|
||||
}
|
||||
|
||||
func typeName(i interface{}) string {
|
||||
return reflect.Indirect(reflect.ValueOf(i)).Type().Name()
|
||||
}
|
||||
|
||||
type abortStep struct {
|
||||
step multistep.Step
|
||||
cleanupProv bool
|
||||
ui packersdk.Ui
|
||||
}
|
||||
|
||||
func (s abortStep) InnerStepName() string {
|
||||
return typeName(s.step)
|
||||
}
|
||||
|
||||
func (s abortStep) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
return s.step.Run(ctx, state)
|
||||
}
|
||||
|
||||
func (s abortStep) Cleanup(state multistep.StateBag) {
|
||||
if s.InnerStepName() == typeName(StepProvision{}) && s.cleanupProv {
|
||||
s.step.Cleanup(state)
|
||||
return
|
||||
}
|
||||
|
||||
shouldCleanup := handleAbortsAndInterupts(state, s.ui, typeName(s.step))
|
||||
if !shouldCleanup {
|
||||
return
|
||||
}
|
||||
s.step.Cleanup(state)
|
||||
}
|
||||
|
||||
type askStep struct {
|
||||
step multistep.Step
|
||||
ui packersdk.Ui
|
||||
}
|
||||
|
||||
func (s askStep) InnerStepName() string {
|
||||
return typeName(s.step)
|
||||
}
|
||||
|
||||
func (s askStep) Run(ctx context.Context, state multistep.StateBag) (action multistep.StepAction) {
|
||||
for {
|
||||
action = s.step.Run(ctx, state)
|
||||
|
||||
if action != multistep.ActionHalt {
|
||||
return
|
||||
}
|
||||
|
||||
err, ok := state.GetOk("error")
|
||||
if ok {
|
||||
s.ui.Error(fmt.Sprintf("%s", err))
|
||||
}
|
||||
|
||||
switch ask(s.ui, typeName(s.step), state) {
|
||||
case askCleanup:
|
||||
return
|
||||
case askAbort:
|
||||
state.Put("aborted", true)
|
||||
return
|
||||
case askRetry:
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s askStep) Cleanup(state multistep.StateBag) {
|
||||
if _, ok := state.GetOk("aborted"); ok {
|
||||
shouldCleanup := handleAbortsAndInterupts(state, s.ui, typeName(s.step))
|
||||
if !shouldCleanup {
|
||||
return
|
||||
}
|
||||
}
|
||||
s.step.Cleanup(state)
|
||||
}
|
||||
|
||||
type askResponse int
|
||||
|
||||
const (
|
||||
askCleanup askResponse = iota
|
||||
askAbort
|
||||
askRetry
|
||||
)
|
||||
|
||||
func ask(ui packersdk.Ui, name string, state multistep.StateBag) askResponse {
|
||||
ui.Say(fmt.Sprintf("Step %q failed", name))
|
||||
|
||||
result := make(chan askResponse)
|
||||
go func() {
|
||||
result <- askPrompt(ui)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case response := <-result:
|
||||
return response
|
||||
case <-time.After(100 * time.Millisecond):
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
return askCleanup
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func askPrompt(ui packersdk.Ui) askResponse {
|
||||
for {
|
||||
line, err := ui.Ask("[c] Clean up and exit, [a] abort without cleanup, or [r] retry step (build may fail even if retry succeeds)?")
|
||||
if err != nil {
|
||||
log.Printf("Error asking for input: %s", err)
|
||||
}
|
||||
|
||||
input := strings.ToLower(line) + "c"
|
||||
switch input[0] {
|
||||
case 'c':
|
||||
return askCleanup
|
||||
case 'a':
|
||||
return askAbort
|
||||
case 'r':
|
||||
return askRetry
|
||||
}
|
||||
ui.Say(fmt.Sprintf("Incorrect input: %#v", line))
|
||||
}
|
||||
}
|
||||
|
||||
// handleAbortsAndInterupts reports whether a step's cleanup should run
// given the current build state: it returns false when the build was
// cancelled (interrupted) or halted, true otherwise. The
// "abort_step_logged" state key ensures the detailed error/abort message
// is printed only once across cleanup calls — later calls just note that
// the step's cleanup is being skipped.
func handleAbortsAndInterupts(state multistep.StateBag, ui packersdk.Ui, stepName string) bool {
	// if returns false, don't run cleanup. If true, do run cleanup.
	_, alreadyLogged := state.GetOk("abort_step_logged")

	// Show the recorded error once, if there is one.
	err, ok := state.GetOk("error")
	if ok && !alreadyLogged {
		ui.Error(fmt.Sprintf("%s", err))
		state.Put("abort_step_logged", true)
	}
	if _, ok := state.GetOk(multistep.StateCancelled); ok {
		if !alreadyLogged {
			ui.Error("Interrupted, aborting...")
			state.Put("abort_step_logged", true)
		} else {
			ui.Error(fmt.Sprintf("aborted: skipping cleanup of step %q", stepName))
		}
		return false
	}
	if _, ok := state.GetOk(multistep.StateHalted); ok {
		if !alreadyLogged {
			ui.Error(fmt.Sprintf("Step %q failed, aborting...", stepName))
			state.Put("abort_step_logged", true)
		} else {
			ui.Error(fmt.Sprintf("aborted: skipping cleanup of step %q", stepName))
		}
		return false
	}
	return true
}
|
72
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_cleanup_temp_keys.go
generated
vendored
Normal file
72
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_cleanup_temp_keys.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepCleanupTempKeys removes the ephemeral SSH public key Packer added
// to the guest's authorized_keys files during the build.
type StepCleanupTempKeys struct {
	// Comm holds the communicator configuration; the step only acts for
	// SSH communicators with SSHClearAuthorizedKeys set.
	Comm *communicator.Config
}
|
||||
|
||||
// Run strips the temporary key pair's entry from both the build user's
// and root's authorized_keys files on the guest, via remote `sed`
// commands. It is a no-op unless the communicator is SSH, a temporary
// key pair name is set, and SSHClearAuthorizedKeys was requested.
// Failures are only logged — the build continues regardless.
func (s *StepCleanupTempKeys) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	// This step is mostly cosmetic; Packer deletes the ephemeral keys anyway
	// so there's no realistic situation where these keys can cause issues.
	// However, it's nice to clean up after yourself.

	if !s.Comm.SSHClearAuthorizedKeys {
		return multistep.ActionContinue
	}

	if s.Comm.Type != "ssh" {
		return multistep.ActionContinue
	}

	if s.Comm.SSHTemporaryKeyPairName == "" {
		return multistep.ActionContinue
	}

	comm := state.Get("communicator").(packersdk.Communicator)
	ui := state.Get("ui").(packersdk.Ui)

	cmd := new(packersdk.RemoteCmd)

	ui.Say("Trying to remove ephemeral keys from authorized_keys files")

	// Per the OpenSSH manual (https://man.openbsd.org/sshd.8), a typical
	// line in the 'authorized_keys' file contains several fields that
	// are delimited by spaces. Here is an (abbreviated) example of a line:
	// 	ssh-rsa AAAAB3Nza...LiPk== user@example.net
	//
	// In the above example, 'ssh-rsa' is the key pair type,
	// 'AAAAB3Nza...LiPk==' is the base64 encoded public key,
	// and 'user@example.net' is a comment (in this case, describing
	// who the key belongs to).
	//
	// In the following 'sed' calls, the comment field will be equal to
	// the value of communicator.Config.SSHTemporaryKeyPairName.
	// We can remove an authorized public key using 'sed' by looking
	// for a line ending in ' packer-key-pair-comment' (note the
	// leading space).
	//
	// TODO: Why create a backup file if you are going to remove it?
	cmd.Command = fmt.Sprintf("sed -i.bak '/ %s$/d' ~/.ssh/authorized_keys; rm ~/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
	if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
		log.Printf("Error cleaning up ~/.ssh/authorized_keys; please clean up keys manually: %s", err)
	}
	// Repeat for root's authorized_keys, via sudo.
	cmd = new(packersdk.RemoteCmd)
	cmd.Command = fmt.Sprintf("sudo sed -i.bak '/ %s$/d' /root/.ssh/authorized_keys; sudo rm /root/.ssh/authorized_keys.bak", s.Comm.SSHTemporaryKeyPairName)
	if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
		log.Printf("Error cleaning up /root/.ssh/authorized_keys; please clean up keys manually: %s", err)
	}

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup is a no-op; this step has nothing to undo.
func (s *StepCleanupTempKeys) Cleanup(state multistep.StateBag) {
}
|
294
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_cdrom.go
generated
vendored
Normal file
294
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_cdrom.go
generated
vendored
Normal file
|
@ -0,0 +1,294 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/shell-local/localexec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/tmp"
|
||||
)
|
||||
|
||||
// StepCreateCD will create a CD disk with the given files.
type StepCreateCD struct {
	// Files can be either files or directories. Any files provided here will
	// be written to the root of the CD. Directories will be written to the
	// root of the CD as well, but will retain their subdirectory structure.
	Files []string
	// Label is the ISO volume label; defaults to "packer" when empty.
	Label string

	// CDPath is the path of the generated ISO. It is set during Run and
	// the file is removed again in Cleanup.
	CDPath string

	// filesAdded tracks which files were added. Used for testing the step.
	filesAdded map[string]bool
}
|
||||
|
||||
func (s *StepCreateCD) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
if len(s.Files) == 0 {
|
||||
log.Println("No CD files specified. CD disk will not be made.")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
ui.Say("Creating CD disk...")
|
||||
|
||||
if s.Label == "" {
|
||||
s.Label = "packer"
|
||||
} else {
|
||||
log.Printf("CD label is set to %s", s.Label)
|
||||
}
|
||||
|
||||
// Track what files are added. Used for testing step.
|
||||
s.filesAdded = make(map[string]bool)
|
||||
|
||||
// Create a temporary file to be our CD drive
|
||||
CDF, err := tmp.File("packer*.iso")
|
||||
// Set the path so we can remove it later
|
||||
CDPath := CDF.Name()
|
||||
CDF.Close()
|
||||
os.Remove(CDPath)
|
||||
if err != nil {
|
||||
state.Put("error",
|
||||
fmt.Errorf("Error creating temporary file for CD: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
log.Printf("CD path: %s", CDPath)
|
||||
s.CDPath = CDPath
|
||||
|
||||
// Consolidate all files provided into a single directory to become our
|
||||
// "root" directory.
|
||||
rootFolder, err := tmp.Dir("packer_to_cdrom")
|
||||
if err != nil {
|
||||
state.Put("error",
|
||||
fmt.Errorf("Error creating temporary file for CD: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
for _, toAdd := range s.Files {
|
||||
err = s.AddFile(rootFolder, toAdd)
|
||||
if err != nil {
|
||||
state.Put("error",
|
||||
fmt.Errorf("Error creating temporary file for CD: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
|
||||
cmd, err := retrieveCDISOCreationCommand(s.Label, rootFolder, CDPath)
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
err = localexec.RunAndStream(cmd, ui, []string{})
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Message("Done copying paths from CD_dirs")
|
||||
|
||||
// Set the path to the CD so it can be used later
|
||||
state.Put("cd_path", CDPath)
|
||||
|
||||
if err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup deletes the generated ISO, if Run created one.
func (s *StepCreateCD) Cleanup(multistep.StateBag) {
	if s.CDPath != "" {
		log.Printf("Deleting CD disk: %s", s.CDPath)
		os.Remove(s.CDPath)
	}
}
|
||||
|
||||
// cdISOCreationCommand pairs the name of an ISO-creation executable with
// a function that builds the exec.Cmd invoking it.
type cdISOCreationCommand struct {
	// Name is the executable looked up on PATH.
	Name string
	// Command builds the invocation: path is the resolved executable,
	// label the volume label, source the root directory to image, and
	// dest the output ISO path.
	Command func(path string, label string, source string, dest string) *exec.Cmd
}
|
||||
|
||||
// supportedCDISOCreationCommands lists the candidate ISO-creation tools,
// tried in this order by retrieveCDISOCreationCommand.
var supportedCDISOCreationCommands []cdISOCreationCommand = []cdISOCreationCommand{
	{
		"xorriso", func(path string, label string, source string, dest string) *exec.Cmd {
			return exec.Command(
				path,
				"-as", "genisoimage",
				"-rock",
				"-joliet",
				"-volid", label,
				"-output", dest,
				source)
		},
	},
	{
		"mkisofs", func(path string, label string, source string, dest string) *exec.Cmd {
			return exec.Command(
				path,
				"-joliet",
				"-volid", label,
				"-o", dest,
				source)
		},
	},
	{
		"hdiutil", func(path string, label string, source string, dest string) *exec.Cmd {
			return exec.Command(
				path,
				"makehybrid",
				"-o", dest,
				"-hfs",
				"-joliet",
				"-iso",
				"-default-volume-name", label,
				source)
		},
	},
	{
		"oscdimg", func(path string, label string, source string, dest string) *exec.Cmd {
			return exec.Command(
				path,
				"-j1",
				"-o",
				"-m",
				"-l"+label,
				source,
				dest)
		},
	},
}
|
||||
|
||||
// isCygwinExecutable reports whether path looks like a cygwin/msys2
// executable: we must be on Windows and the path must contain a
// `\usr\bin\` component.
func isCygwinExecutable(path string) bool {
	if runtime.GOOS != "windows" {
		return false
	}
	return strings.Contains(path, "\\usr\\bin\\")
}
|
||||
|
||||
// toCygwinPath converts a native win32 path to its cygwin/msys2
// (unix-style) equivalent by shelling out to `cygpath`. The trimmed
// stdout is returned together with any execution error.
func toCygwinPath(path string) (string, error) {
	c := exec.Command("cygpath", path)
	cygwinPath, err := c.Output()
	return strings.TrimSpace(string(cygwinPath)), err
}
|
||||
|
||||
// retrieveCDISOCreationCommand returns a ready-to-run command that
// images the source directory into an ISO at dest with the given volume
// label. The first tool from supportedCDISOCreationCommands found on
// PATH wins; if none is found, an error naming the supported tools is
// returned. When the resolved tool is a cygwin/msys2 binary, source and
// dest are first translated to cygwin-style paths.
func retrieveCDISOCreationCommand(label string, source string, dest string) (*exec.Cmd, error) {
	for _, c := range supportedCDISOCreationCommands {
		path, err := exec.LookPath(c.Name)
		if err != nil {
			continue
		}
		// if we are running a cygwin/msys2 executable we must convert the
		// native win32 path to a cygwin/msys2/unix style path.
		if isCygwinExecutable(path) {
			source, err = toCygwinPath(source)
			if err != nil {
				return nil, err
			}
			dest, err = toCygwinPath(dest)
			if err != nil {
				return nil, err
			}
		}
		return c.Command(path, label, source, dest), nil
	}
	// No tool found: build a helpful error listing the supported names.
	var commands = make([]string, 0, len(supportedCDISOCreationCommands))
	for _, c := range supportedCDISOCreationCommands {
		commands = append(commands, c.Name)
	}
	return nil, fmt.Errorf(
		"could not find a supported CD ISO creation command (the supported commands are: %s)",
		strings.Join(commands, ", "))
}
|
||||
|
||||
// AddFile copies src into the CD staging directory dst. A plain file
// lands in the root of dst; a directory is walked recursively and copied
// preserving its structure (the directory's own name is kept, its local
// parent path is discarded). Every file written is recorded in
// s.filesAdded.
func (s *StepCreateCD) AddFile(dst, src string) error {
	finfo, err := os.Stat(src)
	if err != nil {
		return fmt.Errorf("Error adding path to CD: %s", err)
	}

	// add a file
	if !finfo.IsDir() {
		inputF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer inputF.Close()

		// Create a new file in the root directory
		dest, err := os.Create(filepath.Join(dst, finfo.Name()))
		if err != nil {
			return fmt.Errorf("Error opening file for copy %s to CD root", src)
		}
		defer dest.Close()
		nBytes, err := io.Copy(dest, inputF)
		if err != nil {
			return fmt.Errorf("Error copying %s to CD root", src)
		}
		s.filesAdded[src] = true
		log.Printf("Wrote %d bytes to %s", nBytes, finfo.Name())
		return err
	}

	// file is a directory, so we need to parse the filename into a path to
	// discard and a basename
	discardPath, _ := filepath.Split(src)

	// Add a directory and its subdirectories
	visit := func(pathname string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Clean up pathing so that we preserve the base directory provided by
		// the user but not the local pathing to that directory.
		allDirs, base := filepath.Split(pathname)
		intermediaryDirs := strings.Replace(allDirs, discardPath, "", 1)

		dstPath := filepath.Join(dst, base)
		if intermediaryDirs != "" {
			dstPath = filepath.Join(dst, intermediaryDirs, base)
		}

		// add a file
		if !fi.IsDir() {
			inputF, err := os.Open(pathname)
			if err != nil {
				return err
			}
			defer inputF.Close()

			fileDst, err := os.Create(dstPath)
			if err != nil {
				return fmt.Errorf("Error opening file %s on CD: %s", dstPath, err)
			}
			defer fileDst.Close()
			nBytes, err := io.Copy(fileDst, inputF)
			if err != nil {
				return fmt.Errorf("Error copying %s to CD: %s", dstPath, err)
			}
			s.filesAdded[dstPath] = true
			log.Printf("Wrote %d bytes to %s", nBytes, dstPath)
			return err
		}

		if fi.Mode().IsDir() {
			// create the directory on the CD, continue walk.
			err := os.MkdirAll(dstPath, fi.Mode())
			if err != nil {
				err = fmt.Errorf("error creating new directory %s: %s",
					dstPath, err)
			}
			return err
		}
		// NOTE(review): reached only for entries that are neither regular
		// directories nor caught by the !fi.IsDir() branch above; err is
		// nil here from the walk callback contract.
		return err
	}

	return filepath.Walk(src, visit)
}
|
427
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_floppy.go
generated
vendored
Normal file
427
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_create_floppy.go
generated
vendored
Normal file
|
@ -0,0 +1,427 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/tmp"
|
||||
"github.com/mitchellh/go-fs"
|
||||
"github.com/mitchellh/go-fs/fat"
|
||||
)
|
||||
|
||||
// StepCreateFloppy will create a floppy disk with the given files.
type StepCreateFloppy struct {
	// Files are copied flat into the floppy's root; glob patterns
	// (*, ?, [) are expanded.
	Files []string
	// Directories are copied recursively, preserving their structure.
	Directories []string
	// Label is the FAT volume label; defaults to "packer" when empty.
	Label string

	// floppyPath is the temporary image file backing the floppy; it is
	// removed again in Cleanup.
	floppyPath string

	// FilesAdded tracks which files were added. Used for testing the step.
	FilesAdded map[string]bool
}
|
||||
|
||||
func (s *StepCreateFloppy) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
if len(s.Files) == 0 && len(s.Directories) == 0 {
|
||||
log.Println("No floppy files specified. Floppy disk will not be made.")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
if s.Label == "" {
|
||||
s.Label = "packer"
|
||||
} else {
|
||||
log.Printf("Floppy label is set to %s", s.Label)
|
||||
}
|
||||
|
||||
s.FilesAdded = make(map[string]bool)
|
||||
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
ui.Say("Creating floppy disk...")
|
||||
|
||||
// Create a temporary file to be our floppy drive
|
||||
floppyF, err := tmp.File("packer")
|
||||
if err != nil {
|
||||
state.Put("error",
|
||||
fmt.Errorf("Error creating temporary file for floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
defer floppyF.Close()
|
||||
|
||||
// Set the path so we can remove it later
|
||||
s.floppyPath = floppyF.Name()
|
||||
|
||||
log.Printf("Floppy path: %s", s.floppyPath)
|
||||
|
||||
// Set the size of the file to be a floppy sized
|
||||
if err := floppyF.Truncate(1440 * 1024); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// BlockDevice backed by the file for our filesystem
|
||||
log.Println("Initializing block device backed by temporary file")
|
||||
device, err := fs.NewFileDisk(floppyF)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Format the block device so it contains a valid FAT filesystem
|
||||
log.Println("Formatting the block device with a FAT filesystem...")
|
||||
formatConfig := &fat.SuperFloppyConfig{
|
||||
FATType: fat.FAT12,
|
||||
Label: s.Label,
|
||||
OEMName: s.Label,
|
||||
}
|
||||
if err := fat.FormatSuperFloppy(device, formatConfig); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// The actual FAT filesystem
|
||||
log.Println("Initializing FAT filesystem on block device")
|
||||
fatFs, err := fat.New(device)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Get the root directory to the filesystem and create a cache for any directories within
|
||||
log.Println("Reading the root directory from the filesystem")
|
||||
rootDir, err := fatFs.RootDir()
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error creating floppy: %s", err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
cache := fsDirectoryCache(rootDir)
|
||||
|
||||
// Utility functions for walking through a directory grabbing all files flatly
|
||||
globFiles := func(files []string, list chan string) {
|
||||
for _, filename := range files {
|
||||
if strings.ContainsAny(filename, "*?[") {
|
||||
matches, _ := filepath.Glob(filename)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, match := range matches {
|
||||
list <- match
|
||||
}
|
||||
continue
|
||||
}
|
||||
list <- filename
|
||||
}
|
||||
close(list)
|
||||
}
|
||||
|
||||
var crawlDirectoryFiles []string
|
||||
crawlDirectory := func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
crawlDirectoryFiles = append(crawlDirectoryFiles, path)
|
||||
ui.Message(fmt.Sprintf("Adding file: %s", path))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
crawlDirectoryFiles = []string{}
|
||||
|
||||
// Collect files and copy them flatly...because floppy_files is broken on purpose.
|
||||
var filelist chan string
|
||||
filelist = make(chan string)
|
||||
go globFiles(s.Files, filelist)
|
||||
|
||||
ui.Message("Copying files flatly from floppy_files")
|
||||
for {
|
||||
filename, ok := <-filelist
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(filename)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error trying to stat : %s : %s", filename, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// walk through directory adding files to the root of the fs
|
||||
if finfo.IsDir() {
|
||||
ui.Message(fmt.Sprintf("Copying directory: %s", filename))
|
||||
|
||||
err := filepath.Walk(filename, crawlDirectory)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
for _, crawlfilename := range crawlDirectoryFiles {
|
||||
if err = s.Add(cache, crawlfilename); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.FilesAdded[crawlfilename] = true
|
||||
}
|
||||
|
||||
crawlDirectoryFiles = []string{}
|
||||
continue
|
||||
}
|
||||
|
||||
// add just a single file
|
||||
ui.Message(fmt.Sprintf("Copying file: %s", filename))
|
||||
if err = s.Add(cache, filename); err != nil {
|
||||
state.Put("error", fmt.Errorf("Error adding file from floppy_files : %s : %s", filename, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
s.FilesAdded[filename] = true
|
||||
}
|
||||
ui.Message("Done copying files from floppy_files")
|
||||
|
||||
// Collect all paths (expanding wildcards) into pathqueue
|
||||
ui.Message("Collecting paths from floppy_dirs")
|
||||
var pathqueue []string
|
||||
for _, filename := range s.Directories {
|
||||
if strings.ContainsAny(filename, "*?[") {
|
||||
matches, err := filepath.Glob(filename)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error adding path %s to floppy: %s", filename, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
for _, filename := range matches {
|
||||
pathqueue = append(pathqueue, filename)
|
||||
}
|
||||
continue
|
||||
}
|
||||
pathqueue = append(pathqueue, filename)
|
||||
}
|
||||
ui.Message(fmt.Sprintf("Resulting paths from floppy_dirs : %v", pathqueue))
|
||||
|
||||
// Go over each path in pathqueue and copy it.
|
||||
for _, src := range pathqueue {
|
||||
ui.Message(fmt.Sprintf("Recursively copying : %s", src))
|
||||
err = s.Add(cache, src)
|
||||
if err != nil {
|
||||
state.Put("error", fmt.Errorf("Error adding path %s to floppy: %s", src, err))
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
ui.Message("Done copying paths from floppy_dirs")
|
||||
|
||||
// Set the path to the floppy so it can be used later
|
||||
state.Put("floppy_path", s.floppyPath)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Add copies src into the floppy's FAT filesystem. A plain file is
// written into the root directory; a directory is walked recursively,
// recreating its structure relative to its parent. dircache resolves
// (and lazily creates) FAT directories by slash-separated path. Each
// file written is recorded in s.FilesAdded.
func (s *StepCreateFloppy) Add(dircache directoryCache, src string) error {
	finfo, err := os.Stat(src)
	if err != nil {
		return fmt.Errorf("Error adding path to floppy: %s", err)
	}

	// add a file
	if !finfo.IsDir() {
		inputF, err := os.Open(src)
		if err != nil {
			return err
		}
		defer inputF.Close()

		// "" resolves to the root directory of the FAT filesystem.
		d, err := dircache("")
		if err != nil {
			return err
		}

		entry, err := d.AddFile(path.Base(filepath.ToSlash(src)))
		if err != nil {
			return err
		}

		fatFile, err := entry.File()
		if err != nil {
			return err
		}

		_, err = io.Copy(fatFile, inputF)
		s.FilesAdded[src] = true
		return err
	}

	// add a directory and it's subdirectories
	basedirectory := filepath.Join(src, "..")
	visit := func(pathname string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if fi.Mode().IsDir() {
			// Create the directory on the floppy (relative to the
			// directory's parent) and continue the walk.
			base, err := removeBase(basedirectory, pathname)
			if err != nil {
				return err
			}
			_, err = dircache(filepath.ToSlash(base))
			return err
		}
		directory, filename := filepath.Split(filepath.ToSlash(pathname))

		base, err := removeBase(basedirectory, filepath.FromSlash(directory))
		if err != nil {
			return err
		}

		inputF, err := os.Open(pathname)
		if err != nil {
			return err
		}
		defer inputF.Close()

		// Resolve the FAT directory this file belongs in, then copy the
		// file's contents into a new entry there.
		wd, err := dircache(filepath.ToSlash(base))
		if err != nil {
			return err
		}

		entry, err := wd.AddFile(filename)
		if err != nil {
			return err
		}

		fatFile, err := entry.File()
		if err != nil {
			return err
		}

		_, err = io.Copy(fatFile, inputF)
		s.FilesAdded[pathname] = true
		return err
	}

	return filepath.Walk(src, visit)
}
|
||||
|
||||
// Cleanup deletes the floppy image, if Run created one.
func (s *StepCreateFloppy) Cleanup(multistep.StateBag) {
	if s.floppyPath != "" {
		log.Printf("Deleting floppy disk: %s", s.floppyPath)
		os.Remove(s.floppyPath)
	}
}
|
||||
|
||||
// removeBase will take a regular os.PathSeparator-separated path and remove the
// prefix directory base from it. Both paths are converted to their absolute
// formats before the stripping takes place. An error is returned when
// path is not prefixed by base.
func removeBase(base string, path string) (string, error) {
	var idx int
	var err error

	// Best effort: keep the original path when it cannot be made absolute.
	if res, absErr := filepath.Abs(path); absErr == nil {
		path = res
	}
	path = filepath.Clean(path)

	if base, err = filepath.Abs(base); err != nil {
		return path, err
	}

	c1, c2 := strings.Split(base, string(os.PathSeparator)), strings.Split(path, string(os.PathSeparator))
	for idx = 0; idx < len(c1); idx++ {
		// Fix: a path with fewer components than base cannot be prefixed
		// by it; the previous code indexed c2 out of range here.
		if idx >= len(c2) {
			return "", fmt.Errorf("Path %s is not prefixed by Base %s", path, base)
		}
		if len(c1[idx]) == 0 && len(c2[idx]) != 0 {
			break
		}
		if c1[idx] != c2[idx] {
			return "", fmt.Errorf("Path %s is not prefixed by Base %s", path, base)
		}
	}
	return strings.Join(c2[idx:], string(os.PathSeparator)), nil
}
|
||||
|
||||
// fsDirectoryCache returns a function that can be used to grab the fs.Directory
// entry associated with a given path. If an fs.Directory entry is not found
// then it will be created relative to the rootDirectory argument that is
// passed.
type directoryCache func(string) (fs.Directory, error)

func fsDirectoryCache(rootDirectory fs.Directory) directoryCache {
	// cache maps slash-separated, cleaned paths to their FAT directory.
	// "" is the root. All access happens from the single goroutine below,
	// so no locking is needed.
	var cache map[string]fs.Directory

	cache = make(map[string]fs.Directory)
	cache[""] = rootDirectory

	// Requests flow over Input; each request yields exactly one value on
	// either Output or Error.
	Input, Output, Error := make(chan string), make(chan fs.Directory), make(chan error)
	go func(Error chan error) {
		for {
			input := <-Input
			if len(input) > 0 {
				input = path.Clean(input)
			}

			// found a directory, so yield it
			res, ok := cache[input]
			if ok {
				Output <- res
				continue
			}
			component := strings.Split(input, "/")

			// directory not cached, so start at the root and walk each component
			// creating them if they're not in cache
			var entry fs.Directory
			for i := range component {

				// join all of our components into a key
				path := strings.Join(component[:i], "/")

				// check if parent directory is cached
				res, ok = cache[path]
				if !ok {
					// add directory into cache
					// NOTE(review): on the first iteration (i == 0) the
					// key is "" and always cached, so component[i-1] is
					// never evaluated with i == 0.
					directory, err := entry.AddDirectory(component[i-1])
					if err != nil {
						// NOTE(review): this `continue` resumes the inner
						// component loop after reporting the error, rather
						// than abandoning the request — verify intended.
						Error <- err
						continue
					}
					res, err = directory.Dir()
					if err != nil {
						Error <- err
						continue
					}
					cache[path] = res
				}
				// cool, found a directory
				entry = res
			}

			// finally create our directory
			directory, err := entry.AddDirectory(component[len(component)-1])
			if err != nil {
				Error <- err
				continue
			}
			res, err = directory.Dir()
			if err != nil {
				Error <- err
				continue
			}
			cache[input] = res

			// ..and yield it
			// NOTE(review): this yields `entry` (the parent walked to)
			// while `res` (the newly created directory) is what was
			// cached — confirm which one callers expect.
			Output <- entry
		}
	}(Error)

	// getFilesystemDirectory sends the request and waits for whichever of
	// the two reply channels fires.
	getFilesystemDirectory := func(input string) (fs.Directory, error) {
		Input <- input
		select {
		case res := <-Output:
			return res, nil
		case err := <-Error:
			return *new(fs.Directory), err
		}
	}
	return getFilesystemDirectory
}
|
238
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_download.go
generated
vendored
Normal file
238
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_download.go
generated
vendored
Normal file
|
@ -0,0 +1,238 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
gcs "github.com/hashicorp/go-getter/gcs/v2"
|
||||
s3 "github.com/hashicorp/go-getter/s3/v2"
|
||||
getter "github.com/hashicorp/go-getter/v2"
|
||||
urlhelper "github.com/hashicorp/go-getter/v2/helper/url"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/filelock"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepDownload downloads a remote file using the download client within
// this package. This step handles setting up the download configuration,
// progress reporting, interrupt handling, etc.
//
// Uses:
//   cache packer.Cache
//   ui packersdk.Ui
type StepDownload struct {
	// The checksum and the type of the checksum for the download
	Checksum string

	// A short description of the type of download being done. Example:
	// "ISO" or "Guest Additions"
	Description string

	// The name of the key where the final path of the ISO will be put
	// into the state.
	ResultKey string

	// The path where the result should go, otherwise it goes to the
	// cache directory.
	TargetPath string

	// A list of URLs to attempt to download this thing.
	// (Field is exported as "Url"; keep the spelling for compatibility
	// with existing callers.)
	Url []string

	// Extension is the extension to force for the file that is downloaded.
	// Some systems require a certain extension. If this isn't set, the
	// extension on the URL is used. Otherwise, this will be forced
	// on the downloaded file for every URL.
	Extension string
}
|
||||
|
||||
// defaultGetterClient is the go-getter client used for downloads,
// starting from the library's default set of getters.
var defaultGetterClient = getter.Client{
	Getters: getter.Getters,
}

// init registers the GCS and S3 getters on top of the defaults.
func init() {
	defaultGetterClient.Getters = append(defaultGetterClient.Getters, new(gcs.Getter))
	defaultGetterClient.Getters = append(defaultGetterClient.Getters, new(s3.Getter))
}
|
||||
|
||||
// Run tries each URL in s.Url in order until one downloads successfully,
// storing the resulting local path in state under s.ResultKey. Errors
// from failed URLs are collected; if every URL fails (or the context is
// cancelled), the combined error is stored under "error" and the step
// halts. ".ovf" sources for an "OVF/OVA" download are used in place
// rather than downloaded.
func (s *StepDownload) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	if len(s.Url) == 0 {
		log.Printf("No URLs were provided to Step Download. Continuing...")
		return multistep.ActionContinue
	}

	defer log.Printf("Leaving retrieve loop for %s", s.Description)

	ui := state.Get("ui").(packersdk.Ui)
	ui.Say(fmt.Sprintf("Retrieving %s", s.Description))

	var errs []error

	for _, source := range s.Url {
		// Bail out between attempts if the build was cancelled.
		if ctx.Err() != nil {
			state.Put("error", fmt.Errorf("Download cancelled: %v", errs))
			return multistep.ActionHalt
		}
		ui.Say(fmt.Sprintf("Trying %s", source))
		var err error
		var dst string
		if s.Description == "OVF/OVA" && strings.HasSuffix(source, ".ovf") {
			// TODO(adrien): make go-getter allow using files in place.
			// ovf files usually point to a file in the same directory, so
			// using them in place is the only way.
			ui.Say(fmt.Sprintf("Using ovf inplace"))
			dst = source
		} else {
			dst, err = s.download(ctx, ui, source)
		}
		if err == nil {
			state.Put(s.ResultKey, dst)
			return multistep.ActionContinue
		}
		// may be another url will work
		errs = append(errs, err)
	}

	err := fmt.Errorf("error downloading %s: %v", s.Description, errs)
	state.Put("error", err)
	ui.Error(err.Error())
	return multistep.ActionHalt
}
|
||||
|
||||
// UseSourceToFindCacheTarget parses source into a URL and computes the
// local cache path the download should land at. A "checksum" query
// parameter on the source overrides s.Checksum, and the effective
// checksum is re-encoded into the URL so go-getter verifies it. The
// cache file name is sha1(checksum) when a checksum is set, otherwise
// sha1(full URL), plus s.Extension if any. When s.TargetPath names a
// directory-style path (no extension), the hashed name is appended to it
// (defaulting to a ".iso" suffix).
func (s *StepDownload) UseSourceToFindCacheTarget(source string) (*url.URL, string, error) {
	u, err := parseSourceURL(source)
	if err != nil {
		return nil, "", fmt.Errorf("url parse: %s", err)
	}
	if checksum := u.Query().Get("checksum"); checksum != "" {
		s.Checksum = checksum
	}
	if s.Checksum != "" && s.Checksum != "none" {
		// add checksum to url query params as go getter will checksum for us
		q := u.Query()
		q.Set("checksum", s.Checksum)
		u.RawQuery = q.Encode()
	}

	// store file under sha1(hash) if set
	// hash can sometimes be a checksum url
	// otherwise, use sha1(source_url)
	var shaSum [20]byte
	if s.Checksum != "" && s.Checksum != "none" {
		shaSum = sha1.Sum([]byte(s.Checksum))
	} else {
		shaSum = sha1.Sum([]byte(u.String()))
	}
	shaSumString := hex.EncodeToString(shaSum[:])

	targetPath := s.TargetPath
	if targetPath == "" {
		targetPath = shaSumString
		if s.Extension != "" {
			targetPath += "." + s.Extension
		}
		targetPath, err = packersdk.CachePath(targetPath)
		if err != nil {
			return nil, "", fmt.Errorf("CachePath: %s", err)
		}
	} else if filepath.Ext(targetPath) == "" {
		// When an absolute path is provided
		// this adds the file to the targetPath
		if !strings.HasSuffix(targetPath, "/") {
			targetPath += "/"
		}
		targetPath += shaSumString
		if s.Extension != "" {
			targetPath += "." + s.Extension
		} else {
			targetPath += ".iso"
		}
	}
	return u, targetPath, nil
}
|
||||
|
||||
// download fetches source into the cache target path computed by
// UseSourceToFindCacheTarget, holding a file lock on the target for the
// duration so concurrent builds do not clobber each other's downloads.
// It returns the path of the downloaded (or already-cached) file.
func (s *StepDownload) download(ctx context.Context, ui packersdk.Ui, source string) (string, error) {
	u, targetPath, err := s.UseSourceToFindCacheTarget(source)
	if err != nil {
		return "", err
	}
	lockFile := targetPath + ".lock"

	log.Printf("Acquiring lock for: %s (%s)", u.String(), lockFile)
	lock := filelock.New(lockFile)
	lock.Lock()
	defer lock.Unlock()

	wd, err := os.Getwd()
	if err != nil {
		log.Printf("get working directory: %v", err)
		// here we ignore the error in case the
		// working directory is not needed.
		// It would be better if the go-getter
		// could guess it only in cases it is
		// necessary.
	}
	src := u.String()
	if u.Scheme == "" || strings.ToLower(u.Scheme) == "file" {
		// If a local filepath, then we need to preprocess to make sure the
		// path doesn't have any multiple successive path separators; if it
		// does, go-getter will read this as a specialized go-getter-specific
		// subdirectory command, which it most likely isn't.
		src = filepath.Clean(u.String())
		if _, err := os.Stat(filepath.Clean(u.Path)); err != nil {
			// Cleaned path isn't present on system so it must be some other
			// scheme. Don't error right away; see if go-getter can figure it
			// out.
			src = u.String()
		}
	}

	ui.Say(fmt.Sprintf("Trying %s", u.String()))
	req := &getter.Request{
		Dst:              targetPath,
		Src:              src,
		ProgressListener: ui,
		Pwd:              wd,
		Mode:             getter.ModeFile,
		Inplace:          true,
	}

	switch op, err := defaultGetterClient.Get(ctx, req); err.(type) {
	case nil: // success !
		ui.Say(fmt.Sprintf("%s => %s", u.String(), op.Dst))
		return op.Dst, nil
	case *getter.ChecksumError:
		// The cached file no longer matches its checksum: drop the cache
		// entry so a later attempt re-downloads it.
		ui.Say(fmt.Sprintf("Checksum did not match, removing %s", targetPath))
		if err := os.Remove(targetPath); err != nil {
			ui.Error(fmt.Sprintf("Failed to remove cache file. Please remove manually: %s", targetPath))
		}
		return "", err
	default:
		ui.Say(fmt.Sprintf("Download failed %s", err))
		return "", err
	}
}
|
||||
|
||||
func parseSourceURL(source string) (*url.URL, error) {
|
||||
if runtime.GOOS == "windows" {
|
||||
// Check that the user specified a UNC path, and promote it to an smb:// uri.
|
||||
if strings.HasPrefix(source, "\\\\") && len(source) > 2 && source[2] != '?' {
|
||||
source = filepath.ToSlash(source[2:])
|
||||
source = fmt.Sprintf("smb://%s", source)
|
||||
}
|
||||
}
|
||||
|
||||
u, err := urlhelper.Parse(source)
|
||||
return u, err
|
||||
}
|
||||
|
||||
// Cleanup is a no-op: downloaded files are deliberately kept in the cache
// for reuse by later builds.
func (s *StepDownload) Cleanup(multistep.StateBag) {}
|
75
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_http_server.go
generated
vendored
Normal file
75
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_http_server.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"net/http"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/net"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// This step creates and runs the HTTP server that is serving files from the
|
||||
// directory specified by the 'http_directory` configuration parameter in the
|
||||
// template.
|
||||
//
|
||||
// Uses:
|
||||
// ui packersdk.Ui
|
||||
//
|
||||
// Produces:
|
||||
// http_port int - The port the HTTP server started on.
|
||||
type StepHTTPServer struct {
	HTTPDir     string // local directory to serve over HTTP; empty disables the server
	HTTPPortMin int    // lower bound of the port range to try
	HTTPPortMax int    // upper bound of the port range to try (see net.ListenRangeConfig)
	HTTPAddress string // address to bind the listener to

	l *net.Listener // active listener; nil until Run succeeds, closed in Cleanup
}
|
||||
|
||||
func (s *StepHTTPServer) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if s.HTTPDir == "" {
|
||||
state.Put("http_port", 0)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Find an available TCP port for our HTTP server
|
||||
var httpAddr string
|
||||
var err error
|
||||
s.l, err = net.ListenRangeConfig{
|
||||
Min: s.HTTPPortMin,
|
||||
Max: s.HTTPPortMax,
|
||||
Addr: s.HTTPAddress,
|
||||
Network: "tcp",
|
||||
}.Listen(ctx)
|
||||
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error finding port: %s", err)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Starting HTTP server on port %d", s.l.Port))
|
||||
|
||||
// Start the HTTP server and run it in the background
|
||||
fileServer := http.FileServer(http.Dir(s.HTTPDir))
|
||||
server := &http.Server{Addr: httpAddr, Handler: fileServer}
|
||||
go server.Serve(s.l)
|
||||
|
||||
// Save the address into the state so it can be accessed in the future
|
||||
state.Put("http_port", s.l.Port)
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepHTTPServer) Cleanup(multistep.StateBag) {
|
||||
if s.l != nil {
|
||||
// Close the listener so that the HTTP server stops
|
||||
s.l.Close()
|
||||
}
|
||||
}
|
86
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_output_dir.go
generated
vendored
Normal file
86
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_output_dir.go
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepOutputDir sets up the output directory by creating it if it does
|
||||
// not exist, deleting it if it does exist and we're forcing, and cleaning
|
||||
// it up when we're done with it.
|
||||
type StepOutputDir struct {
	Force bool   // when true, delete a pre-existing output directory instead of failing
	Path  string // the output directory to create

	cleanup bool // set once Run starts mutating the directory; enables Cleanup
}
|
||||
|
||||
func (s *StepOutputDir) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if _, err := os.Stat(s.Path); err == nil {
|
||||
if !s.Force {
|
||||
err := fmt.Errorf(
|
||||
"Output directory exists: %s\n\n"+
|
||||
"Use the force flag to delete it prior to building.",
|
||||
s.Path)
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
ui.Say("Deleting previous output directory...")
|
||||
os.RemoveAll(s.Path)
|
||||
}
|
||||
|
||||
// Enable cleanup
|
||||
s.cleanup = true
|
||||
|
||||
// Create the directory
|
||||
if err := os.MkdirAll(s.Path, 0755); err != nil {
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
// Make sure we can write in the directory
|
||||
f, err := os.Create(filepath.Join(s.Path, "_packer_perm_check"))
|
||||
if err != nil {
|
||||
err = fmt.Errorf("Couldn't write to output directory: %s", err)
|
||||
state.Put("error", err)
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
f.Close()
|
||||
os.Remove(f.Name())
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup deletes the output directory when the build was cancelled or
// halted, so failed builds don't leave partial artifacts behind. It is a
// no-op unless Run previously set s.cleanup.
func (s *StepOutputDir) Cleanup(state multistep.StateBag) {
	if !s.cleanup {
		return
	}

	_, cancelled := state.GetOk(multistep.StateCancelled)
	_, halted := state.GetOk(multistep.StateHalted)

	if cancelled || halted {
		ui := state.Get("ui").(packersdk.Ui)

		ui.Say("Deleting output directory...")
		// Retry removal a few times with a delay; files may still be
		// transiently held open (NOTE(review): presumably a Windows
		// file-locking workaround — confirm).
		for i := 0; i < 5; i++ {
			err := os.RemoveAll(s.Path)
			if err == nil {
				break
			}

			log.Printf("Error removing output dir: %s", err)
			time.Sleep(2 * time.Second)
		}
	}
}
|
172
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_provision.go
generated
vendored
Normal file
172
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps/step_provision.go
generated
vendored
Normal file
|
@ -0,0 +1,172 @@
|
|||
package commonsteps
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// StepProvision runs the provisioners.
|
||||
//
|
||||
// Uses:
|
||||
// communicator packersdk.Communicator
|
||||
// hook packersdk.Hook
|
||||
// ui packersdk.Ui
|
||||
//
|
||||
// Produces:
|
||||
// <nothing>
|
||||
|
||||
// Sentinel values placed in the provisioner hook data when a builder does
// not implement the corresponding Packer HTTP server attribute.
const (
	HttpIPNotImplemented   = "ERR_HTTP_IP_NOT_IMPLEMENTED_BY_BUILDER"
	HttpPortNotImplemented = "ERR_HTTP_PORT_NOT_IMPLEMENTED_BY_BUILDER"
	HttpAddrNotImplemented = "ERR_HTTP_ADDR_NOT_IMPLEMENTED_BY_BUILDER"
)
|
||||
|
||||
func PopulateProvisionHookData(state multistep.StateBag) map[string]interface{} {
|
||||
hookData := make(map[string]interface{})
|
||||
|
||||
// Load Builder hook data from state, if it has been set.
|
||||
hd, ok := state.GetOk("generated_data")
|
||||
if ok {
|
||||
hookData = hd.(map[string]interface{})
|
||||
}
|
||||
|
||||
// Warn user that the id isn't implemented
|
||||
hookData["ID"] = "ERR_ID_NOT_IMPLEMENTED_BY_BUILDER"
|
||||
|
||||
// instance_id is placed in state by the builders.
|
||||
// Not yet implemented in Chroot, lxc/lxd, Azure, Qemu.
|
||||
// Implemented in most others including digitalOcean (droplet id),
|
||||
// docker (container_id), and clouds which use "server" internally instead
|
||||
// of instance.
|
||||
id, ok := state.GetOk("instance_id")
|
||||
if ok {
|
||||
hookData["ID"] = id
|
||||
}
|
||||
|
||||
hookData["PackerRunUUID"] = os.Getenv("PACKER_RUN_UUID")
|
||||
|
||||
// Packer HTTP info
|
||||
hookData["PackerHTTPIP"] = HttpIPNotImplemented
|
||||
hookData["PackerHTTPPort"] = HttpPortNotImplemented
|
||||
hookData["PackerHTTPAddr"] = HttpAddrNotImplemented
|
||||
|
||||
httpPort, okPort := state.GetOk("http_port")
|
||||
if okPort {
|
||||
hookData["PackerHTTPPort"] = strconv.Itoa(httpPort.(int))
|
||||
}
|
||||
httIP, okIP := state.GetOk("http_ip")
|
||||
if okIP {
|
||||
hookData["PackerHTTPIP"] = httIP.(string)
|
||||
}
|
||||
if okPort && okIP {
|
||||
hookData["PackerHTTPAddr"] = fmt.Sprintf("%s:%s", hookData["PackerHTTPIP"], hookData["PackerHTTPPort"])
|
||||
}
|
||||
|
||||
// Read communicator data into hook data
|
||||
comm, ok := state.GetOk("communicator_config")
|
||||
if !ok {
|
||||
log.Printf("Unable to load communicator config from state to populate provisionHookData")
|
||||
return hookData
|
||||
}
|
||||
commConf := comm.(*communicator.Config)
|
||||
|
||||
// Loop over all field values and retrieve them from the ssh config
|
||||
hookData["Host"] = commConf.Host()
|
||||
hookData["Port"] = commConf.Port()
|
||||
hookData["User"] = commConf.User()
|
||||
hookData["Password"] = commConf.Password()
|
||||
hookData["ConnType"] = commConf.Type
|
||||
hookData["SSHPublicKey"] = string(commConf.SSHPublicKey)
|
||||
hookData["SSHPrivateKey"] = string(commConf.SSHPrivateKey)
|
||||
hookData["SSHPrivateKeyFile"] = commConf.SSHPrivateKeyFile
|
||||
hookData["SSHAgentAuth"] = commConf.SSHAgentAuth
|
||||
|
||||
// Backwards compatibility; in practice, WinRMPassword is fulfilled by
|
||||
// Password.
|
||||
hookData["WinRMPassword"] = commConf.WinRMPassword
|
||||
|
||||
return hookData
|
||||
}
|
||||
|
||||
// StepProvision fires the provision hook (see the package comment above
// for the state-bag keys it uses).
type StepProvision struct {
	// Comm optionally overrides the communicator; when nil, the
	// communicator stored in state under "communicator" is used.
	Comm packersdk.Communicator
}
|
||||
|
||||
func (s *StepProvision) runWithHook(ctx context.Context, state multistep.StateBag, hooktype string) multistep.StepAction {
|
||||
// hooktype will be either packersdk.HookProvision or packersdk.HookCleanupProvision
|
||||
comm := s.Comm
|
||||
if comm == nil {
|
||||
raw, ok := state.Get("communicator").(packersdk.Communicator)
|
||||
if ok {
|
||||
comm = raw.(packersdk.Communicator)
|
||||
}
|
||||
}
|
||||
|
||||
hook := state.Get("hook").(packersdk.Hook)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
hookData := PopulateProvisionHookData(state)
|
||||
|
||||
// Update state generated_data with complete hookData
|
||||
// to make them accessible by post-processors
|
||||
state.Put("generated_data", hookData)
|
||||
|
||||
// Run the provisioner in a goroutine so we can continually check
|
||||
// for cancellations...
|
||||
if hooktype == packersdk.HookProvision {
|
||||
log.Println("Running the provision hook")
|
||||
} else if hooktype == packersdk.HookCleanupProvision {
|
||||
ui.Say("Provisioning step had errors: Running the cleanup provisioner, if present...")
|
||||
}
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- hook.Run(ctx, hooktype, ui, comm, hookData)
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
if hooktype == packersdk.HookProvision {
|
||||
// We don't overwrite the error if it's a cleanup
|
||||
// provisioner being run.
|
||||
state.Put("error", err)
|
||||
} else if hooktype == packersdk.HookCleanupProvision {
|
||||
origErr := state.Get("error").(error)
|
||||
state.Put("error", fmt.Errorf("Cleanup failed: %s. "+
|
||||
"Original Provisioning error: %s", err, origErr))
|
||||
}
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
case <-ctx.Done():
|
||||
log.Printf("Cancelling provisioning due to context cancellation: %s", ctx.Err())
|
||||
return multistep.ActionHalt
|
||||
case <-time.After(1 * time.Second):
|
||||
if _, ok := state.GetOk(multistep.StateCancelled); ok {
|
||||
log.Println("Cancelling provisioning due to interrupt...")
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Run executes the normal provision hook (packersdk.HookProvision).
func (s *StepProvision) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	return s.runWithHook(ctx, state, packersdk.HookProvision)
}
|
||||
|
||||
// Cleanup runs the error-cleanup provision hook, but only when a previous
// step recorded an error in the state bag.
func (s *StepProvision) Cleanup(state multistep.StateBag) {
	// We have a "final" provisioner that gets defined by "error-cleanup-provisioner"
	// which we only call if there's an error during the provision run and
	// the "error-cleanup-provisioner" is defined.
	if _, ok := state.GetOk("error"); ok {
		s.runWithHook(context.Background(), state, packersdk.HookCleanupProvision)
	}
}
|
118
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/debug_runner.go
generated
vendored
Normal file
118
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/debug_runner.go
generated
vendored
Normal file
|
@ -0,0 +1,118 @@
|
|||
package multistep
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// DebugLocation is the location where the pause is occurring when debugging
|
||||
// a step sequence. "DebugLocationAfterRun" is after the run of the named
|
||||
// step. "DebugLocationBeforeCleanup" is before the cleanup of the named
|
||||
// step.
|
||||
type DebugLocation uint

const (
	// DebugLocationAfterRun pauses immediately after a step's Run returns.
	DebugLocationAfterRun DebugLocation = iota
	// DebugLocationBeforeCleanup pauses just before a step's Cleanup runs.
	DebugLocationBeforeCleanup
)
|
||||
|
||||
// StepWrapper is an interface that wrapped steps can implement to expose their
|
||||
// inner step names to the debug runner.
|
||||
type StepWrapper interface {
	// InnerStepName should return the human readable name of the wrapped step.
	InnerStepName() string
}

// DebugPauseFn is the type signature for the function that is called
// whenever the DebugRunner pauses. It allows the caller time to
// inspect the state of the multi-step sequence at a given step.
// Its arguments are the pause location, the step name, and the state bag.
type DebugPauseFn func(DebugLocation, string, StateBag)
|
||||
|
||||
// DebugRunner is a Runner that runs the given set of steps in order,
|
||||
// but pauses between each step until it is told to continue.
|
||||
type DebugRunner struct {
	// Steps is the steps to run. These will be run in order.
	Steps []Step

	// PauseFn is the function that is called whenever the debug runner
	// pauses. The debug runner continues when this function returns.
	// The function is given the state so that the state can be inspected.
	PauseFn DebugPauseFn

	l      sync.Mutex   // guards runner against concurrent Run calls
	runner *BasicRunner // the underlying runner; non-nil once Run has started
}
|
||||
|
||||
// Run executes the steps like BasicRunner, but interleaves a pause step
// after each configured step so the caller (or the default stdin prompt)
// can inspect state between steps. Calling Run twice on the same
// DebugRunner panics.
func (r *DebugRunner) Run(ctx context.Context, state StateBag) {
	r.l.Lock()
	if r.runner != nil {
		panic("already running")
	}
	r.runner = new(BasicRunner)
	r.l.Unlock()

	pauseFn := r.PauseFn

	// If no PauseFn is specified, use the default
	if pauseFn == nil {
		pauseFn = DebugPauseDefault
	}

	// Rebuild the steps so that we insert the pause step after each.
	// NOTE(review): a nil entry in r.Steps leaves both of its slots in
	// the rebuilt slice nil (preserved behavior).
	steps := make([]Step, len(r.Steps)*2)
	for i, step := range r.Steps {
		if step == nil {
			continue
		}
		steps[i*2] = step
		// Prefer the wrapped step's own name; otherwise fall back to the
		// concrete type name via reflection.
		name := ""
		if wrapped, ok := step.(StepWrapper); ok {
			name = wrapped.InnerStepName()
		} else {
			name = reflect.Indirect(reflect.ValueOf(step)).Type().Name()
		}
		steps[(i*2)+1] = &debugStepPause{
			name,
			pauseFn,
		}
	}

	// Then just use a basic runner to run it
	r.runner.Steps = steps
	r.runner.Run(ctx, state)
}
|
||||
|
||||
// DebugPauseDefault is the default pause function when using the
|
||||
// DebugRunner if no PauseFn is specified. It outputs some information
|
||||
// to stderr about the step and waits for keyboard input on stdin before
|
||||
// continuing.
|
||||
func DebugPauseDefault(loc DebugLocation, name string, state StateBag) {
|
||||
var locationString string
|
||||
switch loc {
|
||||
case DebugLocationAfterRun:
|
||||
locationString = "after run of"
|
||||
case DebugLocationBeforeCleanup:
|
||||
locationString = "before cleanup of"
|
||||
}
|
||||
|
||||
fmt.Printf("Pausing %s step '%s'. Press any key to continue.\n", locationString, name)
|
||||
|
||||
var line string
|
||||
fmt.Scanln(&line)
|
||||
}
|
||||
|
||||
// debugStepPause is the synthetic step the DebugRunner inserts after each
// real step to invoke the pause function.
type debugStepPause struct {
	StepName string       // name reported to the pause function
	PauseFn  DebugPauseFn // called after Run and before Cleanup
}

// Run pauses after the preceding step's Run and always continues.
func (s *debugStepPause) Run(ctx context.Context, state StateBag) StepAction {
	s.PauseFn(DebugLocationAfterRun, s.StepName, state)
	return ActionContinue
}

// Cleanup pauses before the preceding step's Cleanup.
func (s *debugStepPause) Cleanup(state StateBag) {
	s.PauseFn(DebugLocationBeforeCleanup, s.StepName, state)
}
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
multistep is a Go library for building up complex actions using discrete,
|
||||
individual "steps." These steps are strung together and run in sequence
|
||||
to achieve a more complex goal. The runner handles cleanup, cancelling, etc.
|
||||
if necessary.
|
||||
|
||||
## Basic Example
|
||||
|
||||
Make a step to perform some action. The step can access your "state",
|
||||
which is passed between steps by the runner.
|
||||
|
||||
```go
|
||||
type stepAdd struct{}
|
||||
|
||||
func (s *stepAdd) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
// Read our value and assert that it is the type we want
|
||||
value := state.Get("value").(int)
|
||||
fmt.Printf("Value is %d\n", value)
|
||||
|
||||
// Store some state back
|
||||
state.Put("value", value + 1)
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *stepAdd) Cleanup(multistep.StateBag) {
|
||||
// This is called after all the steps have run or if the runner is
|
||||
// cancelled so that cleanup can be performed.
|
||||
}
|
||||
```
|
||||
|
||||
Make a runner and call your array of Steps.
|
||||
|
||||
```go
|
||||
func main() {
|
||||
// Our "bag of state" that we read the value from
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("value", 0)
|
||||
|
||||
steps := []multistep.Step{
|
||||
&stepAdd{},
|
||||
&stepAdd{},
|
||||
&stepAdd{},
|
||||
}
|
||||
|
||||
runner := &multistep.BasicRunner{Steps: steps}
|
||||
|
||||
// Executes the steps
|
||||
runner.Run(context.Background(), state)
|
||||
}
|
||||
```
|
||||
|
||||
This will produce:
|
||||
|
||||
```
|
||||
Value is 0
|
||||
Value is 1
|
||||
Value is 2
|
||||
```
|
||||
*/
|
||||
package multistep
|
|
@ -0,0 +1,9 @@
|
|||
package multistep
|
||||
|
||||
// if returns step only if on is true.
|
||||
func If(on bool, step Step) Step {
|
||||
if on == false {
|
||||
return &nullStep{}
|
||||
}
|
||||
return step
|
||||
}
|
71
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/multistep.go
generated
vendored
Normal file
71
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/multistep.go
generated
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
// multistep is a library for building up complex actions using individual,
|
||||
// discrete steps.
|
||||
package multistep
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A StepAction determines the next step to take regarding multi-step actions.
|
||||
type StepAction uint

const (
	ActionContinue StepAction = iota
	ActionHalt
)

// String implements fmt.Stringer for StepAction; useful for testing and
// logging.
func (a StepAction) String() string {
	if a == ActionContinue {
		return "ActionContinue"
	}
	if a == ActionHalt {
		return "ActionHalt"
	}
	return "Unexpected value: " + strconv.Itoa(int(a))
}
|
||||
|
||||
// This is the key set in the state bag when using the basic runner to
|
||||
// signal that the step sequence was cancelled.
|
||||
const StateCancelled = "cancelled"
|
||||
|
||||
// This is the key set in the state bag when a step halted the sequence.
|
||||
const StateHalted = "halted"
|
||||
|
||||
// Step is a single step that is part of a potentially large sequence
|
||||
// of other steps, responsible for performing some specific action.
|
||||
type Step interface {
	// Run is called to perform the action. The passed through context will be
	// cancelled when the runner is cancelled. The second parameter is a "state
	// bag" of untyped things. Please be very careful about type-checking the
	// items in this bag.
	//
	// The return value determines whether multi-step sequences continue
	// or should halt.
	Run(context.Context, StateBag) StepAction

	// Cleanup is called in reverse order of the steps that have run
	// and allows steps to clean up after themselves. Do not assume if this
	// ran that the entire multi-step sequence completed successfully. This
	// method can be run in the face of errors and cancellations as well.
	//
	// The parameter is the same "state bag" as Run, and represents the
	// state at the latest possible time prior to calling Cleanup.
	Cleanup(StateBag)
}
|
||||
|
||||
// Runner is a thing that runs one or more steps.
|
||||
type Runner interface {
	// Run runs the steps with the given initial state.
	Run(context.Context, StateBag)
}
|
||||
|
||||
// nullStep is a no-op step (returned by If when the condition is false).
type nullStep struct{}

// Run does nothing and lets the sequence continue.
func (s nullStep) Run(ctx context.Context, state StateBag) StepAction {
	return ActionContinue
}

// Cleanup does nothing.
func (s nullStep) Cleanup(state StateBag) {}
|
52
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/statebag.go
generated
vendored
Normal file
52
vendor/github.com/hashicorp/packer-plugin-sdk/multistep/statebag.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package multistep
|
||||
|
||||
import "sync"
|
||||
|
||||
// Add context to state bag to prevent changing step signature
|
||||
|
||||
// StateBag holds the state that is used by the Runner and Steps. The
|
||||
// StateBag implementation must be safe for concurrent access.
|
||||
type StateBag interface {
	// Get returns the value for the key, or nil when absent.
	Get(string) interface{}
	// GetOk returns the value and whether the key was present.
	GetOk(string) (interface{}, bool)
	// Put stores a value under the key.
	Put(string, interface{})
	// Remove deletes the key from the bag.
	Remove(string)
}
|
||||
|
||||
// BasicStateBag implements StateBag by using a normal map underneath
|
||||
// protected by a RWMutex.
|
||||
type BasicStateBag struct {
	data map[string]interface{} // lazily allocated on first Put
	l    sync.RWMutex           // guards data
	once sync.Once              // ensures data is allocated exactly once
}
|
||||
|
||||
func (b *BasicStateBag) Get(k string) interface{} {
|
||||
result, _ := b.GetOk(k)
|
||||
return result
|
||||
}
|
||||
|
||||
func (b *BasicStateBag) GetOk(k string) (interface{}, bool) {
|
||||
b.l.RLock()
|
||||
defer b.l.RUnlock()
|
||||
|
||||
result, ok := b.data[k]
|
||||
return result, ok
|
||||
}
|
||||
|
||||
func (b *BasicStateBag) Put(k string, v interface{}) {
|
||||
b.l.Lock()
|
||||
defer b.l.Unlock()
|
||||
|
||||
// Make sure the map is initialized one time, on write
|
||||
b.once.Do(func() {
|
||||
b.data = make(map[string]interface{})
|
||||
})
|
||||
|
||||
// Write the data
|
||||
b.data[k] = v
|
||||
}
|
||||
|
||||
func (b *BasicStateBag) Remove(k string) {
|
||||
delete(b.data, k)
|
||||
}
|
143
vendor/github.com/hashicorp/packer-plugin-sdk/net/configure_port.go
generated
vendored
Normal file
143
vendor/github.com/hashicorp/packer-plugin-sdk/net/configure_port.go
generated
vendored
Normal file
|
@ -0,0 +1,143 @@
|
|||
// Package net contains some helper wrapping functions for the http and net
|
||||
// golang libraries that meet Packer-specific needs.
|
||||
package net
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/filelock"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/retry"
|
||||
)
|
||||
|
||||
var _ net.Listener = &Listener{}

// Listener wraps a net.Listener with some Packer-specific capabilities. For
// example, until you call Listener.Close, any call to ListenRangeConfig.Listen
// cannot bind to a Port. Packer tries to tell moving parts which port they can
// use, but often the port has to be released before a 3rd party is started,
// like a VNC server.
type Listener struct {
	// Listener can be closed but Port will be file locked by packer until
	// Close is called.
	net.Listener
	Port        int    // the port that was bound
	Address     string // the address the listener was asked to bind to
	lock        *filelock.Flock // file lock marking the port as in use across Packer processes
	cleanupFunc func() error    // removes the port lock file; invoked by Close
}
|
||||
|
||||
// Close releases the port's file lock, closes the wrapped listener, and
// runs the cleanup function (removal of the lock file). Lock-release and
// cleanup failures are only logged; only a listener-close failure is
// returned.
func (l *Listener) Close() error {
	err := l.lock.Unlock()
	if err != nil {
		log.Printf("cannot unlock lockfile %#v: %v", l, err)
	}
	err = l.Listener.Close()
	if err != nil {
		return err
	}

	if l.cleanupFunc != nil {
		err := l.cleanupFunc()
		if err != nil {
			log.Printf("cannot cleanup: %#v", err)
		}
	}
	return nil
}
|
||||
|
||||
// ListenRangeConfig contains options for listening to a free address [Min,Max)
|
||||
// range. ListenRangeConfig wraps a net.ListenConfig.
|
||||
type ListenRangeConfig struct {
	// like "tcp" or "udp". defaults to "tcp".
	Network  string
	Addr     string // host/IP to bind; combined with the chosen port
	Min, Max int    // candidate port range; ports are drawn from [Min, Max)
	net.ListenConfig // embedded stdlib config used for the actual Listen call
}
|
||||
|
||||
// Listen tries to Listen to a random open TCP port in the [min, max) range
|
||||
// until ctx is cancelled.
|
||||
// Listen uses net.ListenConfig.Listen internally.
|
||||
// Listen tries to Listen to a random open TCP port in the [min, max) range
// until ctx is cancelled.
// Listen uses net.ListenConfig.Listen internally.
//
// Each attempt picks a random port in the range, takes a Packer-wide file
// lock for that port, then binds it; locked or busy ports cause a retry.
func (lc ListenRangeConfig) Listen(ctx context.Context) (*Listener, error) {
	if lc.Network == "" {
		lc.Network = "tcp"
	}
	portRange := lc.Max - lc.Min

	var listener *Listener

	err := retry.Config{
		RetryDelay: func() time.Duration { return 1 * time.Millisecond },
	}.Run(ctx, func(context.Context) error {
		// Pick a random candidate port; when Max <= Min, Min is used.
		port := lc.Min
		if portRange > 0 {
			port += rand.Intn(portRange)
		}

		lockFilePath, err := packersdk.CachePath("port", strconv.Itoa(port))
		if err != nil {
			return err
		}

		// The file lock reserves the port across Packer processes even
		// after this listener is closed, until Listener.Close is called.
		lock := filelock.New(lockFilePath)
		locked, err := lock.TryLock()
		if err != nil {
			return err
		}
		if !locked {
			return ErrPortFileLocked(port)
		}

		l, err := lc.ListenConfig.Listen(ctx, lc.Network, fmt.Sprintf("%s:%d", lc.Addr, port))
		if err != nil {
			// Binding failed (port busy): release the lock and retry.
			if err := lock.Unlock(); err != nil {
				log.Fatalf("Could not unlock file lock for port %d: %v", port, err)
			}
			return &ErrPortBusy{
				Port: port,
				Err:  err,
			}
		}

		cleanupFunc := func() error {
			return os.Remove(lockFilePath)
		}

		log.Printf("Found available port: %d on IP: %s", port, lc.Addr)
		listener = &Listener{
			Address:     lc.Addr,
			Port:        port,
			Listener:    l,
			lock:        lock,
			cleanupFunc: cleanupFunc,
		}
		return nil
	})
	return listener, err
}
|
||||
|
||||
// ErrPortFileLocked is returned when the lock file for a candidate port
// is already held by another Packer process.
type ErrPortFileLocked int

// Error implements the error interface.
func (p ErrPortFileLocked) Error() string {
	return fmt.Sprintf("Port %d is file locked", int(p))
}
|
||||
|
||||
// ErrPortBusy is returned when a candidate port was locked successfully
// but could not actually be bound.
type ErrPortBusy struct {
	Port int   // the port that failed to bind
	Err  error // the underlying bind error
}

// Error implements the error interface; a nil receiver formats as "<nil>".
func (err *ErrPortBusy) Error() string {
	if err == nil {
		return "<nil>"
	}
	return "port " + strconv.Itoa(err.Port) + " cannot be opened: " + fmt.Sprint(err.Err)
}
|
14
vendor/github.com/hashicorp/packer-plugin-sdk/net/default_client.go
generated
vendored
Normal file
14
vendor/github.com/hashicorp/packer-plugin-sdk/net/default_client.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
package net
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// HttpClientWithEnvironmentProxy returns an *http.Client whose transport
// resolves proxies from the environment (HTTP_PROXY, HTTPS_PROXY,
// NO_PROXY) via http.ProxyFromEnvironment.
func HttpClientWithEnvironmentProxy() *http.Client {
	transport := &http.Transport{
		Proxy: http.ProxyFromEnvironment,
	}
	return &http.Client{Transport: transport}
}
|
|
@ -0,0 +1,36 @@
|
|||
package packer
|
||||
|
||||
// An Artifact is the result of a build, and is the metadata that documents
|
||||
// what a builder actually created. The exact meaning of the contents is
|
||||
// specific to each builder, but this interface is used to communicate back
|
||||
// to the user the result of a build.
|
||||
type Artifact interface {
	// BuilderId returns the ID of the builder that was used to create this
	// artifact. This is the internal ID of the builder and should be unique
	// to every builder. This can be used to identify what the contents of
	// the artifact actually are.
	BuilderId() string

	// Files returns the set of files that comprise this artifact. If an
	// artifact is not made up of files, then this will be empty.
	Files() []string

	// Id returns the ID for the artifact, if it has one. This is not
	// guaranteed to be unique every run (like a GUID), but simply provides
	// an identifier for the artifact that may be meaningful in some way.
	// For example, for Amazon EC2, this value might be the AMI ID.
	Id() string

	// String returns human-readable output that describes the artifact
	// created. This is used for UI output. It can be multiple lines.
	String() string

	// State allows the caller to ask for builder specific state information
	// relating to the artifact instance.
	State(name string) interface{}

	// Destroy deletes the artifact. Packer calls this for various reasons,
	// such as if a post-processor has processed this artifact and it is
	// no longer needed.
	Destroy() error
}
|
54
vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact_mock.go
generated
vendored
Normal file
54
vendor/github.com/hashicorp/packer-plugin-sdk/packer/artifact_mock.go
generated
vendored
Normal file
|
@ -0,0 +1,54 @@
|
|||
package packer
|
||||
|
||||
// MockArtifact is a configurable Artifact implementation intended for
// use in tests. Each zero-valued field causes the corresponding method
// to fall back to a fixed default.
type MockArtifact struct {
	BuilderIdValue string
	FilesValue     []string
	IdValue        string
	StateValues    map[string]interface{}
	DestroyCalled  bool
	StringValue    string
}

// BuilderId returns BuilderIdValue, defaulting to "bid" when unset.
func (m *MockArtifact) BuilderId() string {
	if m.BuilderIdValue != "" {
		return m.BuilderIdValue
	}
	return "bid"
}

// Files returns FilesValue, defaulting to {"a", "b"} when nil.
func (m *MockArtifact) Files() []string {
	if m.FilesValue != nil {
		return m.FilesValue
	}
	return []string{"a", "b"}
}

// Id returns IdValue, defaulting to "id" when unset.
func (m *MockArtifact) Id() string {
	if m.IdValue != "" {
		return m.IdValue
	}
	return "id"
}

// String returns StringValue, defaulting to "string" when unset.
func (m *MockArtifact) String() string {
	if m.StringValue != "" {
		return m.StringValue
	}
	return "string"
}

// State looks up name in StateValues; a nil map or missing key yields
// nil.
func (m *MockArtifact) State(name string) interface{} {
	return m.StateValues[name]
}

// Destroy records that it was called and always succeeds.
func (m *MockArtifact) Destroy() error {
	m.DestroyCalled = true
	return nil
}
|
|
@ -0,0 +1,43 @@
|
|||
package packer
|
||||
|
||||
import "context"
|
||||
|
||||
// A Build represents a single job within Packer that is responsible for
|
||||
// building some machine image artifact. Builds are meant to be parallelized.
|
||||
type Build interface {
|
||||
// Name is the name of the build. This is unique across a single template,
|
||||
// but not absolutely unique. This is meant more to describe to the user
|
||||
// what is being built rather than being a unique identifier.
|
||||
Name() string
|
||||
|
||||
// Prepare configures the various components of this build and reports
|
||||
// any errors in doing so (such as syntax errors, validation errors, etc.).
|
||||
// It also reports any warnings.
|
||||
Prepare() ([]string, error)
|
||||
|
||||
// Run runs the actual builder, returning an artifact implementation
|
||||
// of what is built. If anything goes wrong, an error is returned.
|
||||
// Run can be context cancelled.
|
||||
Run(context.Context, Ui) ([]Artifact, error)
|
||||
|
||||
// SetDebug will enable/disable debug mode. Debug mode is always
|
||||
// enabled by adding the additional key "packer_debug" to boolean
|
||||
// true in the configuration of the various components. This must
|
||||
// be called prior to Prepare.
|
||||
//
|
||||
// When SetDebug is set to true, parallelism between builds is
|
||||
// strictly prohibited.
|
||||
SetDebug(bool)
|
||||
|
||||
// SetForce will enable/disable forcing a build when artifacts exist.
|
||||
//
|
||||
// When SetForce is set to true, existing artifacts from the build are
|
||||
// deleted prior to the build.
|
||||
SetForce(bool)
|
||||
|
||||
// SetOnError will determine what to do when a normal multistep step fails
|
||||
// - "cleanup" - run cleanup steps
|
||||
// - "abort" - exit without cleanup
|
||||
// - "ask" - ask the user
|
||||
SetOnError(string)
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
package packer
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// Implementers of Builder are responsible for actually building images
|
||||
// on some platform given some configuration.
|
||||
//
|
||||
// In addition to the documentation on Prepare above: Prepare is sometimes
|
||||
// configured with a `map[string]interface{}` that has a key "packer_debug".
|
||||
// This is a boolean value. If it is set to true, then the builder should
|
||||
// enable a debug mode which allows builder developers and advanced users
|
||||
// to introspect what is going on during a build. During debug builds,
|
||||
// parallelism is strictly disabled, so it is safe to request input from
|
||||
// stdin and so on.
|
||||
type Builder interface {
|
||||
HCL2Speccer
|
||||
|
||||
// Prepare is responsible for configuring the builder and validating
|
||||
// that configuration. Any setup should be done in this method. Note that
|
||||
// NO side effects should take place in prepare, it is meant as a state
|
||||
// setup only. Calling Prepare is not necessarily followed by a Run.
|
||||
//
|
||||
// The parameters to Prepare are a set of interface{} values of the
|
||||
// configuration. These are almost always `map[string]interface{}`
|
||||
// parsed from a template, but no guarantee is made.
|
||||
//
|
||||
// Each of the configuration values should merge into the final
|
||||
// configuration.
|
||||
//
|
||||
// Prepare should return a list of variables that will be made accessible to
|
||||
// users during the provision methods, a list of warnings along with any
|
||||
// errors that occurred while preparing.
|
||||
Prepare(...interface{}) ([]string, []string, error)
|
||||
|
||||
// Run is where the actual build should take place. It takes a Build and a Ui.
|
||||
Run(context.Context, Ui, Hook) (Artifact, error)
|
||||
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue