Breakout yandex (#10970)
This commit is contained in:
parent
e681669c70
commit
38837848f9
|
@ -1,76 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
|
||||
package yandex
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/yandex-cloud/go-sdk/iamkey"
|
||||
)
|
||||
|
||||
// Defaults applied by AccessConfig.Prepare when the corresponding
// fields are left unset.
const (
	defaultEndpoint   = "api.cloud.yandex.net:443"
	defaultMaxRetries = 3
)
|
||||
|
||||
// AccessConfig is for common configuration related to Yandex.Cloud API access.
// Field comments below are extracted by packer-sdc struct-markdown into the
// published documentation, so they are user-facing.
type AccessConfig struct {
	// Non standard API endpoint. Default is `api.cloud.yandex.net:443`.
	Endpoint string `mapstructure:"endpoint" required:"false"`
	// Path to file with Service Account key in json format. This
	// is an alternative method to authenticate to Yandex.Cloud. Alternatively you may set environment variable
	// `YC_SERVICE_ACCOUNT_KEY_FILE`.
	ServiceAccountKeyFile string `mapstructure:"service_account_key_file" required:"false"`
	// [OAuth token](https://cloud.yandex.com/docs/iam/concepts/authorization/oauth-token)
	// or [IAM token](https://cloud.yandex.com/docs/iam/concepts/authorization/iam-token)
	// to use to authenticate to Yandex.Cloud. Alternatively you may set
	// value by environment variable `YC_TOKEN`.
	Token string `mapstructure:"token" required:"true"`
	// The maximum number of times an API request is being executed.
	MaxRetries int `mapstructure:"max_retries"`
}
|
||||
|
||||
func (c *AccessConfig) Prepare(ctx *interpolate.Context) []error {
|
||||
var errs []error
|
||||
|
||||
if c.MaxRetries == 0 {
|
||||
c.MaxRetries = defaultMaxRetries
|
||||
}
|
||||
|
||||
if c.Endpoint == "" {
|
||||
c.Endpoint = defaultEndpoint
|
||||
}
|
||||
|
||||
// provision config by OS environment variables
|
||||
if c.Token == "" {
|
||||
c.Token = os.Getenv("YC_TOKEN")
|
||||
}
|
||||
|
||||
if c.ServiceAccountKeyFile == "" {
|
||||
c.ServiceAccountKeyFile = os.Getenv("YC_SERVICE_ACCOUNT_KEY_FILE")
|
||||
}
|
||||
|
||||
if c.Token != "" && c.ServiceAccountKeyFile != "" {
|
||||
errs = append(errs, errors.New("one of token or service account key file must be specified, not both"))
|
||||
}
|
||||
|
||||
if c.Token != "" {
|
||||
packersdk.LogSecretFilter.Set(c.Token)
|
||||
}
|
||||
|
||||
if c.ServiceAccountKeyFile != "" {
|
||||
if _, err := iamkey.ReadFromJSONFile(c.ServiceAccountKeyFile); err != nil {
|
||||
errs = append(errs, fmt.Errorf("fail to read service account key file: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,54 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
)
|
||||
|
||||
// Artifact represents the compute image produced by a successful build,
// together with the config and driver needed to describe or destroy it.
type Artifact struct {
	config *Config        // build configuration the image was created with
	driver Driver         // driver used to delete the image on Destroy
	Image  *compute.Image // the created Yandex.Cloud compute image

	// StateData should store data such as GeneratedData
	// to be shared with post-processors
	StateData map[string]interface{}
}
|
||||
|
||||
//revive:disable:var-naming

// BuilderId returns the unique ID of this builder (packersdk.Artifact
// interface; the method name is mandated by the SDK, hence the revive
// suppression above).
func (*Artifact) BuilderId() string {
	return BuilderID
}

// Id returns the ID of the created compute image.
func (a *Artifact) Id() string {
	return a.Image.Id
}

// Files returns a single empty path: a cloud image leaves no local files.
func (*Artifact) Files() []string {
	return []string{""}
}

//revive:enable:var-naming

// String returns a human-readable description of the created image.
func (a *Artifact) String() string {
	return fmt.Sprintf("A disk image was created: %v (id: %v) with family name %v", a.Image.Name, a.Image.Id, a.Image.Family)
}
|
||||
|
||||
func (a *Artifact) State(name string) interface{} {
|
||||
if _, ok := a.StateData[name]; ok {
|
||||
return a.StateData[name]
|
||||
}
|
||||
|
||||
switch name {
|
||||
case "ImageID":
|
||||
return a.Image.Id
|
||||
case "FolderID":
|
||||
return a.Image.FolderId
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
// Destroy deletes the compute image backing this artifact via the driver.
func (a *Artifact) Destroy() error {
	return a.driver.DeleteImage(a.Image.Id)
}
|
|
@ -1,69 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
)
|
||||
|
||||
// TestArtifact_impl asserts at compile time that *Artifact satisfies the
// packersdk.Artifact interface.
func TestArtifact_impl(t *testing.T) {
	var _ packersdk.Artifact = new(Artifact)
}

// TestArtifact_Id checks that the artifact ID is taken from the image ID.
func TestArtifact_Id(t *testing.T) {
	i := &compute.Image{
		Id:       "test-id-value",
		FolderId: "test-folder-id",
	}
	a := &Artifact{
		Image: i}
	expected := "test-id-value"

	if a.Id() != expected {
		t.Fatalf("artifact ID should match: %v", expected)
	}
}

// TestArtifact_String checks the human-readable description of the artifact.
func TestArtifact_String(t *testing.T) {
	i := &compute.Image{
		Id:       "test-id-value",
		FolderId: "test-folder-id",
		Name:     "test-name",
		Family:   "test-family",
	}
	a := &Artifact{
		Image: i}
	expected := "A disk image was created: test-name (id: test-id-value) with family name test-family"

	if a.String() != expected {
		t.Fatalf("artifact string should match: %v", expected)
	}
}

// TestArtifactState covers State lookups: an explicit StateData entry, an
// unknown key, and a nil StateData map (must not panic).
func TestArtifactState(t *testing.T) {
	expectedData := "this is the data"
	artifact := &Artifact{
		StateData: map[string]interface{}{"state_data": expectedData},
	}

	// Valid state
	result := artifact.State("state_data")
	if result != expectedData {
		t.Fatalf("Bad: State data was %s instead of %s", result, expectedData)
	}

	// Invalid state
	result = artifact.State("invalid_key")
	if result != nil {
		t.Fatalf("Bad: State should be nil for invalid state data name")
	}

	// Nil StateData should not fail and should return nil
	artifact = &Artifact{}
	result = artifact.State("key")
	if result != nil {
		t.Fatalf("Bad: State should be nil for nil StateData")
	}
}
|
|
@ -1,119 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
"github.com/yandex-cloud/go-sdk/pkg/requestid"
|
||||
)
|
||||
|
||||
// The unique ID for this builder.
const BuilderID = "packer.yandex"

// Builder represents a Packer Builder.
type Builder struct {
	config Config           // decoded and validated build configuration
	runner multistep.Runner // step runner, assigned in Run
}
|
||||
|
||||
// ConfigSpec returns the HCL2 object spec for this builder's configuration,
// derived from the generated FlatConfig.
func (b *Builder) ConfigSpec() hcldec.ObjectSpec { return b.config.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
func (b *Builder) Prepare(raws ...interface{}) ([]string, []string, error) {
|
||||
warnings, errs := b.config.Prepare(raws...)
|
||||
if errs != nil {
|
||||
return nil, warnings, errs
|
||||
}
|
||||
generatedData := []string{
|
||||
"ImageID",
|
||||
"ImageName",
|
||||
"ImageFamily",
|
||||
"ImageDescription",
|
||||
"ImageFolderID",
|
||||
"SourceImageID",
|
||||
"SourceImageName",
|
||||
"SourceImageDescription",
|
||||
"SourceImageFamily",
|
||||
"SourceImageFolderID",
|
||||
}
|
||||
return generatedData, warnings, nil
|
||||
}
|
||||
|
||||
// Run executes a yandex Packer build and returns a packersdk.Artifact
// representing a Yandex.Cloud compute image.
func (b *Builder) Run(ctx context.Context, ui packersdk.Ui, hook packersdk.Hook) (packersdk.Artifact, error) {
	driver, err := NewDriverYC(ui, &b.config.AccessConfig)
	// Tag every API request of this build with a client trace ID.
	ctx = requestid.ContextWithClientTraceID(ctx, uuid.New().String())

	if err != nil {
		return nil, err
	}

	// Set up the state shared by all steps.
	state := &multistep.BasicStateBag{}
	state.Put("config", &b.config)
	state.Put("driver", driver)
	state.Put("sdk", driver.SDK())
	state.Put("hook", hook)
	state.Put("ui", ui)
	generatedData := &packerbuilderdata.GeneratedData{State: state}

	// Build the steps. Order matters: SSH key -> instance -> connect ->
	// provision -> teardown -> image.
	steps := []multistep.Step{
		&StepCreateSSHKey{
			Debug:        b.config.PackerDebug,
			DebugKeyPath: fmt.Sprintf("yc_%s.pem", b.config.PackerBuildName),
		},
		&StepCreateInstance{
			Debug:         b.config.PackerDebug,
			SerialLogFile: b.config.SerialLogFile,
			GeneratedData: generatedData,
		},
		&StepInstanceInfo{},
		&communicator.StepConnect{
			Config:    &b.config.Communicator,
			Host:      CommHost,
			SSHConfig: b.config.Communicator.SSHConfigFunc(),
		},
		&commonsteps.StepProvision{},
		&commonsteps.StepCleanupTempKeys{
			Comm: &b.config.Communicator,
		},
		&StepTeardownInstance{
			SerialLogFile: b.config.SerialLogFile,
		},
		&stepCreateImage{
			GeneratedData: generatedData,
		},
	}

	// Run the steps
	b.runner = commonsteps.NewRunner(steps, b.config.PackerConfig, ui)
	b.runner.Run(ctx, state)

	// Report any errors surfaced by the steps.
	if rawErr, ok := state.GetOk("error"); ok {
		return nil, rawErr.(error)
	}

	// stepCreateImage is expected to have stored the result under "image".
	image, ok := state.GetOk("image")
	if !ok {
		return nil, fmt.Errorf("Failed to find 'image' in state. Bug?")
	}

	artifact := &Artifact{
		Image:     image.(*compute.Image),
		config:    &b.config,
		driver:    driver,
		StateData: map[string]interface{}{"generated_data": state.Get("generated_data")},
	}
	return artifact, nil
}
|
|
@ -1,77 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
|
||||
builderT "github.com/hashicorp/packer/acctest"
|
||||
)
|
||||
|
||||
// InstanceMetadataAddr is the link-local address of the instance metadata
// service queried with a GCE-compatible endpoint below.
const InstanceMetadataAddr = "169.254.169.254"

// TestBuilderAcc_basic runs an acceptance build using token authentication.
// Requires YC_TOKEN and YC_FOLDER_ID to be set.
func TestBuilderAcc_basic(t *testing.T) {
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() { testAccPreCheck(t) },
		Builder:  &Builder{},
		Template: testBuilderAccBasic,
	})
}

// TestBuilderAcc_instanceSA runs an acceptance build authenticating through
// the instance service account token from the metadata service; presumably
// must run on a Yandex.Cloud VM with an attached SA.
func TestBuilderAcc_instanceSA(t *testing.T) {
	builderT.Test(t, builderT.TestCase{
		PreCheck: func() { testAccPreCheckInstanceSA(t) },
		Builder:  &Builder{},
		Template: testBuilderAccInstanceSA,
	})
}

// testAccPreCheck fails fast when the environment variables required for
// token-based acceptance tests are missing.
func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("YC_TOKEN"); v == "" {
		t.Fatal("YC_TOKEN must be set for acceptance tests")
	}
	if v := os.Getenv("YC_FOLDER_ID"); v == "" {
		t.Fatal("YC_FOLDER_ID must be set for acceptance tests")
	}
}

// testAccPreCheckInstanceSA verifies a service-account token can be fetched
// from the metadata service before running the SA-based acceptance test.
func testAccPreCheckInstanceSA(t *testing.T) {
	client := resty.New()

	_, err := client.R().SetHeader("Metadata-Flavor", "Google").Get(tokenUrl())
	if err != nil {
		t.Fatalf("error get Service Account token assignment: %s", err)
	}

	if v := os.Getenv("YC_FOLDER_ID"); v == "" {
		t.Fatal("YC_FOLDER_ID must be set for acceptance tests")
	}
}

// Minimal template: build from the ubuntu-1804-lts family with IPv4 NAT.
const testBuilderAccBasic = `
{
	"builders": [{
		"type": "test",
		"source_image_family": "ubuntu-1804-lts",
		"use_ipv4_nat": "true",
		"ssh_username": "ubuntu"
	}]
}
`

// Same template as testBuilderAccBasic; authentication comes from the
// instance service account instead of an explicit token.
const testBuilderAccInstanceSA = `
{
	"builders": [{
		"type": "test",
		"source_image_family": "ubuntu-1804-lts",
		"use_ipv4_nat": "true",
		"ssh_username": "ubuntu"
	}]
}
`

// tokenUrl builds the metadata-service URL for the default service account
// token (GCE-compatible path).
func tokenUrl() string {
	return fmt.Sprintf("http://%s/computeMetadata/v1/instance/service-accounts/default/token", InstanceMetadataAddr)
}
|
|
@ -1,14 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
func TestBuilder_ImplementsBuilder(t *testing.T) {
|
||||
var raw interface{} = &Builder{}
|
||||
if _, ok := raw.(packersdk.Builder); !ok {
|
||||
t.Fatalf("Builder should be a builder")
|
||||
}
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"mime/multipart"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultContentType = "text/cloud-config"
|
||||
shellContentType = "text/x-shellscript"
|
||||
multipartContentType = "multipart/mixed"
|
||||
)
|
||||
|
||||
const (
|
||||
cloudInitIPv6Config = `#!/usr/bin/env bash
|
||||
dhclient -6 eth0
|
||||
`
|
||||
)
|
||||
|
||||
// MergeCloudUserMetaData allow merge some user-data sections
|
||||
func MergeCloudUserMetaData(usersData ...string) (string, error) {
|
||||
buff := new(bytes.Buffer)
|
||||
data := multipart.NewWriter(buff)
|
||||
_, err := buff.WriteString(fmt.Sprintf("Content-Type: %s; boundary=\"%s\"\r\n", multipartContentType, data.Boundary()))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = buff.WriteString("MIME-Version: 1.0\r\n\r\n")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for i, userData := range usersData {
|
||||
if len(userData) != 0 {
|
||||
w, err := data.CreatePart(textproto.MIMEHeader{
|
||||
"Content-Disposition": {fmt.Sprintf("attachment; filename=\"user-data-%d\"", i)},
|
||||
"Content-Type": {detectContentType(userData)},
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
_, err = w.Write([]byte(userData))
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
}
|
||||
return buff.String(), nil
|
||||
}
|
||||
|
||||
func detectContentType(content string) string {
|
||||
switch {
|
||||
case strings.HasPrefix(content, "#!"):
|
||||
return shellContentType
|
||||
case strings.HasPrefix(content, "#cloud-config"):
|
||||
return defaultContentType
|
||||
}
|
||||
|
||||
return defaultContentType
|
||||
}
|
|
@ -1,43 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// Sample user-data fragments for the merge test: two cloud-config documents
// and one shell script.
const (
	data1 = `
#cloud-config
bootcmd:
 - cmd1
 - cmd2
`
	data2 = `
#cloud-config
runcmd:
 - touch "cmd3"
 - cmd4
`
	data3 = `#!/bin/bash
touch /test`
)
|
||||
|
||||
// TestCloudInitMerge merges two cloud-config documents and a shell script and
// checks every payload and both MIME content types appear in the result.
func TestCloudInitMerge(t *testing.T) {
	merged, err := MergeCloudUserMetaData(
		data1,
		data2,
		data3,
	)

	require.NoError(t, err)
	require.NotEmpty(t, merged)

	require.Contains(t, merged, "cmd1")
	require.Contains(t, merged, "cmd2")
	require.Contains(t, merged, "\"cmd3\"")
	require.Contains(t, merged, "cmd4")

	require.Contains(t, merged, "text/cloud-config")
	require.Contains(t, merged, "text/x-shellscript")
}
|
|
@ -1,263 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
|
||||
package yandex
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
)
|
||||
|
||||
// Defaults used by the Prepare methods below.
const (
	defaultPlatformID    = "standard-v2"
	defaultZone          = "ru-central1-a"
	defaultGpuPlatformID = "gpu-standard-v1"
)

// reImageFamily validates image family names: starts with a lowercase
// letter, then up to 62 more chars of lowercase letters, digits or dashes,
// and the last character cannot be a dash.
var reImageFamily = regexp.MustCompile(`^[a-z]([-a-z0-9]{0,61}[a-z0-9])?$`)
|
||||
|
||||
// CommonConfig bundles the builder settings shared by every Yandex build:
// instance, disk, network and cloud (folder) configuration plus a couple of
// builder-level knobs. Field comments are user-facing (struct-markdown).
type CommonConfig struct {

	// File path to save serial port output of the launched instance.
	SerialLogFile string `mapstructure:"serial_log_file" required:"false"`
	// The time to wait for instance state changes.
	// Defaults to `5m`.
	StateTimeout time.Duration `mapstructure:"state_timeout" required:"false"`

	InstanceConfig `mapstructure:",squash"`
	DiskConfig     `mapstructure:",squash"`
	NetworkConfig  `mapstructure:",squash"`
	CloudConfig    `mapstructure:",squash"`
}
|
||||
|
||||
func (c *CommonConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
|
||||
if c.SerialLogFile != "" {
|
||||
if _, err := os.Stat(c.SerialLogFile); os.IsExist(err) {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Serial log file %s already exist", c.SerialLogFile))
|
||||
}
|
||||
}
|
||||
|
||||
if c.StateTimeout == 0 {
|
||||
c.StateTimeout = 5 * time.Minute
|
||||
}
|
||||
|
||||
errs = c.CloudConfig.Prepare(errs)
|
||||
errs = c.InstanceConfig.Prepare(errs)
|
||||
errs = c.DiskConfig.Prepare(errs)
|
||||
errs = c.NetworkConfig.Prepare(errs)
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
type CloudConfig struct {
|
||||
// The folder ID that will be used to launch instances and store images.
|
||||
// Alternatively you may set value by environment variable `YC_FOLDER_ID`.
|
||||
// To use a different folder for looking up the source image or saving the target image to
|
||||
// check options 'source_image_folder_id' and 'target_image_folder_id'.
|
||||
FolderID string `mapstructure:"folder_id" required:"true"`
|
||||
}
|
||||
|
||||
func (c *CloudConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
if c.FolderID == "" {
|
||||
c.FolderID = os.Getenv("YC_FOLDER_ID")
|
||||
}
|
||||
|
||||
if c.FolderID == "" {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, errors.New("a folder_id must be specified"))
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
type DiskConfig struct {
|
||||
// The name of the disk, if unset the instance name
|
||||
// will be used.
|
||||
DiskName string `mapstructure:"disk_name" required:"false"`
|
||||
// The size of the disk in GB. This defaults to 10/100GB.
|
||||
DiskSizeGb int `mapstructure:"disk_size_gb" required:"false"`
|
||||
// Specify disk type for the launched instance. Defaults to `network-ssd`.
|
||||
DiskType string `mapstructure:"disk_type" required:"false"`
|
||||
// Key/value pair labels to apply to the disk.
|
||||
DiskLabels map[string]string `mapstructure:"disk_labels" required:"false"`
|
||||
}
|
||||
|
||||
func (c *DiskConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
|
||||
if c.DiskSizeGb == 0 {
|
||||
c.DiskSizeGb = 10
|
||||
}
|
||||
|
||||
if c.DiskType == "" {
|
||||
c.DiskType = "network-ssd"
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
type NetworkConfig struct {
|
||||
// The Yandex VPC subnet id to use for
|
||||
// the launched instance. Note, the zone of the subnet must match the
|
||||
// zone in which the VM is launched.
|
||||
SubnetID string `mapstructure:"subnet_id" required:"false"`
|
||||
// The name of the zone to launch the instance. This defaults to `ru-central1-a`.
|
||||
Zone string `mapstructure:"zone" required:"false"`
|
||||
|
||||
// If set to true, then launched instance will have external internet
|
||||
// access.
|
||||
UseIPv4Nat bool `mapstructure:"use_ipv4_nat" required:"false"`
|
||||
// Set to true to enable IPv6 for the instance being
|
||||
// created. This defaults to `false`, or not enabled.
|
||||
//
|
||||
// -> **Note**: Usage of IPv6 will be available in the future.
|
||||
UseIPv6 bool `mapstructure:"use_ipv6" required:"false"`
|
||||
// If true, use the instance's internal IP address
|
||||
// instead of its external IP during building.
|
||||
UseInternalIP bool `mapstructure:"use_internal_ip" required:"false"`
|
||||
}
|
||||
|
||||
func (c *NetworkConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
if c.Zone == "" {
|
||||
c.Zone = defaultZone
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
type ImageConfig struct {
|
||||
// The name of the resulting image, which contains 1-63 characters and only
|
||||
// supports lowercase English characters, numbers and hyphen. Defaults to
|
||||
// `packer-{{timestamp}}`.
|
||||
ImageName string `mapstructure:"image_name" required:"false"`
|
||||
// The description of the image.
|
||||
ImageDescription string `mapstructure:"image_description" required:"false"`
|
||||
// The family name of the image.
|
||||
ImageFamily string `mapstructure:"image_family" required:"false"`
|
||||
// Key/value pair labels to apply to the image.
|
||||
ImageLabels map[string]string `mapstructure:"image_labels" required:"false"`
|
||||
// Minimum size of the disk that will be created from built image, specified in gigabytes.
|
||||
// Should be more or equal to `disk_size_gb`.
|
||||
ImageMinDiskSizeGb int `mapstructure:"image_min_disk_size_gb" required:"false"`
|
||||
// License IDs that indicate which licenses are attached to resulting image.
|
||||
ImageProductIDs []string `mapstructure:"image_product_ids" required:"false"`
|
||||
}
|
||||
|
||||
func (c *ImageConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
|
||||
if len(c.ImageFamily) > 63 {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
errors.New("Invalid image family: Must not be longer than 63 characters"))
|
||||
}
|
||||
|
||||
if c.ImageFamily != "" {
|
||||
if !reImageFamily.MatchString(c.ImageFamily) {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
errors.New("Invalid image family: The first character must be a "+
|
||||
"lowercase letter, and all following characters must be a dash, "+
|
||||
"lowercase letter, or digit, except the last character, which cannot be a dash"))
|
||||
}
|
||||
}
|
||||
|
||||
if c.ImageDescription == "" {
|
||||
c.ImageDescription = "Created by Packer"
|
||||
}
|
||||
|
||||
if c.ImageName == "" {
|
||||
img, err := interpolate.Render("packer-{{timestamp}}", nil)
|
||||
if err != nil {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Unable to render default image name: %s ", err))
|
||||
} else {
|
||||
c.ImageName = img
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
type InstanceConfig struct {
|
||||
// The number of cores available to the instance.
|
||||
InstanceCores int `mapstructure:"instance_cores" required:"false"`
|
||||
// The number of GPU available to the instance.
|
||||
InstanceGpus int `mapstructure:"instance_gpus" required:"false"`
|
||||
// The amount of memory available to the instance, specified in gigabytes.
|
||||
InstanceMemory int `mapstructure:"instance_mem_gb" required:"false"`
|
||||
// The name assigned to the instance.
|
||||
InstanceName string `mapstructure:"instance_name" required:"false"`
|
||||
// Identifier of the hardware platform configuration for the instance. This defaults to `standard-v2`.
|
||||
PlatformID string `mapstructure:"platform_id" required:"false"`
|
||||
// Key/value pair labels to apply to the launched instance.
|
||||
Labels map[string]string `mapstructure:"labels" required:"false"`
|
||||
// Metadata applied to the launched instance.
|
||||
Metadata map[string]string `mapstructure:"metadata" required:"false"`
|
||||
// Metadata applied to the launched instance.
|
||||
// The values in this map are the paths to the content files for the corresponding metadata keys.
|
||||
MetadataFromFile map[string]string `mapstructure:"metadata_from_file"`
|
||||
// Launch a preemptible instance. This defaults to `false`.
|
||||
Preemptible bool `mapstructure:"preemptible"`
|
||||
}
|
||||
|
||||
func (c *InstanceConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
if c.InstanceCores == 0 {
|
||||
c.InstanceCores = 2
|
||||
}
|
||||
|
||||
if c.InstanceMemory == 0 {
|
||||
c.InstanceMemory = 4
|
||||
}
|
||||
|
||||
if c.InstanceName == "" {
|
||||
c.InstanceName = fmt.Sprintf("packer-%s", uuid.TimeOrderedUUID())
|
||||
}
|
||||
|
||||
for key, file := range c.MetadataFromFile {
|
||||
if _, err := os.Stat(file); err != nil {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, fmt.Errorf("cannot access file '%s' with content for value of metadata key '%s': %s", file, key, err))
|
||||
}
|
||||
}
|
||||
|
||||
if c.PlatformID == "" {
|
||||
c.PlatformID = defaultPlatformID
|
||||
if c.InstanceGpus != 0 {
|
||||
c.PlatformID = defaultGpuPlatformID
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
type SourceImageConfig struct {
|
||||
// The source image family to create the new image
|
||||
// from. You can also specify source_image_id instead. Just one of a source_image_id or
|
||||
// source_image_family must be specified. Example: `ubuntu-1804-lts`.
|
||||
SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
|
||||
// The ID of the folder containing the source image.
|
||||
SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
|
||||
// The source image ID to use to create the new image from.
|
||||
SourceImageID string `mapstructure:"source_image_id" required:"false"`
|
||||
// The source image name to use to create the new image
|
||||
// from. Name will be looked up in `source_image_folder_id`.
|
||||
SourceImageName string `mapstructure:"source_image_name"`
|
||||
}
|
||||
|
||||
func (c *SourceImageConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
// Process required parameters.
|
||||
if c.SourceImageID == "" {
|
||||
if c.SourceImageFamily == "" && c.SourceImageName == "" {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, errors.New("a source_image_name or source_image_family must be specified"))
|
||||
}
|
||||
if c.SourceImageFamily != "" && c.SourceImageName != "" {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, errors.New("one of source_image_name or source_image_family must be specified, not both"))
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
//go:generate packer-sdc mapstructure-to-hcl2 -type Config
|
||||
|
||||
package yandex
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
)
|
||||
|
||||
// Config is the complete configuration for the yandex builder, assembled
// from the shared Packer config, communicator settings and the
// access/common/image/source-image sub-configurations.
type Config struct {
	common.PackerConfig `mapstructure:",squash"`
	Communicator        communicator.Config `mapstructure:",squash"`
	AccessConfig        `mapstructure:",squash"`

	CommonConfig `mapstructure:",squash"`
	ImageConfig  `mapstructure:",squash"`

	SourceImageConfig `mapstructure:",squash"`
	// Service account identifier to assign to instance.
	ServiceAccountID string `mapstructure:"service_account_id" required:"false"`

	// The ID of the folder to save built image in.
	// This defaults to value of 'folder_id'.
	TargetImageFolderID string `mapstructure:"target_image_folder_id" required:"false"`

	// ctx carries the interpolation context used while decoding templates.
	ctx interpolate.Context
}
|
||||
|
||||
func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
|
||||
c.ctx.Funcs = TemplateFuncs
|
||||
err := config.Decode(c, &config.DecodeOpts{
|
||||
Interpolate: true,
|
||||
InterpolateContext: &c.ctx,
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
var errs *packersdk.MultiError
|
||||
|
||||
errs = packersdk.MultiErrorAppend(errs, c.AccessConfig.Prepare(&c.ctx)...)
|
||||
|
||||
errs = c.CommonConfig.Prepare(errs)
|
||||
errs = c.ImageConfig.Prepare(errs)
|
||||
errs = c.SourceImageConfig.Prepare(errs)
|
||||
|
||||
if c.ImageMinDiskSizeGb == 0 {
|
||||
c.ImageMinDiskSizeGb = c.DiskSizeGb
|
||||
}
|
||||
|
||||
if c.ImageMinDiskSizeGb < c.DiskSizeGb {
|
||||
errs = packersdk.MultiErrorAppend(errs,
|
||||
fmt.Errorf("Invalid image_min_disk_size value (%d): Must be equal or greate than disk_size_gb (%d)",
|
||||
c.ImageMinDiskSizeGb, c.DiskSizeGb))
|
||||
}
|
||||
|
||||
if c.DiskName == "" {
|
||||
c.DiskName = c.InstanceName + "-disk"
|
||||
}
|
||||
|
||||
if es := c.Communicator.Prepare(&c.ctx); len(es) > 0 {
|
||||
errs = packersdk.MultiErrorAppend(errs, es...)
|
||||
}
|
||||
|
||||
if c.TargetImageFolderID == "" {
|
||||
c.TargetImageFolderID = c.FolderID
|
||||
}
|
||||
|
||||
// Check for any errors.
|
||||
if errs != nil && len(errs.Errors) > 0 {
|
||||
return nil, errs
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
|
@ -1,217 +0,0 @@
|
|||
// Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT.
|
||||
|
||||
package yandex
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatConfig struct {
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
|
||||
Type *string `mapstructure:"communicator" cty:"communicator" hcl:"communicator"`
|
||||
PauseBeforeConnect *string `mapstructure:"pause_before_connecting" cty:"pause_before_connecting" hcl:"pause_before_connecting"`
|
||||
SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
|
||||
SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
|
||||
SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
|
||||
SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
|
||||
SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
|
||||
SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
|
||||
SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
|
||||
SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
|
||||
SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
|
||||
SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
|
||||
SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
|
||||
SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
|
||||
SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
|
||||
SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
|
||||
SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
|
||||
SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
|
||||
SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
|
||||
SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
|
||||
SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
|
||||
SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
|
||||
SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
|
||||
SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
|
||||
SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
|
||||
SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
|
||||
SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
|
||||
SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
|
||||
SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
|
||||
SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
|
||||
SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
|
||||
SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
|
||||
SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
|
||||
SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
|
||||
SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
|
||||
SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
|
||||
SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
|
||||
SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
|
||||
SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
|
||||
SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
|
||||
WinRMUser *string `mapstructure:"winrm_username" cty:"winrm_username" hcl:"winrm_username"`
|
||||
WinRMPassword *string `mapstructure:"winrm_password" cty:"winrm_password" hcl:"winrm_password"`
|
||||
WinRMHost *string `mapstructure:"winrm_host" cty:"winrm_host" hcl:"winrm_host"`
|
||||
WinRMNoProxy *bool `mapstructure:"winrm_no_proxy" cty:"winrm_no_proxy" hcl:"winrm_no_proxy"`
|
||||
WinRMPort *int `mapstructure:"winrm_port" cty:"winrm_port" hcl:"winrm_port"`
|
||||
WinRMTimeout *string `mapstructure:"winrm_timeout" cty:"winrm_timeout" hcl:"winrm_timeout"`
|
||||
WinRMUseSSL *bool `mapstructure:"winrm_use_ssl" cty:"winrm_use_ssl" hcl:"winrm_use_ssl"`
|
||||
WinRMInsecure *bool `mapstructure:"winrm_insecure" cty:"winrm_insecure" hcl:"winrm_insecure"`
|
||||
WinRMUseNTLM *bool `mapstructure:"winrm_use_ntlm" cty:"winrm_use_ntlm" hcl:"winrm_use_ntlm"`
|
||||
Endpoint *string `mapstructure:"endpoint" required:"false" cty:"endpoint" hcl:"endpoint"`
|
||||
ServiceAccountKeyFile *string `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file" hcl:"service_account_key_file"`
|
||||
Token *string `mapstructure:"token" required:"true" cty:"token" hcl:"token"`
|
||||
MaxRetries *int `mapstructure:"max_retries" cty:"max_retries" hcl:"max_retries"`
|
||||
SerialLogFile *string `mapstructure:"serial_log_file" required:"false" cty:"serial_log_file" hcl:"serial_log_file"`
|
||||
StateTimeout *string `mapstructure:"state_timeout" required:"false" cty:"state_timeout" hcl:"state_timeout"`
|
||||
InstanceCores *int `mapstructure:"instance_cores" required:"false" cty:"instance_cores" hcl:"instance_cores"`
|
||||
InstanceGpus *int `mapstructure:"instance_gpus" required:"false" cty:"instance_gpus" hcl:"instance_gpus"`
|
||||
InstanceMemory *int `mapstructure:"instance_mem_gb" required:"false" cty:"instance_mem_gb" hcl:"instance_mem_gb"`
|
||||
InstanceName *string `mapstructure:"instance_name" required:"false" cty:"instance_name" hcl:"instance_name"`
|
||||
PlatformID *string `mapstructure:"platform_id" required:"false" cty:"platform_id" hcl:"platform_id"`
|
||||
Labels map[string]string `mapstructure:"labels" required:"false" cty:"labels" hcl:"labels"`
|
||||
Metadata map[string]string `mapstructure:"metadata" required:"false" cty:"metadata" hcl:"metadata"`
|
||||
MetadataFromFile map[string]string `mapstructure:"metadata_from_file" cty:"metadata_from_file" hcl:"metadata_from_file"`
|
||||
Preemptible *bool `mapstructure:"preemptible" cty:"preemptible" hcl:"preemptible"`
|
||||
DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name" hcl:"disk_name"`
|
||||
DiskSizeGb *int `mapstructure:"disk_size_gb" required:"false" cty:"disk_size_gb" hcl:"disk_size_gb"`
|
||||
DiskType *string `mapstructure:"disk_type" required:"false" cty:"disk_type" hcl:"disk_type"`
|
||||
DiskLabels map[string]string `mapstructure:"disk_labels" required:"false" cty:"disk_labels" hcl:"disk_labels"`
|
||||
SubnetID *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"`
|
||||
Zone *string `mapstructure:"zone" required:"false" cty:"zone" hcl:"zone"`
|
||||
UseIPv4Nat *bool `mapstructure:"use_ipv4_nat" required:"false" cty:"use_ipv4_nat" hcl:"use_ipv4_nat"`
|
||||
UseIPv6 *bool `mapstructure:"use_ipv6" required:"false" cty:"use_ipv6" hcl:"use_ipv6"`
|
||||
UseInternalIP *bool `mapstructure:"use_internal_ip" required:"false" cty:"use_internal_ip" hcl:"use_internal_ip"`
|
||||
FolderID *string `mapstructure:"folder_id" required:"true" cty:"folder_id" hcl:"folder_id"`
|
||||
ImageName *string `mapstructure:"image_name" required:"false" cty:"image_name" hcl:"image_name"`
|
||||
ImageDescription *string `mapstructure:"image_description" required:"false" cty:"image_description" hcl:"image_description"`
|
||||
ImageFamily *string `mapstructure:"image_family" required:"false" cty:"image_family" hcl:"image_family"`
|
||||
ImageLabels map[string]string `mapstructure:"image_labels" required:"false" cty:"image_labels" hcl:"image_labels"`
|
||||
ImageMinDiskSizeGb *int `mapstructure:"image_min_disk_size_gb" required:"false" cty:"image_min_disk_size_gb" hcl:"image_min_disk_size_gb"`
|
||||
ImageProductIDs []string `mapstructure:"image_product_ids" required:"false" cty:"image_product_ids" hcl:"image_product_ids"`
|
||||
SourceImageFamily *string `mapstructure:"source_image_family" required:"true" cty:"source_image_family" hcl:"source_image_family"`
|
||||
SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"`
|
||||
SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"`
|
||||
SourceImageName *string `mapstructure:"source_image_name" cty:"source_image_name" hcl:"source_image_name"`
|
||||
ServiceAccountID *string `mapstructure:"service_account_id" required:"false" cty:"service_account_id" hcl:"service_account_id"`
|
||||
TargetImageFolderID *string `mapstructure:"target_image_folder_id" required:"false" cty:"target_image_folder_id" hcl:"target_image_folder_id"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatConfig.
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatConfig)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a Config.
|
||||
// This spec is used by HCL to read the fields of Config.
|
||||
// The decoded values from this spec will then be applied to a FlatConfig.
|
||||
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
|
||||
s := map[string]hcldec.Spec{
|
||||
"packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
|
||||
"packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
|
||||
"packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
|
||||
"packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
|
||||
"packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
|
||||
"packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
|
||||
"packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
|
||||
"packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
|
||||
"communicator": &hcldec.AttrSpec{Name: "communicator", Type: cty.String, Required: false},
|
||||
"pause_before_connecting": &hcldec.AttrSpec{Name: "pause_before_connecting", Type: cty.String, Required: false},
|
||||
"ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
|
||||
"ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
|
||||
"ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
|
||||
"ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
|
||||
"ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
|
||||
"temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
|
||||
"ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
|
||||
"ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
|
||||
"ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
|
||||
"ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
|
||||
"ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
|
||||
"ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
|
||||
"ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
|
||||
"ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
|
||||
"ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
|
||||
"ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
|
||||
"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
|
||||
"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
|
||||
"ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
|
||||
"ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
|
||||
"ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
|
||||
"ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
|
||||
"ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
|
||||
"ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
|
||||
"ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
|
||||
"ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
|
||||
"ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
|
||||
"ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
|
||||
"winrm_username": &hcldec.AttrSpec{Name: "winrm_username", Type: cty.String, Required: false},
|
||||
"winrm_password": &hcldec.AttrSpec{Name: "winrm_password", Type: cty.String, Required: false},
|
||||
"winrm_host": &hcldec.AttrSpec{Name: "winrm_host", Type: cty.String, Required: false},
|
||||
"winrm_no_proxy": &hcldec.AttrSpec{Name: "winrm_no_proxy", Type: cty.Bool, Required: false},
|
||||
"winrm_port": &hcldec.AttrSpec{Name: "winrm_port", Type: cty.Number, Required: false},
|
||||
"winrm_timeout": &hcldec.AttrSpec{Name: "winrm_timeout", Type: cty.String, Required: false},
|
||||
"winrm_use_ssl": &hcldec.AttrSpec{Name: "winrm_use_ssl", Type: cty.Bool, Required: false},
|
||||
"winrm_insecure": &hcldec.AttrSpec{Name: "winrm_insecure", Type: cty.Bool, Required: false},
|
||||
"winrm_use_ntlm": &hcldec.AttrSpec{Name: "winrm_use_ntlm", Type: cty.Bool, Required: false},
|
||||
"endpoint": &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false},
|
||||
"service_account_key_file": &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false},
|
||||
"token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
|
||||
"max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
|
||||
"serial_log_file": &hcldec.AttrSpec{Name: "serial_log_file", Type: cty.String, Required: false},
|
||||
"state_timeout": &hcldec.AttrSpec{Name: "state_timeout", Type: cty.String, Required: false},
|
||||
"instance_cores": &hcldec.AttrSpec{Name: "instance_cores", Type: cty.Number, Required: false},
|
||||
"instance_gpus": &hcldec.AttrSpec{Name: "instance_gpus", Type: cty.Number, Required: false},
|
||||
"instance_mem_gb": &hcldec.AttrSpec{Name: "instance_mem_gb", Type: cty.Number, Required: false},
|
||||
"instance_name": &hcldec.AttrSpec{Name: "instance_name", Type: cty.String, Required: false},
|
||||
"platform_id": &hcldec.AttrSpec{Name: "platform_id", Type: cty.String, Required: false},
|
||||
"labels": &hcldec.AttrSpec{Name: "labels", Type: cty.Map(cty.String), Required: false},
|
||||
"metadata": &hcldec.AttrSpec{Name: "metadata", Type: cty.Map(cty.String), Required: false},
|
||||
"metadata_from_file": &hcldec.AttrSpec{Name: "metadata_from_file", Type: cty.Map(cty.String), Required: false},
|
||||
"preemptible": &hcldec.AttrSpec{Name: "preemptible", Type: cty.Bool, Required: false},
|
||||
"disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false},
|
||||
"disk_size_gb": &hcldec.AttrSpec{Name: "disk_size_gb", Type: cty.Number, Required: false},
|
||||
"disk_type": &hcldec.AttrSpec{Name: "disk_type", Type: cty.String, Required: false},
|
||||
"disk_labels": &hcldec.AttrSpec{Name: "disk_labels", Type: cty.Map(cty.String), Required: false},
|
||||
"subnet_id": &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false},
|
||||
"zone": &hcldec.AttrSpec{Name: "zone", Type: cty.String, Required: false},
|
||||
"use_ipv4_nat": &hcldec.AttrSpec{Name: "use_ipv4_nat", Type: cty.Bool, Required: false},
|
||||
"use_ipv6": &hcldec.AttrSpec{Name: "use_ipv6", Type: cty.Bool, Required: false},
|
||||
"use_internal_ip": &hcldec.AttrSpec{Name: "use_internal_ip", Type: cty.Bool, Required: false},
|
||||
"folder_id": &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false},
|
||||
"image_name": &hcldec.AttrSpec{Name: "image_name", Type: cty.String, Required: false},
|
||||
"image_description": &hcldec.AttrSpec{Name: "image_description", Type: cty.String, Required: false},
|
||||
"image_family": &hcldec.AttrSpec{Name: "image_family", Type: cty.String, Required: false},
|
||||
"image_labels": &hcldec.AttrSpec{Name: "image_labels", Type: cty.Map(cty.String), Required: false},
|
||||
"image_min_disk_size_gb": &hcldec.AttrSpec{Name: "image_min_disk_size_gb", Type: cty.Number, Required: false},
|
||||
"image_product_ids": &hcldec.AttrSpec{Name: "image_product_ids", Type: cty.List(cty.String), Required: false},
|
||||
"source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false},
|
||||
"source_image_folder_id": &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false},
|
||||
"source_image_id": &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false},
|
||||
"source_image_name": &hcldec.AttrSpec{Name: "source_image_name", Type: cty.String, Required: false},
|
||||
"service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
|
||||
"target_image_folder_id": &hcldec.AttrSpec{Name: "target_image_folder_id", Type: cty.String, Required: false},
|
||||
}
|
||||
return s
|
||||
}
|
|
@ -1,340 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const TestServiceAccountKeyFile = "./testdata/fake-sa-key.json"
|
||||
|
||||
func TestConfigPrepare(t *testing.T) {
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
require.NoError(t, err, "create temporary file failed")
|
||||
|
||||
defer os.Remove(tf.Name())
|
||||
tf.Close()
|
||||
|
||||
cases := []struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
Err bool
|
||||
}{
|
||||
{
|
||||
"unknown_key",
|
||||
"bad",
|
||||
true,
|
||||
},
|
||||
|
||||
{
|
||||
"service_account_key_file",
|
||||
"/tmp/i/should/not/exist",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"service_account_key_file",
|
||||
tf.Name(),
|
||||
true,
|
||||
},
|
||||
{
|
||||
"service_account_key_file",
|
||||
TestServiceAccountKeyFile,
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"folder_id",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"folder_id",
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"source_image_id",
|
||||
nil,
|
||||
true,
|
||||
},
|
||||
{
|
||||
"source_image_id",
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"source_image_family",
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"source_image_family",
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"zone",
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"zone",
|
||||
"foo",
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"ssh_timeout",
|
||||
"SO BAD",
|
||||
true,
|
||||
},
|
||||
{
|
||||
"ssh_timeout",
|
||||
"5s",
|
||||
false,
|
||||
},
|
||||
|
||||
{
|
||||
"image_family",
|
||||
nil,
|
||||
false,
|
||||
},
|
||||
{
|
||||
"image_family",
|
||||
"",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"image_family",
|
||||
"foo-bar",
|
||||
false,
|
||||
},
|
||||
{
|
||||
"image_family",
|
||||
"foo bar",
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
raw := testConfig(t)
|
||||
|
||||
if tc.Value == nil {
|
||||
delete(raw, tc.Key)
|
||||
} else {
|
||||
raw[tc.Key] = tc.Value
|
||||
}
|
||||
|
||||
if tc.Key == "service_account_key_file" {
|
||||
delete(raw, "token")
|
||||
}
|
||||
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
|
||||
if tc.Err {
|
||||
testConfigErr(t, warns, errs, tc.Key)
|
||||
} else {
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigPrepareStartupScriptFile(t *testing.T) {
|
||||
config := testConfig(t)
|
||||
|
||||
config["metadata"] = map[string]string{
|
||||
"key": "value",
|
||||
}
|
||||
|
||||
config["metadata_from_file"] = map[string]string{
|
||||
"key": "file_not_exist",
|
||||
}
|
||||
|
||||
var c Config
|
||||
_, errs := c.Prepare(config)
|
||||
|
||||
if errs == nil || !strings.Contains(errs.Error(), "cannot access file 'file_not_exist' with content "+
|
||||
"for value of metadata key 'key':") {
|
||||
t.Fatalf("should error: metadata_from_file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigImageMinDiskSize(t *testing.T) {
|
||||
cases := []struct {
|
||||
Name string
|
||||
Config map[string]interface{}
|
||||
Err bool
|
||||
}{
|
||||
{
|
||||
Name: "image_min_disk_size lower than disk_size (default value)",
|
||||
Config: map[string]interface{}{
|
||||
"image_min_disk_size_gb": 2,
|
||||
},
|
||||
Err: true,
|
||||
},
|
||||
{
|
||||
Name: "image_min_disk_size greater than disk_size (default value)",
|
||||
Config: map[string]interface{}{
|
||||
"image_min_disk_size_gb": 20,
|
||||
},
|
||||
Err: false,
|
||||
},
|
||||
{
|
||||
Name: "image_min_disk_size lower than disk_size (custom value)",
|
||||
Config: map[string]interface{}{
|
||||
"disk_size_gb": 50,
|
||||
"image_min_disk_size_gb": 20,
|
||||
},
|
||||
Err: true,
|
||||
},
|
||||
{
|
||||
Name: "image_min_disk_size greate than disk_size (custom value)",
|
||||
Config: map[string]interface{}{
|
||||
"disk_size_gb": 50,
|
||||
"image_min_disk_size_gb": 55,
|
||||
},
|
||||
Err: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
raw := testConfig(t)
|
||||
|
||||
for k, v := range tc.Config {
|
||||
raw[k] = v
|
||||
}
|
||||
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
|
||||
if tc.Err {
|
||||
testConfigErr(t, warns, errs, tc.Name)
|
||||
} else {
|
||||
testConfigOk(t, warns, errs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigDefaults(t *testing.T) {
|
||||
cases := []struct {
|
||||
Read func(c *Config) interface{}
|
||||
Value interface{}
|
||||
}{
|
||||
{
|
||||
func(c *Config) interface{} { return c.Communicator.Type },
|
||||
"ssh",
|
||||
},
|
||||
|
||||
{
|
||||
func(c *Config) interface{} { return c.Communicator.SSHPort },
|
||||
22,
|
||||
},
|
||||
|
||||
{
|
||||
func(c *Config) interface{} { return c.TargetImageFolderID },
|
||||
"hashicorp",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range cases {
|
||||
raw := testConfig(t)
|
||||
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
testConfigOk(t, warns, errs)
|
||||
|
||||
actual := tc.Read(&c)
|
||||
if actual != tc.Value {
|
||||
t.Fatalf("bad: %#v", actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestImageName(t *testing.T) {
|
||||
raw := testConfig(t)
|
||||
|
||||
var c Config
|
||||
c.Prepare(raw)
|
||||
if !strings.HasPrefix(c.ImageName, "packer-") {
|
||||
t.Fatalf("ImageName should have 'packer-' prefix, found %s", c.ImageName)
|
||||
}
|
||||
if strings.Contains(c.ImageName, "{{timestamp}}") {
|
||||
t.Errorf("ImageName should be interpolated; found %s", c.ImageName)
|
||||
}
|
||||
}
|
||||
|
||||
func TestZone(t *testing.T) {
|
||||
raw := testConfig(t)
|
||||
|
||||
var c Config
|
||||
c.Prepare(raw)
|
||||
if c.Zone != "ru-central1-a" {
|
||||
t.Fatalf("Zone should be 'ru-central1-a' given, but is '%s'", c.Zone)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGpuDefaultPlatformID(t *testing.T) {
|
||||
raw := testConfig(t)
|
||||
raw["instance_gpus"] = 1
|
||||
|
||||
var c Config
|
||||
c.Prepare(raw)
|
||||
if c.PlatformID != "gpu-standard-v1" {
|
||||
t.Fatalf("expected 'gpu-standard-v1' as default platform_id for instance with GPU(s), but got '%s'", c.PlatformID)
|
||||
}
|
||||
}
|
||||
|
||||
// Helper stuff below
|
||||
|
||||
func testConfig(t *testing.T) (config map[string]interface{}) {
|
||||
config = map[string]interface{}{
|
||||
"token": "test_token",
|
||||
"folder_id": "hashicorp",
|
||||
"source_image_id": "foo",
|
||||
"ssh_username": "root",
|
||||
"image_family": "bar",
|
||||
"image_product_ids": []string{
|
||||
"test-license",
|
||||
},
|
||||
"zone": "ru-central1-a",
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
func testConfigStruct(t *testing.T) *Config {
|
||||
raw := testConfig(t)
|
||||
|
||||
var c Config
|
||||
warns, errs := c.Prepare(raw)
|
||||
|
||||
require.True(t, len(warns) == 0, "bad: %#v", warns)
|
||||
require.NoError(t, errs, "should not have error: %s", errs)
|
||||
|
||||
return &c
|
||||
}
|
||||
|
||||
func testConfigErr(t *testing.T, warns []string, err error, extra string) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err == nil {
|
||||
t.Fatalf("should error: %s", extra)
|
||||
}
|
||||
}
|
||||
|
||||
func testConfigOk(t *testing.T, warns []string, err error) {
|
||||
if len(warns) > 0 {
|
||||
t.Fatalf("bad: %#v", warns)
|
||||
}
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
type Driver interface {
|
||||
DeleteImage(id string) error
|
||||
SDK() *ycsdk.SDK
|
||||
GetImage(imageID string) (*Image, error)
|
||||
GetImageFromFolder(ctx context.Context, folderID string, family string) (*Image, error)
|
||||
GetImageFromFolderByName(ctx context.Context, folderID string, name string) (*Image, error)
|
||||
DeleteDisk(ctx context.Context, diskID string) error
|
||||
DeleteInstance(ctx context.Context, instanceID string) error
|
||||
DeleteSubnet(ctx context.Context, subnetID string) error
|
||||
DeleteNetwork(ctx context.Context, networkID string) error
|
||||
GetInstanceMetadata(ctx context.Context, instanceID string, key string) (string, error)
|
||||
}
|
|
@ -1,265 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/useragent"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
|
||||
"github.com/hashicorp/packer/builder/yandex/version"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/endpoint"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
"github.com/yandex-cloud/go-sdk/iamkey"
|
||||
"github.com/yandex-cloud/go-sdk/pkg/requestid"
|
||||
"github.com/yandex-cloud/go-sdk/pkg/retry"
|
||||
"github.com/yandex-cloud/go-sdk/sdkresolvers"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultExponentialBackoffBase = 50 * time.Millisecond
|
||||
defaultExponentialBackoffCap = 1 * time.Minute
|
||||
)
|
||||
|
||||
type driverYC struct {
|
||||
sdk *ycsdk.SDK
|
||||
ui packersdk.Ui
|
||||
}
|
||||
|
||||
// NewDriverYC builds a Driver backed by the Yandex.Cloud SDK.
//
// Credentials are chosen from the AccessConfig in priority order:
// instance service account (when neither token nor key file is set),
// then IAM/OAuth token, then service account key file. The client is
// configured with retry and request-id gRPC interceptors, and a probe
// List call verifies the endpoint is reachable before returning.
func NewDriverYC(ui packersdk.Ui, ac *AccessConfig) (Driver, error) {
	log.Printf("[INFO] Initialize Yandex.Cloud client...")

	sdkConfig := ycsdk.Config{}

	// Allow a non-default API endpoint (e.g. for testing).
	if ac.Endpoint != "" {
		sdkConfig.Endpoint = ac.Endpoint
	}

	switch {
	case ac.Token == "" && ac.ServiceAccountKeyFile == "":
		// No explicit credentials: fall back to the metadata-served
		// service account of the instance Packer itself runs on.
		log.Printf("[INFO] Use Instance Service Account for authentication")
		sdkConfig.Credentials = ycsdk.InstanceServiceAccount()

	case ac.Token != "":
		// IAM tokens start with "t1." and contain exactly two dots;
		// anything else is treated as an OAuth token.
		if strings.HasPrefix(ac.Token, "t1.") && strings.Count(ac.Token, ".") == 2 {
			log.Printf("[INFO] Use IAM token for authentication")
			sdkConfig.Credentials = ycsdk.NewIAMTokenCredentials(ac.Token)
		} else {
			log.Printf("[INFO] Use OAuth token for authentication")
			sdkConfig.Credentials = ycsdk.OAuthToken(ac.Token)
		}
	case ac.ServiceAccountKeyFile != "":
		log.Printf("[INFO] Use Service Account key file %q for authentication", ac.ServiceAccountKeyFile)
		key, err := iamkey.ReadFromJSONFile(ac.ServiceAccountKeyFile)
		if err != nil {
			return nil, err
		}

		credentials, err := ycsdk.ServiceAccountKey(key)
		if err != nil {
			return nil, err
		}

		sdkConfig.Credentials = credentials
	}

	requestIDInterceptor := requestid.Interceptor()

	// Retry only transient Unavailable errors, with jittered exponential
	// backoff, up to the configured maximum number of attempts.
	retryInterceptor := retry.Interceptor(
		retry.WithMax(ac.MaxRetries),
		retry.WithCodes(codes.Unavailable),
		retry.WithAttemptHeader(true),
		retry.WithBackoff(retry.BackoffExponentialWithJitter(defaultExponentialBackoffBase, defaultExponentialBackoffCap)))

	// Make sure retry interceptor is above id interceptor.
	// Now we will have new request id for every retry attempt.
	interceptorChain := grpc_middleware.ChainUnaryClient(retryInterceptor, requestIDInterceptor)

	sdk, err := ycsdk.Build(context.Background(), sdkConfig,
		grpc.WithUserAgent(useragent.String(version.YandexPluginVersion.FormattedVersion())),
		grpc.WithUnaryInterceptor(interceptorChain),
	)

	if err != nil {
		return nil, err
	}

	// Smoke-test the connection and credentials with a cheap List call
	// so configuration problems surface here rather than mid-build.
	if _, err = sdk.ApiEndpoint().ApiEndpoint().List(context.Background(), &endpoint.ListApiEndpointsRequest{}); err != nil {
		return nil, err
	}

	return &driverYC{
		sdk: sdk,
		ui:  ui,
	}, nil
}
|
||||
|
||||
func (d *driverYC) GetImage(imageID string) (*Image, error) {
|
||||
image, err := d.sdk.Compute().Image().Get(context.Background(), &compute.GetImageRequest{
|
||||
ImageId: imageID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Image{
|
||||
ID: image.Id,
|
||||
Labels: image.Labels,
|
||||
Licenses: image.ProductIds,
|
||||
Name: image.Name,
|
||||
Description: image.Description,
|
||||
FolderID: image.FolderId,
|
||||
MinDiskSizeGb: toGigabytes(image.MinDiskSize),
|
||||
SizeGb: toGigabytes(image.StorageSize),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *driverYC) GetImageFromFolder(ctx context.Context, folderID string, family string) (*Image, error) {
|
||||
image, err := d.sdk.Compute().Image().GetLatestByFamily(ctx, &compute.GetImageLatestByFamilyRequest{
|
||||
FolderId: folderID,
|
||||
Family: family,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Image{
|
||||
ID: image.Id,
|
||||
Labels: image.Labels,
|
||||
Licenses: image.ProductIds,
|
||||
Name: image.Name,
|
||||
Description: image.Description,
|
||||
FolderID: image.FolderId,
|
||||
Family: image.Family,
|
||||
MinDiskSizeGb: toGigabytes(image.MinDiskSize),
|
||||
SizeGb: toGigabytes(image.StorageSize),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (d *driverYC) GetImageFromFolderByName(ctx context.Context, folderID string, imageName string) (*Image, error) {
|
||||
imageResolver := sdkresolvers.ImageResolver(imageName, sdkresolvers.FolderID(folderID))
|
||||
|
||||
if err := d.sdk.Resolve(ctx, imageResolver); err != nil {
|
||||
return nil, fmt.Errorf("failed to resolve image name: %s", err)
|
||||
}
|
||||
|
||||
return d.GetImage(imageResolver.ID())
|
||||
}
|
||||
|
||||
// DeleteImage removes the compute image with the given ID, waiting for
// the delete operation to finish and surfacing any terminal error.
func (d *driverYC) DeleteImage(ID string) error {
	// NOTE(review): the Driver interface declares DeleteImage without a
	// context, so context.TODO() is used here — unlike the sibling
	// Delete* methods, this call cannot be cancelled by the caller.
	ctx := context.TODO()
	op, err := d.sdk.WrapOperation(d.sdk.Compute().Image().Delete(ctx, &compute.DeleteImageRequest{
		ImageId: ID,
	}))
	if err != nil {
		return err
	}

	// Block until the long-running delete operation completes.
	err = op.Wait(ctx)
	if err != nil {
		return err
	}

	// Response() reports the operation's terminal error, if any.
	_, err = op.Response()
	return err
}
|
||||
|
||||
// SDK returns the underlying Yandex.Cloud SDK client.
func (d *driverYC) SDK() *ycsdk.SDK {
	return d.sdk
}
|
||||
|
||||
func (d *driverYC) DeleteInstance(ctx context.Context, instanceID string) error {
|
||||
op, err := d.sdk.WrapOperation(d.sdk.Compute().Instance().Delete(ctx, &compute.DeleteInstanceRequest{
|
||||
InstanceId: instanceID,
|
||||
}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = op.Response()
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *driverYC) DeleteSubnet(ctx context.Context, subnetID string) error {
|
||||
op, err := d.sdk.WrapOperation(d.sdk.VPC().Subnet().Delete(ctx, &vpc.DeleteSubnetRequest{
|
||||
SubnetId: subnetID,
|
||||
}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = op.Response()
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *driverYC) DeleteNetwork(ctx context.Context, networkID string) error {
|
||||
op, err := d.sdk.WrapOperation(d.sdk.VPC().Network().Delete(ctx, &vpc.DeleteNetworkRequest{
|
||||
NetworkId: networkID,
|
||||
}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = op.Response()
|
||||
return err
|
||||
|
||||
}
|
||||
|
||||
func (d *driverYC) DeleteDisk(ctx context.Context, diskID string) error {
|
||||
op, err := d.sdk.WrapOperation(d.sdk.Compute().Disk().Delete(ctx, &compute.DeleteDiskRequest{
|
||||
DiskId: diskID,
|
||||
}))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = op.Response()
|
||||
return err
|
||||
}
|
||||
|
||||
func (d *driverYC) GetInstanceMetadata(ctx context.Context, instanceID string, key string) (string, error) {
|
||||
instance, err := d.sdk.Compute().Instance().Get(ctx, &compute.GetInstanceRequest{
|
||||
InstanceId: instanceID,
|
||||
View: compute.InstanceView_FULL,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for k, v := range instance.GetMetadata() {
|
||||
if k == key {
|
||||
return v, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("Instance metadata key, %s, not found.", key)
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
package yandex
|
||||
|
||||
// Image is the builder's lightweight view of a Yandex.Cloud compute
// image, populated from the SDK's compute.Image by the driver.
type Image struct {
	ID       string            // image ID
	FolderID string            // folder the image belongs to
	Labels   map[string]string // user labels attached to the image
	Licenses []string          // product IDs (mapped from compute.Image.ProductIds)
	// Minimum boot-disk size in gigabytes (converted from bytes via toGigabytes).
	MinDiskSizeGb int
	Name          string
	Description   string
	Family        string // image family; empty when looked up by ID
	// Storage size in gigabytes (converted from bytes via toGigabytes).
	SizeGb int
}
|
|
@ -1,10 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
func CommHost(state multistep.StateBag) (string, error) {
|
||||
ipAddress := state.Get("instance_ip").(string)
|
||||
return ipAddress, nil
|
||||
}
|
|
@ -1,83 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
// stepCreateImage creates the final compute image from the build disk
// and publishes image attributes as generated build data.
type stepCreateImage struct {
	// GeneratedData receives Image* values for use in later build stages.
	GeneratedData *packerbuilderdata.GeneratedData
}
|
||||
|
||||
// Run creates a compute image from the disk whose ID was stored in the
// state bag ("disk_id"), waits for completion, and records the result
// both in the state bag ("image") and in the step's GeneratedData.
func (s *stepCreateImage) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	sdk := state.Get("sdk").(*ycsdk.SDK)
	ui := state.Get("ui").(packersdk.Ui)
	c := state.Get("config").(*Config)
	diskID := state.Get("disk_id").(string)

	ui.Say(fmt.Sprintf("Creating image: %v", c.ImageName))
	// Bound the whole create-and-wait cycle by the configured timeout.
	ctx, cancel := context.WithTimeout(ctx, c.StateTimeout)
	defer cancel()

	op, err := sdk.WrapOperation(sdk.Compute().Image().Create(ctx, &compute.CreateImageRequest{
		FolderId:    c.TargetImageFolderID,
		Name:        c.ImageName,
		Family:      c.ImageFamily,
		Description: c.ImageDescription,
		Labels:      c.ImageLabels,
		MinDiskSize: toBytes(c.ImageMinDiskSizeGb),
		ProductIds:  c.ImageProductIDs,
		Source: &compute.CreateImageRequest_DiskId{
			DiskId: diskID,
		},
	}))
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error creating image: %s", err))
	}

	ui.Say("Waiting for image to complete...")
	if err := op.Wait(ctx); err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error waiting for image: %s", err))
	}

	resp, err := op.Response()
	if err != nil {
		return StepHaltWithError(state, err)
	}
	ui.Say("Success image create...")

	// The operation response is an Any; it must be a compute.Image here.
	image, ok := resp.(*compute.Image)
	if !ok {
		return StepHaltWithError(state, errors.New("API call response doesn't contain Compute Image"))
	}

	log.Printf("Image ID: %s", image.Id)
	log.Printf("Image Name: %s", image.Name)
	log.Printf("Image Family: %s", image.Family)
	log.Printf("Image Description: %s", image.Description)
	log.Printf("Image Storage size: %d", image.StorageSize)
	state.Put("image", image)

	// provision generated_data from declared in Builder.Prepare func
	// see doc https://www.packer.io/docs/extending/custom-builders#build-variables for details
	s.GeneratedData.Put("ImageID", image.Id)
	s.GeneratedData.Put("ImageName", image.Name)
	s.GeneratedData.Put("ImageFamily", image.Family)
	s.GeneratedData.Put("ImageDescription", image.Description)
	s.GeneratedData.Put("ImageFolderID", image.FolderId)

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup is a no-op: the created image is the build artifact and must
// outlive the build.
func (stepCreateImage) Cleanup(state multistep.StateBag) {
	// no cleanup
}
|
|
@ -1,426 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/uuid"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/vpc/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
// StandardImagesFolderID is the well-known folder holding Yandex.Cloud's
// public standard images; used when no source image folder is configured.
const StandardImagesFolderID = "standard-images"

// StepCreateInstance provisions the temporary build infrastructure:
// network/subnet (when not provided), boot disk, and the build instance.
type StepCreateInstance struct {
	Debug         bool   // when true, emit extra instance/disk details to the UI
	SerialLogFile string // when non-empty, dump the serial console log during cleanup

	// GeneratedData receives SourceImage* values for later build stages.
	GeneratedData *packerbuilderdata.GeneratedData
}
|
||||
|
||||
func createNetwork(ctx context.Context, c *Config, d Driver) (*vpc.Network, error) {
|
||||
req := &vpc.CreateNetworkRequest{
|
||||
FolderId: c.FolderID,
|
||||
Name: fmt.Sprintf("packer-network-%s", uuid.TimeOrderedUUID()),
|
||||
}
|
||||
|
||||
sdk := d.SDK()
|
||||
|
||||
op, err := sdk.WrapOperation(sdk.VPC().Network().Create(ctx, req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := op.Response()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
network, ok := resp.(*vpc.Network)
|
||||
if !ok {
|
||||
return nil, errors.New("network create operation response doesn't contain Network")
|
||||
}
|
||||
return network, nil
|
||||
}
|
||||
|
||||
func createDisk(ctx context.Context, state multistep.StateBag, c *Config, d Driver, sourceImage *Image) (*compute.Disk, error) {
|
||||
req := &compute.CreateDiskRequest{
|
||||
Name: c.DiskName,
|
||||
FolderId: c.FolderID,
|
||||
TypeId: c.DiskType,
|
||||
Labels: c.DiskLabels,
|
||||
ZoneId: c.Zone,
|
||||
Size: int64((datasize.ByteSize(c.DiskSizeGb) * datasize.GB).Bytes()),
|
||||
Source: &compute.CreateDiskRequest_ImageId{
|
||||
ImageId: sourceImage.ID,
|
||||
},
|
||||
}
|
||||
sdk := d.SDK()
|
||||
|
||||
op, err := sdk.WrapOperation(sdk.Compute().Disk().Create(ctx, req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
protoMD, err := op.Metadata()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
md, ok := protoMD.(*compute.CreateDiskMetadata)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not get Instance ID from create operation metadata")
|
||||
}
|
||||
state.Put("disk_id", md.DiskId)
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := op.Response()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
disk, ok := resp.(*compute.Disk)
|
||||
if !ok {
|
||||
return nil, errors.New("disk create operation response doesn't contain Disk")
|
||||
}
|
||||
return disk, nil
|
||||
|
||||
}
|
||||
|
||||
func createSubnet(ctx context.Context, c *Config, d Driver, networkID string) (*vpc.Subnet, error) {
|
||||
req := &vpc.CreateSubnetRequest{
|
||||
FolderId: c.FolderID,
|
||||
NetworkId: networkID,
|
||||
Name: fmt.Sprintf("packer-subnet-%s", uuid.TimeOrderedUUID()),
|
||||
ZoneId: c.Zone,
|
||||
V4CidrBlocks: []string{"192.168.111.0/24"},
|
||||
}
|
||||
|
||||
sdk := d.SDK()
|
||||
|
||||
op, err := sdk.WrapOperation(sdk.VPC().Subnet().Create(ctx, req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = op.Wait(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := op.Response()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
subnet, ok := resp.(*vpc.Subnet)
|
||||
if !ok {
|
||||
return nil, errors.New("subnet create operation response doesn't contain Subnet")
|
||||
}
|
||||
return subnet, nil
|
||||
}
|
||||
|
||||
func getImage(ctx context.Context, c *Config, d Driver) (*Image, error) {
|
||||
if c.SourceImageID != "" {
|
||||
return d.GetImage(c.SourceImageID)
|
||||
}
|
||||
|
||||
folderID := c.SourceImageFolderID
|
||||
if folderID == "" {
|
||||
folderID = StandardImagesFolderID
|
||||
}
|
||||
|
||||
switch {
|
||||
case c.SourceImageFamily != "":
|
||||
return d.GetImageFromFolder(ctx, folderID, c.SourceImageFamily)
|
||||
case c.SourceImageName != "":
|
||||
return d.GetImageFromFolderByName(ctx, folderID, c.SourceImageName)
|
||||
}
|
||||
|
||||
return &Image{}, errors.New("neither source_image_name nor source_image_family defined in config")
|
||||
}
|
||||
|
||||
// Run provisions the build instance: resolves the source image, creates
// (or reuses) the network/subnet, creates the boot disk, then creates
// the instance with the configured resources, metadata and addressing.
// Every created resource ID is stored in the state bag so Cleanup can
// delete it on failure.
func (s *StepCreateInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	sdk := state.Get("sdk").(*ycsdk.SDK)
	ui := state.Get("ui").(packersdk.Ui)
	config := state.Get("config").(*Config)
	driver := state.Get("driver").(Driver)

	// Bound the whole provisioning sequence by the configured timeout.
	ctx, cancel := context.WithTimeout(ctx, config.StateTimeout)
	defer cancel()

	sourceImage, err := getImage(ctx, config, driver)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error getting source image for instance creation: %s", err))
	}

	// The boot disk must be at least as large as the image requires.
	if sourceImage.MinDiskSizeGb > config.DiskSizeGb {
		return StepHaltWithError(state, fmt.Errorf("Instance DiskSizeGb (%d) should be equal or greater "+
			"than SourceImage disk requirement (%d)", config.DiskSizeGb, sourceImage.MinDiskSizeGb))
	}

	ui.Say(fmt.Sprintf("Using as source image: %s (name: %q, family: %q)", sourceImage.ID, sourceImage.Name, sourceImage.Family))

	// create or reuse network configuration
	instanceSubnetID := ""
	if config.SubnetID == "" {
		// create Network and Subnet
		ui.Say("Creating network...")
		network, err := createNetwork(ctx, config, driver)
		if err != nil {
			return StepHaltWithError(state, fmt.Errorf("Error creating network: %s", err))
		}
		state.Put("network_id", network.Id)

		ui.Say(fmt.Sprintf("Creating subnet in zone %q...", config.Zone))
		subnet, err := createSubnet(ctx, config, driver, network.Id)
		if err != nil {
			return StepHaltWithError(state, fmt.Errorf("Error creating subnet: %s", err))
		}
		instanceSubnetID = subnet.Id
		// save for cleanup
		state.Put("subnet_id", subnet.Id)
	} else {
		ui.Say("Use provided subnet id " + config.SubnetID)
		instanceSubnetID = config.SubnetID
	}

	// Create a disk manually to have a delete ID
	ui.Say("Creating disk...")
	disk, err := createDisk(ctx, state, config, driver, sourceImage)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error creating disk: %s", err))
	}

	// Create an instance based on the configuration
	ui.Say("Creating instance...")

	instanceMetadata, err := config.createInstanceMetadata(string(config.Communicator.SSHPublicKey))
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error preparing instance metadata: %s", err))
	}

	if config.UseIPv6 {
		ui.Say("Prepare user-data...")

		// Merge the IPv6 cloud-init fragment with any user-supplied
		// user-data rather than overwriting it.
		oldUserData, ok := instanceMetadata["user-data"]
		if !ok {
			oldUserData = ""
		}
		instanceMetadata["user-data"], err = MergeCloudUserMetaData(cloudInitIPv6Config, oldUserData)
		if err != nil {
			return StepHaltWithError(state, fmt.Errorf("Error merge user data configs: %s", err))
		}
	}

	req := &compute.CreateInstanceRequest{
		FolderId:   config.FolderID,
		Name:       config.InstanceName,
		Labels:     config.Labels,
		ZoneId:     config.Zone,
		PlatformId: config.PlatformID,
		SchedulingPolicy: &compute.SchedulingPolicy{
			Preemptible: config.Preemptible,
		},
		ResourcesSpec: &compute.ResourcesSpec{
			Memory: toBytes(config.InstanceMemory),
			Cores:  int64(config.InstanceCores),
			Gpus:   int64(config.InstanceGpus),
		},
		Metadata: instanceMetadata,
		// AutoDelete is false: the boot disk must survive instance
		// deletion so the image can be created from it later.
		BootDiskSpec: &compute.AttachedDiskSpec{
			AutoDelete: false,
			Disk: &compute.AttachedDiskSpec_DiskId{
				DiskId: disk.Id,
			},
		},
		NetworkInterfaceSpecs: []*compute.NetworkInterfaceSpec{
			{
				SubnetId:             instanceSubnetID,
				PrimaryV4AddressSpec: &compute.PrimaryAddressSpec{},
			},
		},
	}

	if config.ServiceAccountID != "" {
		req.ServiceAccountId = config.ServiceAccountID
	}

	if config.UseIPv6 {
		req.NetworkInterfaceSpecs[0].PrimaryV6AddressSpec = &compute.PrimaryAddressSpec{}
	}

	if config.UseIPv4Nat {
		req.NetworkInterfaceSpecs[0].PrimaryV4AddressSpec = &compute.PrimaryAddressSpec{
			OneToOneNatSpec: &compute.OneToOneNatSpec{
				IpVersion: compute.IpVersion_IPV4,
			},
		}
	}

	op, err := sdk.WrapOperation(sdk.Compute().Instance().Create(ctx, req))
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error create instance: %s", err))
	}

	opMetadata, err := op.Metadata()
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error get create operation metadata: %s", err))
	}

	// Store the instance ID as soon as it is known (from the operation
	// metadata) so Cleanup can delete the instance even if Wait fails.
	if cimd, ok := opMetadata.(*compute.CreateInstanceMetadata); ok {
		state.Put("instance_id", cimd.InstanceId)
	} else {
		return StepHaltWithError(state, fmt.Errorf("could not get Instance ID from operation metadata"))
	}

	err = op.Wait(ctx)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error create instance: %s", err))
	}

	resp, err := op.Response()
	if err != nil {
		return StepHaltWithError(state, err)
	}

	instance, ok := resp.(*compute.Instance)
	if !ok {
		return StepHaltWithError(state, fmt.Errorf("response doesn't contain Instance"))
	}

	// instance_id is the generic term used so that users can have access to the
	// instance id inside of the provisioners, used in step_provision.
	state.Put("instance_id", instance.Id)

	if s.Debug {
		ui.Message(fmt.Sprintf("Instance ID %s started. Current instance status %s", instance.Id, instance.Status))
		ui.Message(fmt.Sprintf("Disk ID %s. ", disk.Id))
	}

	// provision generated_data from declared in Builder.Prepare func
	// see doc https://www.packer.io/docs/extending/custom-builders#build-variables for details
	s.GeneratedData.Put("SourceImageID", sourceImage.ID)
	s.GeneratedData.Put("SourceImageName", sourceImage.Name)
	s.GeneratedData.Put("SourceImageDescription", sourceImage.Description)
	s.GeneratedData.Put("SourceImageFamily", sourceImage.Family)
	s.GeneratedData.Put("SourceImageFolderID", sourceImage.FolderID)

	return multistep.ActionContinue
}
|
||||
|
||||
func (s *StepCreateInstance) Cleanup(state multistep.StateBag) {
|
||||
config := state.Get("config").(*Config)
|
||||
driver := state.Get("driver").(Driver)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), config.StateTimeout)
|
||||
defer cancel()
|
||||
|
||||
if s.SerialLogFile != "" {
|
||||
ui.Say("Current state 'cancelled' or 'halted'...")
|
||||
err := writeSerialLogFile(ctx, state, s.SerialLogFile)
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
|
||||
instanceIDRaw, ok := state.GetOk("instance_id")
|
||||
if ok {
|
||||
instanceID := instanceIDRaw.(string)
|
||||
if instanceID != "" {
|
||||
ui.Say("Destroying instance...")
|
||||
err := driver.DeleteInstance(ctx, instanceID)
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error destroying instance (id: %s). Please destroy it manually: %s", instanceID, err))
|
||||
}
|
||||
ui.Message("Instance has been destroyed!")
|
||||
}
|
||||
}
|
||||
|
||||
subnetIDRaw, ok := state.GetOk("subnet_id")
|
||||
if ok {
|
||||
subnetID := subnetIDRaw.(string)
|
||||
if subnetID != "" {
|
||||
// Destroy the subnet we just created
|
||||
ui.Say("Destroying subnet...")
|
||||
err := driver.DeleteSubnet(ctx, subnetID)
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error destroying subnet (id: %s). Please destroy it manually: %s", subnetID, err))
|
||||
}
|
||||
ui.Message("Subnet has been deleted!")
|
||||
}
|
||||
}
|
||||
|
||||
// Destroy the network we just created
|
||||
networkIDRaw, ok := state.GetOk("network_id")
|
||||
if ok {
|
||||
networkID := networkIDRaw.(string)
|
||||
if networkID != "" {
|
||||
// Destroy the network we just created
|
||||
ui.Say("Destroying network...")
|
||||
err := driver.DeleteNetwork(ctx, networkID)
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error destroying network (id: %s). Please destroy it manually: %s", networkID, err))
|
||||
}
|
||||
ui.Message("Network has been deleted!")
|
||||
}
|
||||
}
|
||||
|
||||
diskIDRaw, ok := state.GetOk("disk_id")
|
||||
if ok {
|
||||
ui.Say("Destroying boot disk...")
|
||||
diskID := diskIDRaw.(string)
|
||||
err := driver.DeleteDisk(ctx, diskID)
|
||||
if err != nil {
|
||||
ui.Error(fmt.Sprintf(
|
||||
"Error destroying boot disk (id: %s). Please destroy it manually: %s", diskID, err))
|
||||
}
|
||||
ui.Message("Disk has been deleted!")
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) createInstanceMetadata(sshPublicKey string) (map[string]string, error) {
|
||||
instanceMetadata := make(map[string]string)
|
||||
|
||||
// Copy metadata from config.
|
||||
for k, file := range c.MetadataFromFile {
|
||||
contents, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error while read file '%s' with content for value of metadata key '%s': %s", file, k, err)
|
||||
}
|
||||
instanceMetadata[k] = string(contents)
|
||||
}
|
||||
|
||||
for k, v := range c.Metadata {
|
||||
instanceMetadata[k] = v
|
||||
}
|
||||
|
||||
if sshPublicKey != "" {
|
||||
sshMetaKey := "ssh-keys"
|
||||
sshKeys := fmt.Sprintf("%s:%s", c.Communicator.SSHUsername, sshPublicKey)
|
||||
if confSSHKeys, exists := instanceMetadata[sshMetaKey]; exists {
|
||||
sshKeys = fmt.Sprintf("%s\n%s", sshKeys, confSSHKeys)
|
||||
}
|
||||
instanceMetadata[sshMetaKey] = sshKeys
|
||||
}
|
||||
|
||||
return instanceMetadata, nil
|
||||
}
|
|
@ -1,125 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const testMetadataFileContent = `meta data value`
|
||||
|
||||
func testMetadataFile(t *testing.T) string {
|
||||
tf, err := ioutil.TempFile("", "packer")
|
||||
require.NoErrorf(t, err, "create temporary file failed")
|
||||
defer tf.Close()
|
||||
|
||||
_, err = tf.Write([]byte(testMetadataFileContent))
|
||||
require.NoErrorf(t, err, "write to file failed")
|
||||
|
||||
return tf.Name()
|
||||
}
|
||||
|
||||
func TestCreateInstanceMetadata(t *testing.T) {
|
||||
state := testState(t)
|
||||
c := state.Get("config").(*Config)
|
||||
pubKey := "abcdefgh123456789"
|
||||
|
||||
// create our metadata
|
||||
metadata, err := c.createInstanceMetadata(pubKey)
|
||||
require.NoError(t, err, "Metadata creation should have succeeded.")
|
||||
|
||||
// ensure our pubKey is listed
|
||||
assert.True(t, strings.Contains(metadata["ssh-keys"], pubKey), "Instance metadata should contain provided public key")
|
||||
}
|
||||
|
||||
func TestCreateInstanceMetadata_noPublicKey(t *testing.T) {
|
||||
state := testState(t)
|
||||
c := state.Get("config").(*Config)
|
||||
sshKeys := c.Metadata["sshKeys"]
|
||||
|
||||
// create our metadata
|
||||
metadata, err := c.createInstanceMetadata("")
|
||||
|
||||
// ensure the metadata created without err
|
||||
require.NoError(t, err, "Metadata creation should have succeeded.")
|
||||
|
||||
// ensure the ssh metadata hasn't changed
|
||||
assert.Equal(t, metadata["sshKeys"], sshKeys, "Instance metadata should not have been modified")
|
||||
}
|
||||
|
||||
func TestCreateInstanceMetadata_fromFile(t *testing.T) {
|
||||
state := testState(t)
|
||||
metadataFile := testMetadataFile(t)
|
||||
defer os.Remove(metadataFile)
|
||||
|
||||
state.Put("config", testConfigStruct(t))
|
||||
c := state.Get("config").(*Config)
|
||||
c.MetadataFromFile = map[string]string{
|
||||
"test-key": metadataFile,
|
||||
}
|
||||
|
||||
// create our metadata
|
||||
metadata, err := c.createInstanceMetadata("")
|
||||
require.NoError(t, err, "Metadata creation should have succeeded.")
|
||||
|
||||
// ensure the metadata from file hasn't changed
|
||||
assert.Equal(t, testMetadataFileContent, metadata["test-key"], "Instance metadata should not have been modified")
|
||||
}
|
||||
|
||||
func TestCreateInstanceMetadata_fromFileAndTemplate(t *testing.T) {
|
||||
state := testState(t)
|
||||
metadataFile := testMetadataFile(t)
|
||||
defer os.Remove(metadataFile)
|
||||
|
||||
state.Put("config", testConfigStruct(t))
|
||||
c := state.Get("config").(*Config)
|
||||
c.MetadataFromFile = map[string]string{
|
||||
"test-key": metadataFile,
|
||||
}
|
||||
c.Metadata = map[string]string{
|
||||
"test-key": "override value",
|
||||
"test-key-2": "new-value",
|
||||
}
|
||||
|
||||
// create our metadata
|
||||
metadata, err := c.createInstanceMetadata("")
|
||||
require.NoError(t, err, "Metadata creation should have succeeded.")
|
||||
|
||||
// ensure the metadata merged
|
||||
assert.Equal(t, "override value", metadata["test-key"], "Instance metadata should not have been modified")
|
||||
assert.Equal(t, "new-value", metadata["test-key-2"], "Instance metadata should not have been modified")
|
||||
}
|
||||
|
||||
func TestCreateInstanceMetadata_fromNotExistFile(t *testing.T) {
|
||||
state := testState(t)
|
||||
metadataFile := "not-exist-file"
|
||||
|
||||
state.Put("config", testConfigStruct(t))
|
||||
c := state.Get("config").(*Config)
|
||||
c.MetadataFromFile = map[string]string{
|
||||
"test-key": metadataFile,
|
||||
}
|
||||
|
||||
// create our metadata
|
||||
_, err := c.createInstanceMetadata("")
|
||||
|
||||
assert.True(t, err != nil, "Metadata creation should have an error.")
|
||||
}
|
||||
|
||||
func testState(t *testing.T) multistep.StateBag {
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("config", testConfigStruct(t))
|
||||
state.Put("hook", &packersdk.MockHook{})
|
||||
state.Put("ui", &packersdk.BasicUi{
|
||||
Reader: new(bytes.Buffer),
|
||||
Writer: new(bytes.Buffer),
|
||||
})
|
||||
return state
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// StepCreateSSHKey loads the configured SSH private key, or generates a
// temporary RSA key pair for the build when none is configured.
type StepCreateSSHKey struct {
	Debug        bool   // when true, write the generated private key to DebugKeyPath
	DebugKeyPath string // destination for the debug copy of the private key
}
|
||||
|
||||
// Run populates config.Communicator.SSHPrivateKey/SSHPublicKey, either
// from the user-supplied private key file or from a freshly generated
// 2048-bit RSA key pair. The public key is also stored in the state bag
// under "ssh_key_public".
//
// NOTE(review): error handling mixes StepHaltWithError with manual
// state.Put("error")+ActionHalt — presumably equivalent for the caller;
// confirm against StepHaltWithError before unifying.
func (s *StepCreateSSHKey) Run(_ context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)
	config := state.Get("config").(*Config)

	// Prefer a user-provided key; derive the public half from it.
	if config.Communicator.SSHPrivateKeyFile != "" {
		ui.Say("Using existing SSH private key")
		privateKeyBytes, err := config.Communicator.ReadSSHPrivateKeyFile()
		if err != nil {
			state.Put("error", err)
			return multistep.ActionHalt
		}

		key, err := ssh.ParsePrivateKey(privateKeyBytes)
		if err != nil {
			err = fmt.Errorf("Error parsing 'ssh_private_key_file': %s", err)
			ui.Error(err.Error())
			state.Put("error", err)
			return multistep.ActionHalt
		}

		config.Communicator.SSHPublicKey = ssh.MarshalAuthorizedKey(key.PublicKey())
		config.Communicator.SSHPrivateKey = privateKeyBytes

		return multistep.ActionContinue
	}

	ui.Say("Creating temporary ssh key for instance...")

	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error generating temporary SSH key: %s", err))
	}

	// ASN.1 DER encoded form
	privDer := x509.MarshalPKCS1PrivateKey(priv)
	privBlk := pem.Block{
		Type:    "RSA PRIVATE KEY",
		Headers: nil,
		Bytes:   privDer,
	}

	// Marshal the public key into SSH compatible format
	pub, err := ssh.NewPublicKey(&priv.PublicKey)
	if err != nil {
		err = fmt.Errorf("Error creating public ssh key: %s", err)
		ui.Error(err.Error())
		state.Put("error", err)
		return multistep.ActionHalt
	}
	pubSSHFormat := string(ssh.MarshalAuthorizedKey(pub))

	// Log key fingerprints to help users match the key later.
	hashMD5 := ssh.FingerprintLegacyMD5(pub)
	hashSHA256 := ssh.FingerprintSHA256(pub)

	log.Printf("[INFO] md5 hash of ssh pub key: %s", hashMD5)
	log.Printf("[INFO] sha256 hash of ssh pub key: %s", hashSHA256)

	// Remember some state for the future
	state.Put("ssh_key_public", pubSSHFormat)

	// Set the private key in the config for later
	config.Communicator.SSHPrivateKey = pem.EncodeToMemory(&privBlk)
	config.Communicator.SSHPublicKey = ssh.MarshalAuthorizedKey(pub)

	// If we're in debug mode, output the private key to the working directory.
	if s.Debug {
		ui.Message(fmt.Sprintf("Saving key for debug purposes: %s", s.DebugKeyPath))
		err := ioutil.WriteFile(s.DebugKeyPath, config.Communicator.SSHPrivateKey, 0600)
		if err != nil {
			return StepHaltWithError(state, fmt.Errorf("Error saving debug key: %s", err))
		}
	}

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup is a no-op for this step; the generated key pair lives only on
// the config/state bag (and, in debug mode, in the file the user asked for).
func (s *StepCreateSSHKey) Cleanup(state multistep.StateBag) {
}
|
|
@ -1,111 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
// StepInstanceInfo is a multistep step that looks up the freshly created
// instance and records the IP address to connect to.
type StepInstanceInfo struct{}
|
||||
|
||||
// Run fetches the instance identified by the "instance_id" state entry
// and stores the address chosen by getInstanceIPAddress under
// "instance_ip". Halts the build if the instance cannot be retrieved or
// has no usable address.
func (s *StepInstanceInfo) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	sdk := state.Get("sdk").(*ycsdk.SDK)
	ui := state.Get("ui").(packersdk.Ui)
	c := state.Get("config").(*Config)
	instanceID := state.Get("instance_id").(string)

	ui.Say(fmt.Sprintf("Waiting for instance with id %s to become active...", instanceID))

	// Bound the API call by the builder's configured state timeout.
	ctx, cancel := context.WithTimeout(ctx, c.StateTimeout)
	defer cancel()

	// Request the FULL instance view; the address fields are read from it below.
	instance, err := sdk.Compute().Instance().Get(ctx, &compute.GetInstanceRequest{
		InstanceId: instanceID,
		View:       compute.InstanceView_FULL,
	})
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error retrieving instance data: %s", err))
	}

	instanceIP, err := getInstanceIPAddress(c, instance)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Failed to find instance ip address: %s", err))
	}

	state.Put("instance_ip", instanceIP)
	ui.Message(fmt.Sprintf("Detected instance IP: %s", instanceIP))

	return multistep.ActionContinue
}
|
||||
|
||||
func getInstanceIPAddress(c *Config, instance *compute.Instance) (address string, err error) {
|
||||
// Instance could have several network interfaces with different configuration each
|
||||
// Get all possible addresses for instance
|
||||
addrIPV4Internal, addrIPV4External, addrIPV6Addr, err := instanceAddresses(instance)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if c.UseIPv6 {
|
||||
if addrIPV6Addr != "" {
|
||||
return "[" + addrIPV6Addr + "]", nil
|
||||
}
|
||||
return "", errors.New("instance has no one IPv6 address")
|
||||
}
|
||||
|
||||
if c.UseInternalIP {
|
||||
if addrIPV4Internal != "" {
|
||||
return addrIPV4Internal, nil
|
||||
}
|
||||
return "", errors.New("instance has no one IPv4 internal address")
|
||||
}
|
||||
if addrIPV4External != "" {
|
||||
return addrIPV4External, nil
|
||||
}
|
||||
return "", errors.New("instance has no one IPv4 external address")
|
||||
}
|
||||
|
||||
// instanceAddresses scans the instance's network interfaces and returns
// the first internal IPv4 address, the first external (one-to-one NAT)
// IPv4 address, and the first IPv6 address it finds. The internal IPv4
// address is mandatory; the other two may be empty strings.
func instanceAddresses(instance *compute.Instance) (ipV4Int, ipV4Ext, ipV6 string, err error) {
	if len(instance.NetworkInterfaces) == 0 {
		return "", "", "", errors.New("No one network interface found for an instance")
	}

	// "Found" flags keep only the first occurrence of each address kind.
	var ipV4IntFound, ipV4ExtFound, ipV6Found bool
	for _, iface := range instance.NetworkInterfaces {
		if !ipV6Found && iface.PrimaryV6Address != nil {
			ipV6 = iface.PrimaryV6Address.Address
			ipV6Found = true
		}

		if !ipV4IntFound && iface.PrimaryV4Address != nil {
			ipV4Int = iface.PrimaryV4Address.Address
			ipV4IntFound = true

			// NOTE(review): the NAT (external) address is only examined on
			// the interface that supplied the internal IPv4 address; a NAT
			// address on a later interface would be missed — confirm intended.
			if !ipV4ExtFound && iface.PrimaryV4Address.OneToOneNat != nil {
				ipV4Ext = iface.PrimaryV4Address.OneToOneNat.Address
				ipV4ExtFound = true
			}
		}

		// Stop early once every address kind has been found.
		if ipV6Found && ipV4IntFound && ipV4ExtFound {
			break
		}
	}

	if !ipV4IntFound {
		// internal ipV4 address always should present
		return "", "", "", errors.New("No IPv4 internal address found. Bug?")
	}

	return
}
|
||||
|
||||
// Cleanup is a no-op; this step only reads instance data.
func (s *StepInstanceInfo) Cleanup(state multistep.StateBag) {
	// no cleanup
}
|
|
@ -1,67 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
// StepTeardownInstance stops and deletes the build instance once the
// image has been captured.
type StepTeardownInstance struct {
	// SerialLogFile, when non-empty, is the path where the instance's
	// serial port output is saved before the instance is stopped.
	SerialLogFile string
}
|
||||
|
||||
// Run stops and then deletes the instance recorded under "instance_id",
// optionally saving its serial port output to s.SerialLogFile first.
// On success the "instance_id" state entry is cleared so later code
// (e.g. writeSerialLogFile) knows the instance is gone.
func (s *StepTeardownInstance) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	sdk := state.Get("sdk").(*ycsdk.SDK)
	ui := state.Get("ui").(packersdk.Ui)
	c := state.Get("config").(*Config)

	instanceID := state.Get("instance_id").(string)

	ui.Say("Stopping instance...")
	// A single timeout covers the serial-log dump, stop, and delete below.
	ctx, cancel := context.WithTimeout(ctx, c.StateTimeout)
	defer cancel()

	// Best effort: a failed serial-log dump is reported but does not
	// abort the teardown.
	if s.SerialLogFile != "" {
		err := writeSerialLogFile(ctx, state, s.SerialLogFile)
		if err != nil {
			ui.Error(err.Error())
		}
	}

	op, err := sdk.WrapOperation(sdk.Compute().Instance().Stop(ctx, &compute.StopInstanceRequest{
		InstanceId: instanceID,
	}))
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error stopping instance: %s", err))
	}
	// Wait for the stop operation to finish before deleting.
	err = op.Wait(ctx)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error stopping instance: %s", err))
	}

	ui.Say("Deleting instance...")
	op, err = sdk.WrapOperation(sdk.Compute().Instance().Delete(ctx, &compute.DeleteInstanceRequest{
		InstanceId: instanceID,
	}))
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error deleting instance: %s", err))
	}
	err = op.Wait(ctx)
	if err != nil {
		return StepHaltWithError(state, fmt.Errorf("Error deleting instance: %s", err))
	}

	ui.Message("Instance has been deleted!")
	state.Put("instance_id", "")

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup is a no-op; the teardown itself happens in Run.
func (s *StepTeardownInstance) Cleanup(state multistep.StateBag) {
	// no cleanup
}
|
|
@ -1,38 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// isalphanumeric reports whether b is an ASCII digit or lowercase letter.
// Uppercase letters are intentionally excluded: callers lowercase first.
func isalphanumeric(b byte) bool {
	return ('0' <= b && b <= '9') || ('a' <= b && b <= 'z')
}
|
||||
|
||||
// Clean up resource name by replacing invalid characters with "-"
|
||||
// and converting upper cases to lower cases
|
||||
func templateCleanResourceName(s string) string {
|
||||
if reImageFamily.MatchString(s) {
|
||||
return s
|
||||
}
|
||||
b := []byte(strings.ToLower(s))
|
||||
newb := make([]byte, len(b))
|
||||
for i := range newb {
|
||||
if isalphanumeric(b[i]) {
|
||||
newb[i] = b[i]
|
||||
} else {
|
||||
newb[i] = '-'
|
||||
}
|
||||
}
|
||||
return string(newb)
|
||||
}
|
||||
|
||||
// TemplateFuncs maps template function names to their implementations.
var TemplateFuncs = template.FuncMap{
	"clean_resource_name": templateCleanResourceName,
}
|
|
@ -1,9 +0,0 @@
|
|||
{
|
||||
"id": "ajeboa0du6edu6m43c3t",
|
||||
"service_account_id": "ajeq7dsmihqple6761c5",
|
||||
"created_at": "2018-11-19T13:38:09Z",
|
||||
"description": "description",
|
||||
"key_algorithm": "RSA_4096",
|
||||
"public_key": "-----BEGIN PUBLIC KEY-----\nMIICCgKCAgEAo/s1lN5vFpFNJvS/l+yRilQHAPDeC3JqBwpLstbqJXW4kAUaKKoe\nxkIuJuPUKOUcd/JE3LXOEt/LOFb9mkCRdpjaIW7Jd5Fw0kTHIZ5rDoq7DZx0LV9b\nGJNskdccd6M6stb1GEqVuGpVcyXMCH8tMSG3c85DkcAg0cxXgyrirAzHMPiWSTpj\nJjICkxXRVj01Xq7dIDqL2LSMrZ2kLda5m+CnfscUbwnGRPPoEg20jLiEgBM2o43e\nhpWko1NStRR5fMQcQSUBbdtvbfPracjZz2/fq4fZfqlnObgq3WpYpdGynniLH3i5\nbxPM3ufYL3HY2w5aIOY6KIwMKLf3WYlug90ieviMYAvCukrCASwyqBQlt3MKCHlN\nIcebZXJDQ1VSBuEs+4qXYlhG1p+5C07zahzigNNTm6rEo47FFfClF04mv2uJN42F\nfWlEPR+V9JHBcfcBCdvyhiGzftl/vDo2NdO751ETIhyNKzxM/Ve2PR9h/qcuEatC\nLlXUA+40epNNHbSxAauxcngyrtkn7FZAEhdjyTtx46sELyb90Z56WgnbNUUGnsS/\nHBnBy5z8RyCmI5MjTC2NtplVqtAWkG+x59mU3GoCeuI8EaNtu2YPXhl1ovRkS4NB\n1G0F4c5FiJ27/E2MbNKlV5iw9ICcDforATYTeqiXbkkEKqIIiZYZWOsCAwEAAQ==\n-----END PUBLIC KEY-----\n",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIJKQIBAAKCAgEAo/s1lN5vFpFNJvS/l+yRilQHAPDeC3JqBwpLstbqJXW4kAUa\nKKoexkIuJuPUKOUcd/JE3LXOEt/LOFb9mkCRdpjaIW7Jd5Fw0kTHIZ5rDoq7DZx0\nLV9bGJNskdccd6M6stb1GEqVuGpVcyXMCH8tMSG3c85DkcAg0cxXgyrirAzHMPiW\nSTpjJjICkxXRVj01Xq7dIDqL2LSMrZ2kLda5m+CnfscUbwnGRPPoEg20jLiEgBM2\no43ehpWko1NStRR5fMQcQSUBbdtvbfPracjZz2/fq4fZfqlnObgq3WpYpdGynniL\nH3i5bxPM3ufYL3HY2w5aIOY6KIwMKLf3WYlug90ieviMYAvCukrCASwyqBQlt3MK\nCHlNIcebZXJDQ1VSBuEs+4qXYlhG1p+5C07zahzigNNTm6rEo47FFfClF04mv2uJ\nN42FfWlEPR+V9JHBcfcBCdvyhiGzftl/vDo2NdO751ETIhyNKzxM/Ve2PR9h/qcu\nEatCLlXUA+40epNNHbSxAauxcngyrtkn7FZAEhdjyTtx46sELyb90Z56WgnbNUUG\nnsS/HBnBy5z8RyCmI5MjTC2NtplVqtAWkG+x59mU3GoCeuI8EaNtu2YPXhl1ovRk\nS4NB1G0F4c5FiJ27/E2MbNKlV5iw9ICcDforATYTeqiXbkkEKqIIiZYZWOsCAwEA\nAQKCAgEAihT1L6CGhshf4VfjJfktLQBIzYAGWjlEEx2WVMgobtbMTWoedvOZ6nS8\nDD943d7ftBkr53aoSrhslcqazpNkaiuYMuLpf2fXSxhjXmnZ2Gr1zCZcpgBP40fw\n+nXbINswiHv98zCLFrljrwy63MTKtz6fDkM4HrlcaY3aezdXnG0+JnyNgKhL6VPf\nWx/aIPZ1xH8W8RabwCV4+JFwOLFBpoLsSBM3n7DpZhLE7r7ftEeEO5zyO5MxOL81\n3dpCIP1Wt7sj169jnrBTCpGFQJTC5Kxd+kDw4nmf1LjCT6RHdYo5ELyM2jl8XI6d\ny24LWxhQ9VUGjAGSI6aabodLH/hcOBB2wG1tnO+n5y85GnKKOJgxCxaj1yR/LAcT\nFvZgbDGwAMd7h7+fU46Yj5BILk6mRvBNL6Mk2VAlBzUatGduU+Xxha3JkGxIJY4G\np1qPLNiP7as90mXXMgNEtsP2zXtyi+9q7XBOBnfL3ftHWQmu7MKQCHIKcNRchFJ4\nS1LtndjXtNchzDhbXru2qsRiASmL9u4CgZn/lM3kDHs+v2JI+V8cPk5XZhoPrrpP\nZ0SPeoLZEJ5/TtlTWAXXqP6F24rziBqnEJgpNCkeBnQYx2Rs9OKVsrlDk8cf3KkL\nH8qQ/86HYz9cEtFnVKAYOV5GtQsJRyzipMy7R/cegdtWJ8ScuiECggEBANOT7lBX\nRYw+k53TRpkk7NlWuQogKKEQx4PEf8A6HQj3SseH8u+tt3HfTFJktzWs/9EQerLS\nJky9bSPxBvDq0Zfj+IPamiY+c2w5a9WbLxk8UHCaUHcSUeWoWQwmCZqzXeUNj9f5\nQOfF+ajsqhaXE68/HuIj+dgOOn/XYyqNkxlidXa9U3gUanuftwRSephsGcsaEGTe\nep2My4Jj3hPH/9Qoith0X18atRru6RanK63bDl0FqAU/1uUycQr+h0hEwQHWoRiq\nNVXI1uxfi5/2pxK0w1MOzZLitwEQ/veCv6CZwNPf1SW1U8j70SvKVR8Z7gGDIPjS\n8klW2Z9g6gxPQ1MCggEBAMZpBFa4mEnsmt+paEFCGUtoeBapjZF94PBtdxII/T5t\ne5z4Iz7RMl+ixLhNepQu+0t+v1iDVJgDJuUjCsSF69jEca7gzmsWhs9d+gDU5Knm\n18ChbQyeaDvmqINCs2t45pA/mVIQHbA8L8n/ToI5P
63ZELDUFVzZo9kerZu1ALNB\nRoG0PhIHrGkZKwL8oE72nrZmWtfjROsZBhu7FqJ0i7va/6fgNMuMtBC/abOC7yVT\nir5XP+ZGF8XNyIZ3Ic0X8xc+XqagYsf+XobHGmbSct/ZaDP3g1z4B/7JZcbYjuTZ\nMJ3s5T+6l/qo0dfDuaVBJFJrnw8YfahX/Bn4OQ2TuQkCggEBALfhs5dDogA3Spg6\nTPtAalCh3IP+WxFQwfW1S8pHN4DZW7Z6YxsHgY2IIo7hVZFi35pVli3gEsVTRI2e\nJwgvLSWzTgNac+qVED+Y0C1/h7mI/+g9VX2HAIJ2g53ZWTOIfCxcUw3DZTOKjmbP\n+StU9hiy5SZpWfT6uMDu8xLCpHvFZI1kEi0koT78GlW5US8zlF8+Mc1YxnwzJ5QV\nM6dBhQhgi/t/eHvxfEECLrYvZ/jbj2otRk/5oczkv/ZsLCsVBiGQ5cXH+D6sJI6e\no3zNI3tQewmurd/hBmf4239FtUHhHwOFX3w8Uas1oB9M5Bn5sS7DRl67BzPSNaUc\n140HPl0CggEAX1+13TXoxog8vkzBt7TdUdlK+KHSUmCvEwObnAjEKxEXvZGt55FJ\n5JzqcSmVRcv7sgOgWRzwOg4x0S1yDJvPjiiH+SdJMkLm1KF4/pNXw7AagBdYwxsW\nQc0Trd0PQBcixa48tizXCJM16aSXCZQZXykbk9Su3C4mS8UqcNGmH4S+LrUErUgR\nAYg+m7XyHWMBUe6LtoEh7Nzfic76B2d8j/WqtPjaiAn/uJk6ZzcGW+v3op1wMvH4\nlXXg8XosvljH2qF5gCFSuo40xBbLQyfgXmg0Zd6Rv8velAQdr2MD9U/NxexNGsBI\nNA6YqF4GTECvBAuFrwz3wkdhAN7IFhWveQKCAQBdfdHB3D+m+b/hZoEIv0nPcgQf\ncCOPPNO/ufObjWed2jTL3RjoDT337Mp3mYkoP4GE9n6cl7mjlcrf7KQeRG8k35fv\n3nMoMOp21qj9J66UgGf1/RHsV/+ljcu87ggYDCVKd8uGzkspRIQIsD77He/TwZNa\nyWL4fa1EvRU6STwi7CZFfhWhMF3rBGAPshABoyJZh6Z14cioAKSR0Sl6XZ5dcB9B\naoJM8sISSlOqMIJyNnyMtdE55Ag+P7LyMe2grxlwVTv3h0o5mHSzWnjSHVYvN4q5\n6h5UUopLtyVMGCwOJz+zNT7zFqi4XIGU8a8Lg1iiKtfjgHB2X8ZWZuXBdrTj\n-----END PRIVATE KEY-----\n"
|
||||
}
|
|
@ -1,82 +0,0 @@
|
|||
// This terraform code allows you to quickly create an instance with the assigned service account and the necessary rights.
|
||||
// To start, it is required to specify two parameters: oauth token and folder id.
|
||||
//
|
||||
// $ terraform apply -var yc_token=<your_token_value> -var folder_id=<your_folder_id>
|
||||
//
|
||||
// After testing and completing work, just run the command to delete all provisioned cloud resources:
|
||||
//
|
||||
// $ terraform destroy -var yc_token=<your_token_value> -var folder_id=<your_folder_id>
|
||||
//
|
||||
|
||||
// Variables section
|
||||
variable "yc_token" {
|
||||
description = "Yandex.Cloud OAuth token"
|
||||
}
|
||||
|
||||
variable "folder_id" {
|
||||
description = "Folder ID to provision all cloud resources"
|
||||
}
|
||||
|
||||
variable "username" {
|
||||
default = "ubuntu"
|
||||
}
|
||||
|
||||
variable "path_to_ssh_public_key" {
|
||||
default = "~/.ssh/id_rsa.pub"
|
||||
}
|
||||
|
||||
// Provider section
|
||||
provider "yandex" {
|
||||
folder_id = var.folder_id
|
||||
token = var.yc_token
|
||||
zone = "ru-central1-a"
|
||||
}
|
||||
|
||||
// Main section
|
||||
data "yandex_compute_image" "ubuntu" {
|
||||
family = "ubuntu-1604-lts"
|
||||
}
|
||||
|
||||
resource "yandex_vpc_network" "this" {
|
||||
}
|
||||
|
||||
resource "yandex_vpc_subnet" "this" {
|
||||
network_id = yandex_vpc_network.this.id
|
||||
v4_cidr_blocks = ["192.168.86.0/24"]
|
||||
}
|
||||
|
||||
resource "yandex_compute_instance" "this" {
|
||||
service_account_id = yandex_iam_service_account.this.id
|
||||
boot_disk {
|
||||
initialize_params {
|
||||
image_id = data.yandex_compute_image.ubuntu.id
|
||||
}
|
||||
}
|
||||
network_interface {
|
||||
subnet_id = yandex_vpc_subnet.this.id
|
||||
nat = true
|
||||
}
|
||||
resources {
|
||||
cores = 1
|
||||
memory = 1
|
||||
core_fraction = 20
|
||||
}
|
||||
metadata = {
|
||||
ssh-keys = "${var.username}:${file(var.path_to_ssh_public_key)}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "yandex_iam_service_account" "this" {
|
||||
name = "test-sa-for-instance"
|
||||
}
|
||||
|
||||
resource "yandex_resourcemanager_folder_iam_member" "this" {
|
||||
folder_id = var.folder_id
|
||||
member = "serviceAccount:${yandex_iam_service_account.this.id}"
|
||||
role = "editor"
|
||||
}
|
||||
|
||||
// Output section
|
||||
output "result" {
|
||||
value = "\nuse ssh with login `ubuntu` to connect instance like:\n\n$ ssh -i ${var.path_to_ssh_public_key} -l ${var.username} ${yandex_compute_instance.this.network_interface[0].nat_ip_address}"
|
||||
}
|
|
@ -1,51 +0,0 @@
|
|||
package yandex
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
func StepHaltWithError(state multistep.StateBag, err error) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
state.Put("error", err)
|
||||
ui.Error(err.Error())
|
||||
return multistep.ActionHalt
|
||||
}
|
||||
|
||||
func toGigabytes(bytesCount int64) int {
|
||||
return int((datasize.ByteSize(bytesCount) * datasize.B).GBytes())
|
||||
}
|
||||
|
||||
func toBytes(gigabytesCount int) int64 {
|
||||
return int64((datasize.ByteSize(gigabytesCount) * datasize.GB).Bytes())
|
||||
}
|
||||
|
||||
// writeSerialLogFile fetches the serial port output of the instance
// recorded under "instance_id" and writes it to serialLogFile with 0600
// permissions. It is a no-op (nil error) when no instance id is present.
func writeSerialLogFile(ctx context.Context, state multistep.StateBag, serialLogFile string) error {
	sdk := state.Get("sdk").(*ycsdk.SDK)
	ui := state.Get("ui").(packersdk.Ui)

	instanceID, ok := state.GetOk("instance_id")

	// Nothing to do if the instance was never created or already deleted
	// (teardown clears "instance_id" to the empty string).
	if !ok || instanceID.(string) == "" {
		return nil
	}
	ui.Say("Try get instance's serial port output and write to file " + serialLogFile)
	serialOutput, err := sdk.Compute().Instance().GetSerialPortOutput(ctx, &compute.GetInstanceSerialPortOutputRequest{
		InstanceId: instanceID.(string),
	})
	if err != nil {
		return fmt.Errorf("Failed to get serial port output for instance (id: %s): %s", instanceID, err)
	}
	if err := ioutil.WriteFile(serialLogFile, []byte(serialOutput.Contents), 0600); err != nil {
		return fmt.Errorf("Failed to write serial port output to file: %s", err)
	}
	ui.Message("Serial port output has been successfully written")
	return nil
}
|
|
@ -1,13 +0,0 @@
|
|||
package version
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/version"
|
||||
packerVersion "github.com/hashicorp/packer/version"
|
||||
)
|
||||
|
||||
// YandexPluginVersion holds the plugin's version information, populated
// at startup from the Packer core version.
var YandexPluginVersion *version.PluginVersion

// init builds YandexPluginVersion from the core Packer version and
// prerelease strings.
func init() {
	YandexPluginVersion = version.InitializePluginVersion(
		packerVersion.Version, packerVersion.VersionPrerelease)
}
|
|
@ -20,14 +20,11 @@ import (
|
|||
nullbuilder "github.com/hashicorp/packer/builder/null"
|
||||
oneandonebuilder "github.com/hashicorp/packer/builder/oneandone"
|
||||
profitbricksbuilder "github.com/hashicorp/packer/builder/profitbricks"
|
||||
yandexbuilder "github.com/hashicorp/packer/builder/yandex"
|
||||
artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice"
|
||||
checksumpostprocessor "github.com/hashicorp/packer/post-processor/checksum"
|
||||
compresspostprocessor "github.com/hashicorp/packer/post-processor/compress"
|
||||
manifestpostprocessor "github.com/hashicorp/packer/post-processor/manifest"
|
||||
shelllocalpostprocessor "github.com/hashicorp/packer/post-processor/shell-local"
|
||||
yandexexportpostprocessor "github.com/hashicorp/packer/post-processor/yandex-export"
|
||||
yandeximportpostprocessor "github.com/hashicorp/packer/post-processor/yandex-import"
|
||||
azuredtlartifactprovisioner "github.com/hashicorp/packer/provisioner/azure-dtlartifact"
|
||||
breakpointprovisioner "github.com/hashicorp/packer/provisioner/breakpoint"
|
||||
fileprovisioner "github.com/hashicorp/packer/provisioner/file"
|
||||
|
@ -53,7 +50,6 @@ var Builders = map[string]packersdk.Builder{
|
|||
"null": new(nullbuilder.Builder),
|
||||
"oneandone": new(oneandonebuilder.Builder),
|
||||
"profitbricks": new(profitbricksbuilder.Builder),
|
||||
"yandex": new(yandexbuilder.Builder),
|
||||
}
|
||||
|
||||
var Provisioners = map[string]packersdk.Provisioner{
|
||||
|
@ -71,13 +67,11 @@ var Provisioners = map[string]packersdk.Provisioner{
|
|||
}
|
||||
|
||||
var PostProcessors = map[string]packersdk.PostProcessor{
|
||||
"artifice": new(artificepostprocessor.PostProcessor),
|
||||
"checksum": new(checksumpostprocessor.PostProcessor),
|
||||
"compress": new(compresspostprocessor.PostProcessor),
|
||||
"manifest": new(manifestpostprocessor.PostProcessor),
|
||||
"shell-local": new(shelllocalpostprocessor.PostProcessor),
|
||||
"yandex-export": new(yandexexportpostprocessor.PostProcessor),
|
||||
"yandex-import": new(yandeximportpostprocessor.PostProcessor),
|
||||
"artifice": new(artificepostprocessor.PostProcessor),
|
||||
"checksum": new(checksumpostprocessor.PostProcessor),
|
||||
"compress": new(compresspostprocessor.PostProcessor),
|
||||
"manifest": new(manifestpostprocessor.PostProcessor),
|
||||
"shell-local": new(shelllocalpostprocessor.PostProcessor),
|
||||
}
|
||||
|
||||
var Datasources = map[string]packersdk.Datasource{}
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
package command
|
||||
|
||||
import (
|
||||
convergeprovisioner "github.com/hashicorp/packer-plugin-converge/provisioner/converge"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
|
||||
// Previously core-bundled components, split into their own plugins but
|
||||
|
@ -23,6 +22,7 @@ import (
|
|||
chefclientprovisioner "github.com/hashicorp/packer-plugin-chef/provisioner/chef-client"
|
||||
chefsoloprovisioner "github.com/hashicorp/packer-plugin-chef/provisioner/chef-solo"
|
||||
cloudstackbuilder "github.com/hashicorp/packer-plugin-cloudstack/builder/cloudstack"
|
||||
convergeprovisioner "github.com/hashicorp/packer-plugin-converge/provisioner/converge"
|
||||
digitaloceanbuilder "github.com/hashicorp/packer-plugin-digitalocean/builder/digitalocean"
|
||||
digitaloceanimportpostprocessor "github.com/hashicorp/packer-plugin-digitalocean/post-processor/digitalocean-import"
|
||||
dockerbuilder "github.com/hashicorp/packer-plugin-docker/builder/docker"
|
||||
|
@ -73,6 +73,9 @@ import (
|
|||
vsphereisobuilder "github.com/hashicorp/packer-plugin-vsphere/builder/vsphere/iso"
|
||||
vspherepostprocessor "github.com/hashicorp/packer-plugin-vsphere/post-processor/vsphere"
|
||||
vspheretemplatepostprocessor "github.com/hashicorp/packer-plugin-vsphere/post-processor/vsphere-template"
|
||||
yandexbuilder "github.com/hashicorp/packer-plugin-yandex/builder/yandex"
|
||||
yandexexportpostprocessor "github.com/hashicorp/packer-plugin-yandex/post-processor/yandex-export"
|
||||
yandeximportpostprocessor "github.com/hashicorp/packer-plugin-yandex/post-processor/yandex-import"
|
||||
)
|
||||
|
||||
// VendoredDatasources are datasource components that were once bundled with the
|
||||
|
@ -129,6 +132,7 @@ var VendoredBuilders = map[string]packersdk.Builder{
|
|||
"osc-bsusurrogate": new(oscbsusurrogatebuilder.Builder),
|
||||
"osc-bsuvolume": new(oscbsuvolumebuilder.Builder),
|
||||
"osc-chroot": new(oscchrootbuilder.Builder),
|
||||
"yandex": new(yandexbuilder.Builder),
|
||||
}
|
||||
|
||||
// VendoredProvisioners are provisioner components that were once bundled with the
|
||||
|
@ -161,6 +165,8 @@ var VendoredPostProcessors = map[string]packersdk.PostProcessor{
|
|||
"vagrant-cloud": new(vagrantcloudpostprocessor.PostProcessor),
|
||||
"vsphere-template": new(vspheretemplatepostprocessor.PostProcessor),
|
||||
"vsphere": new(vspherepostprocessor.PostProcessor),
|
||||
"yandex-export": new(yandexexportpostprocessor.PostProcessor),
|
||||
"yandex-import": new(yandeximportpostprocessor.PostProcessor),
|
||||
}
|
||||
|
||||
// Upon init lets load up any plugins that were vendored manually into the default
|
||||
|
|
9
go.mod
9
go.mod
|
@ -10,20 +10,15 @@ require (
|
|||
github.com/Azure/go-autorest/autorest/date v0.2.0
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0
|
||||
github.com/approvals/go-approval-tests v0.0.0-20160714161514-ad96e53bea43
|
||||
github.com/aws/aws-sdk-go v1.38.22
|
||||
github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3
|
||||
github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee
|
||||
github.com/cheggaaa/pb v1.0.27
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible
|
||||
github.com/dsnet/compress v0.0.1
|
||||
github.com/exoscale/packer-plugin-exoscale v0.1.1
|
||||
github.com/go-resty/resty/v2 v2.3.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/google/go-cmp v0.5.5
|
||||
github.com/google/go-github/v33 v33.0.1-0.20210113204525-9318e629ec69
|
||||
github.com/google/uuid v1.2.0
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0
|
||||
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026
|
||||
github.com/hashicorp/go-checkpoint v0.0.0-20171009173528-1545e56e46de
|
||||
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
|
||||
|
@ -65,6 +60,7 @@ require (
|
|||
github.com/hashicorp/packer-plugin-virtualbox v0.0.1
|
||||
github.com/hashicorp/packer-plugin-vmware v0.0.1
|
||||
github.com/hashicorp/packer-plugin-vsphere v0.0.1
|
||||
github.com/hashicorp/packer-plugin-yandex v0.0.4
|
||||
github.com/klauspost/pgzip v0.0.0-20151221113845-47f36e165cec
|
||||
github.com/masterzen/winrm v0.0.0-20201030141608-56ca5c5f2380
|
||||
github.com/mattn/go-tty v0.0.0-20191112051231-74040eebce08
|
||||
|
@ -80,8 +76,6 @@ require (
|
|||
github.com/shirou/gopsutil v3.21.1+incompatible
|
||||
github.com/stretchr/testify v1.7.0
|
||||
github.com/ulikunitz/xz v0.5.6
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20200915125933-33de72a328bd
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20200921111412-ef15ded2014c
|
||||
github.com/zclconf/go-cty v1.8.2
|
||||
github.com/zclconf/go-cty-yaml v1.0.1
|
||||
golang.org/x/crypto v0.0.0-20210415154028-4f45737414dc
|
||||
|
@ -90,7 +84,6 @@ require (
|
|||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/tools v0.1.0
|
||||
google.golang.org/grpc v1.36.1
|
||||
)
|
||||
|
||||
go 1.16
|
||||
|
|
24
go.sum
24
go.sum
|
@ -181,8 +181,9 @@ github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3 h1:3b+p838vN4sc37brz9W2H
|
|||
github.com/biogo/hts v0.0.0-20160420073057-50da7d4131a3/go.mod h1:YOY5xnRf7Jz2SZCLSKgVfyqNzbRgyTznM3HyDqQMxcU=
|
||||
github.com/bmatcuk/doublestar v1.1.5 h1:2bNwBOmhyFEFcoB3tGvTD5xanq+4kyOZlB8wFYbMjkk=
|
||||
github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee h1:BnPxIde0gjtTnc9Er7cxvBk8DHLWhEux0SxayC8dP6I=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2 h1:t8KYCwSKsOEZBFELI4Pn/phbp38iJ1RRAkDFNin1aak=
|
||||
github.com/c2h5oh/datasize v0.0.0-20200825124411-48ed595a09d2/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/cheggaaa/pb v1.0.27 h1:wIkZHkNfC7R6GI5w7l/PdAdzXzlrbcI3p8OAlnkTsnc=
|
||||
|
@ -280,6 +281,7 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I
|
|||
github.com/go-ini/ini v1.62.0 h1:7VJT/ZXjzqSrvtraFp4ONq80hTcRQth1c9ZnQ3uNQvU=
|
||||
github.com/go-ini/ini v1.62.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
|
@ -294,8 +296,9 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA
|
|||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48/go.mod h1:dZGr0i9PLlaaTD4H/hoZIDjQ+r6xq8mgbRzHZf7f2J8=
|
||||
github.com/go-resty/resty/v2 v2.3.0 h1:JOOeAvjSlapTT92p8xiS19Zxev1neGikoHsXJeOq8So=
|
||||
github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU=
|
||||
github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4=
|
||||
github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q=
|
||||
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
|
@ -418,8 +421,9 @@ github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORR
|
|||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0 h1:THDBEeQ9xZ8JEaCLyLQqXMMdRqNr0QAUJTIkQAUtFjg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 h1:FlFbCRLd5Jr4iYXZufAvgWN6Ao0JrI5chLINnUXDDr0=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/hako/durafmt v0.0.0-20200710122514-c0fb7b4da026 h1:BpJ2o0OR5FV7vrkDYfXYVJQeMNWa8RhklZOpW2ITAIQ=
|
||||
|
@ -598,6 +602,8 @@ github.com/hashicorp/packer-plugin-vmware v0.0.1 h1:jRQAdjHwg3zeCBb52KoZsuxugrHc
|
|||
github.com/hashicorp/packer-plugin-vmware v0.0.1/go.mod h1:NsiT4IOeDKf/aszQNX+/B1xHrfBR3RdUM3sSqANgNec=
|
||||
github.com/hashicorp/packer-plugin-vsphere v0.0.1 h1:4SUmRP+mGpBJHp6dLL4dmBCC+yDseTktb9YNLj11mVI=
|
||||
github.com/hashicorp/packer-plugin-vsphere v0.0.1/go.mod h1:XMhsLDDT7sD2BWaruLvGPynnn4IqdbrfvuKhb1GK1RI=
|
||||
github.com/hashicorp/packer-plugin-yandex v0.0.4 h1:ev5tPBqR9idsYOY96J0M/UGeuBJFEHhTolzt0JnJQYs=
|
||||
github.com/hashicorp/packer-plugin-yandex v0.0.4/go.mod h1:AFpOpUZMqgn+4RhAfkj8xFgpFJsEtG60dGB0MiFI9cQ=
|
||||
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
|
||||
github.com/hashicorp/serf v0.9.2 h1:yJoyfZXo4Pk2p/M/viW+YLibBFiIbKoP79gu7kDAFP0=
|
||||
github.com/hashicorp/serf v0.9.2/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
|
||||
|
@ -925,10 +931,13 @@ github.com/xanzy/go-cloudstack v2.4.1+incompatible h1:Oc4xa2+I94h1g/QJ+nHoq597nJ
|
|||
github.com/xanzy/go-cloudstack v2.4.1+incompatible/go.mod h1:s3eL3z5pNXF5FVybcT+LIVdId8pYn709yv6v5mrkrQE=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20200915125933-33de72a328bd h1:o4pvS7D4OErKOM6y+/q6IfOa65OaentKbEDh1ABirE8=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20200915125933-33de72a328bd/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20200921111412-ef15ded2014c h1:LJrgyICodRAgtBvOO2eCbhDDIoaJgeLa1tGQecqW9ac=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20210413095726-6b0dcd341e19/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20210419102011-ea71516bb3f7 h1:qI0iykKTUTAFQ34/7cpnFZefKJTa5vprwPs8ncTkFGQ=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20210419102011-ea71516bb3f7/go.mod h1:HEUYX/p8966tMUHHT+TsS0hF/Ca/NYwqprC5WXSDMfE=
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20200921111412-ef15ded2014c/go.mod h1:Zn/U9YKH0w8n83ezLps5eB6Jftc4gSoZWxVR8hgXgoY=
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20210413100926-1c3eb10c58d7 h1:uPMQzYjpkRcn7oWGvFpMKOwgwO4gUm3v7dITULSYw6s=
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20210413100926-1c3eb10c58d7/go.mod h1:cxZ4BdZRJC1XMqb7x09H26zzy6NB9uL9SfcwqIvYmyw=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
|
@ -991,6 +1000,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -1053,6 +1063,7 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
|||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200320220750-118fecf932d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
|
@ -1068,6 +1079,7 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d h1:BgJvlyh+UqCUaPlscHJ+PN8GcpfrFdr7NHjd1JL0+Gs=
|
||||
golang.org/x/net v0.0.0-20210415231046-e915ea6b2b7d/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -1139,6 +1151,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -1299,6 +1312,7 @@ google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfG
|
|||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200323114720-3f67cca34472/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
|
|
|
@ -1,38 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const BuilderId = "packer.post-processor.yandex-export"
|
||||
|
||||
type Artifact struct {
|
||||
paths []string
|
||||
urls []string
|
||||
}
|
||||
|
||||
func (*Artifact) BuilderId() string {
|
||||
return BuilderId
|
||||
}
|
||||
|
||||
func (a *Artifact) Id() string {
|
||||
return a.urls[0]
|
||||
}
|
||||
|
||||
func (a *Artifact) Files() []string {
|
||||
pathsCopy := make([]string, len(a.paths))
|
||||
copy(pathsCopy, a.paths)
|
||||
return pathsCopy
|
||||
}
|
||||
|
||||
func (a *Artifact) String() string {
|
||||
return fmt.Sprintf("Exported artifacts in: %s", a.paths)
|
||||
}
|
||||
|
||||
func (*Artifact) State(name string) interface{} {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *Artifact) Destroy() error {
|
||||
return nil
|
||||
}
|
|
@ -1,23 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
package yandexexport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
type ExchangeConfig struct {
|
||||
// Service Account ID with proper permission to modify an instance, create and attach disk and
|
||||
// make upload to specific Yandex Object Storage paths.
|
||||
ServiceAccountID string `mapstructure:"service_account_id" required:"true"`
|
||||
}
|
||||
|
||||
func (c *ExchangeConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
|
||||
if c.ServiceAccountID == "" {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, fmt.Errorf("service_account_id must be specified"))
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
|
@ -1,345 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
//go:generate packer-sdc mapstructure-to-hcl2 -type Config
|
||||
|
||||
package yandexexport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer/builder/file"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/hashicorp/packer/post-processor/artifice"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1"
|
||||
ycsdk "github.com/yandex-cloud/go-sdk"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultStorageEndpoint = "storage.yandexcloud.net"
|
||||
defaultStorageRegion = "ru-central1"
|
||||
defaultSourceImageFamily = "ubuntu-1604-lts"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.PackerConfig `mapstructure:",squash"`
|
||||
yandex.AccessConfig `mapstructure:",squash"`
|
||||
yandex.CommonConfig `mapstructure:",squash"`
|
||||
ExchangeConfig `mapstructure:",squash"`
|
||||
communicator.SSH `mapstructure:",squash"`
|
||||
communicator.Config `mapstructure:"-"`
|
||||
|
||||
// List of paths to Yandex Object Storage where exported image will be uploaded.
|
||||
// Please be aware that use of space char inside path not supported.
|
||||
// Also this param support [build](/docs/templates/legacy_json_templates/engine) template function.
|
||||
// Check available template data for [Yandex](/docs/builders/yandex#build-template-data) builder.
|
||||
// Paths to Yandex Object Storage where exported image will be uploaded.
|
||||
Paths []string `mapstructure:"paths" required:"true"`
|
||||
|
||||
// The ID of the folder containing the source image. Default `standard-images`.
|
||||
SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
|
||||
// The source image family to start export process. Default `ubuntu-1604-lts`.
|
||||
// Image must contains utils or supported package manager: `apt` or `yum` -
|
||||
// requires `root` or `sudo` without password.
|
||||
// Utils: `qemu-img`, `aws`. The `qemu-img` utility requires `root` user or
|
||||
// `sudo` access without password.
|
||||
SourceImageFamily string `mapstructure:"source_image_family" required:"false"`
|
||||
// The source image ID to use to create the new image from. Just one of a source_image_id or
|
||||
// source_image_family must be specified.
|
||||
SourceImageID string `mapstructure:"source_image_id" required:"false"`
|
||||
// The extra size of the source disk in GB. This defaults to `0GB`.
|
||||
// Requires `losetup` utility on the instance.
|
||||
// > **Careful!** Increases payment cost.
|
||||
// > See [perfomance](https://cloud.yandex.com/docs/compute/concepts/disk#performance).
|
||||
SourceDiskExtraSize int `mapstructure:"source_disk_extra_size" required:"false"`
|
||||
ctx interpolate.Context
|
||||
}
|
||||
|
||||
type PostProcessor struct {
|
||||
config Config
|
||||
runner multistep.Runner
|
||||
}
|
||||
|
||||
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
|
||||
|
||||
func (p *PostProcessor) Configure(raws ...interface{}) error {
|
||||
err := config.Decode(&p.config, &config.DecodeOpts{
|
||||
PluginType: BuilderId,
|
||||
Interpolate: true,
|
||||
InterpolateContext: &p.config.ctx,
|
||||
InterpolateFilter: &interpolate.RenderFilter{
|
||||
Exclude: []string{
|
||||
"paths",
|
||||
},
|
||||
},
|
||||
}, raws...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Accumulate any errors
|
||||
var errs *packersdk.MultiError
|
||||
|
||||
errs = packersdk.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)
|
||||
|
||||
// Set defaults.
|
||||
if p.config.DiskSizeGb == 0 {
|
||||
p.config.DiskSizeGb = 100
|
||||
}
|
||||
if p.config.SSH.SSHUsername == "" {
|
||||
p.config.SSH.SSHUsername = "ubuntu"
|
||||
}
|
||||
p.config.Config = communicator.Config{
|
||||
Type: "ssh",
|
||||
SSH: p.config.SSH,
|
||||
}
|
||||
errs = packersdk.MultiErrorAppend(errs, p.config.Config.Prepare(&p.config.ctx)...)
|
||||
|
||||
if p.config.SourceImageID == "" {
|
||||
if p.config.SourceImageFamily == "" {
|
||||
p.config.SourceImageFamily = defaultSourceImageFamily
|
||||
}
|
||||
if p.config.SourceImageFolderID == "" {
|
||||
p.config.SourceImageFolderID = yandex.StandardImagesFolderID
|
||||
}
|
||||
}
|
||||
if p.config.SourceDiskExtraSize < 0 {
|
||||
errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_disk_extra_size must be greater than zero"))
|
||||
}
|
||||
|
||||
errs = p.config.CommonConfig.Prepare(errs)
|
||||
errs = p.config.ExchangeConfig.Prepare(errs)
|
||||
|
||||
if len(p.config.Paths) == 0 {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, fmt.Errorf("paths must be specified"))
|
||||
}
|
||||
|
||||
// Validate templates in 'paths'
|
||||
for _, path := range p.config.Paths {
|
||||
if err = interpolate.Validate(path, &p.config.ctx); err != nil {
|
||||
errs = packersdk.MultiErrorAppend(
|
||||
errs, fmt.Errorf("Error parsing one of 'paths' template: %s", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs.Errors) > 0 {
|
||||
return errs
|
||||
}
|
||||
|
||||
// Due to the fact that now it's impossible to go to the object storage
|
||||
// through the internal network - we need access
|
||||
// to the global Internet: either through ipv4 or ipv6
|
||||
// TODO: delete this when access appears
|
||||
if p.config.UseIPv4Nat == false && p.config.UseIPv6 == false {
|
||||
log.Printf("[DEBUG] Force use IPv4")
|
||||
p.config.UseIPv4Nat = true
|
||||
}
|
||||
p.config.Preemptible = true //? safety
|
||||
|
||||
if p.config.Labels == nil {
|
||||
p.config.Labels = make(map[string]string)
|
||||
}
|
||||
if _, ok := p.config.Labels["role"]; !ok {
|
||||
p.config.Labels["role"] = "exporter"
|
||||
}
|
||||
if _, ok := p.config.Labels["target"]; !ok {
|
||||
p.config.Labels["target"] = "object-storage"
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifact packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {
|
||||
imageID := ""
|
||||
switch artifact.BuilderId() {
|
||||
case yandex.BuilderID, artifice.BuilderId:
|
||||
imageID = artifact.State("ImageID").(string)
|
||||
case file.BuilderId:
|
||||
fileName := artifact.Files()[0]
|
||||
if content, err := ioutil.ReadFile(fileName); err == nil {
|
||||
imageID = strings.TrimSpace(string(content))
|
||||
} else {
|
||||
return nil, false, false, err
|
||||
}
|
||||
default:
|
||||
err := fmt.Errorf(
|
||||
"Unknown artifact type: %s\nCan only export from Yandex Cloud builder artifact or File builder or Artifice post-processor artifact.",
|
||||
artifact.BuilderId())
|
||||
return nil, false, false, err
|
||||
}
|
||||
|
||||
// prepare and render values
|
||||
var generatedData map[interface{}]interface{}
|
||||
stateData := artifact.State("generated_data")
|
||||
if stateData != nil {
|
||||
// Make sure it's not a nil map so we can assign to it later.
|
||||
generatedData = stateData.(map[interface{}]interface{})
|
||||
}
|
||||
// If stateData has a nil map generatedData will be nil
|
||||
// and we need to make sure it's not
|
||||
if generatedData == nil {
|
||||
generatedData = make(map[interface{}]interface{})
|
||||
}
|
||||
p.config.ctx.Data = generatedData
|
||||
|
||||
var err error
|
||||
// Render this key since we didn't in the configure phase
|
||||
for i, path := range p.config.Paths {
|
||||
p.config.Paths[i], err = interpolate.Render(path, &p.config.ctx)
|
||||
if err != nil {
|
||||
return nil, false, false, fmt.Errorf("Error rendering one of 'path' template: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Rendered path items: %v", p.config.Paths)
|
||||
|
||||
ui.Say(fmt.Sprintf("Exporting image %v to destination: %v", imageID, p.config.Paths))
|
||||
|
||||
driver, err := yandex.NewDriverYC(ui, &p.config.AccessConfig)
|
||||
if err != nil {
|
||||
return nil, false, false, err
|
||||
}
|
||||
imageDescription, err := driver.SDK().Compute().Image().Get(ctx, &compute.GetImageRequest{
|
||||
ImageId: imageID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, false, err
|
||||
}
|
||||
p.config.DiskConfig.DiskSizeGb = chooseBetterDiskSize(ctx, int(imageDescription.GetMinDiskSize()), p.config.DiskConfig.DiskSizeGb)
|
||||
|
||||
// Set up exporter instance configuration.
|
||||
exporterName := strings.ToLower(fmt.Sprintf("%s-exporter", artifact.Id()))
|
||||
yandexConfig := ycSaneDefaults(&p.config, nil)
|
||||
if yandexConfig.InstanceConfig.InstanceName == "" {
|
||||
yandexConfig.InstanceConfig.InstanceName = exporterName
|
||||
}
|
||||
if yandexConfig.DiskName == "" {
|
||||
yandexConfig.DiskName = exporterName
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Validating service_account_id: '%s'...", yandexConfig.ServiceAccountID))
|
||||
if err := validateServiceAccount(ctx, driver.SDK(), yandexConfig.ServiceAccountID); err != nil {
|
||||
return nil, false, false, err
|
||||
}
|
||||
|
||||
// Set up the state.
|
||||
state := new(multistep.BasicStateBag)
|
||||
state.Put("config", &yandexConfig)
|
||||
state.Put("driver", driver)
|
||||
state.Put("sdk", driver.SDK())
|
||||
state.Put("ui", ui)
|
||||
|
||||
// Build the steps.
|
||||
steps := []multistep.Step{
|
||||
&StepCreateS3Keys{
|
||||
ServiceAccountID: p.config.ServiceAccountID,
|
||||
Paths: p.config.Paths,
|
||||
},
|
||||
&yandex.StepCreateSSHKey{
|
||||
Debug: p.config.PackerDebug,
|
||||
DebugKeyPath: fmt.Sprintf("yc_export_pp_%s.pem", p.config.PackerBuildName),
|
||||
},
|
||||
&yandex.StepCreateInstance{
|
||||
Debug: p.config.PackerDebug,
|
||||
SerialLogFile: yandexConfig.SerialLogFile,
|
||||
GeneratedData: &packerbuilderdata.GeneratedData{State: state},
|
||||
},
|
||||
new(yandex.StepInstanceInfo),
|
||||
&communicator.StepConnect{
|
||||
Config: &yandexConfig.Communicator,
|
||||
Host: yandex.CommHost,
|
||||
SSHConfig: yandexConfig.Communicator.SSHConfigFunc(),
|
||||
},
|
||||
&StepAttachDisk{
|
||||
CommonConfig: p.config.CommonConfig,
|
||||
ImageID: imageID,
|
||||
ExtraSize: p.config.SourceDiskExtraSize,
|
||||
},
|
||||
new(StepUploadSecrets),
|
||||
new(StepPrepareTools),
|
||||
&StepDump{
|
||||
ExtraSize: p.config.SourceDiskExtraSize != 0,
|
||||
SizeLimit: imageDescription.GetMinDiskSize(),
|
||||
},
|
||||
&StepUploadToS3{
|
||||
Paths: p.config.Paths,
|
||||
},
|
||||
&yandex.StepTeardownInstance{
|
||||
SerialLogFile: yandexConfig.SerialLogFile,
|
||||
},
|
||||
&commonsteps.StepCleanupTempKeys{Comm: &yandexConfig.Communicator},
|
||||
}
|
||||
|
||||
// Run the steps.
|
||||
p.runner = commonsteps.NewRunner(steps, p.config.PackerConfig, ui)
|
||||
p.runner.Run(ctx, state)
|
||||
if rawErr, ok := state.GetOk("error"); ok {
|
||||
return nil, false, false, rawErr.(error)
|
||||
}
|
||||
|
||||
result := &Artifact{
|
||||
paths: p.config.Paths,
|
||||
urls: formUrls(p.config.Paths),
|
||||
}
|
||||
|
||||
return result, false, false, nil
|
||||
}
|
||||
|
||||
func ycSaneDefaults(c *Config, md map[string]string) yandex.Config {
|
||||
yandexConfig := yandex.Config{
|
||||
CommonConfig: c.CommonConfig,
|
||||
AccessConfig: c.AccessConfig,
|
||||
Communicator: c.Config,
|
||||
}
|
||||
if yandexConfig.Metadata == nil {
|
||||
yandexConfig.Metadata = md
|
||||
} else {
|
||||
for k, v := range md {
|
||||
yandexConfig.Metadata[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
yandexConfig.SourceImageFamily = c.SourceImageFamily
|
||||
yandexConfig.SourceImageFolderID = c.SourceImageFolderID
|
||||
yandexConfig.SourceImageID = c.SourceImageID
|
||||
yandexConfig.ServiceAccountID = c.ServiceAccountID
|
||||
|
||||
return yandexConfig
|
||||
}
|
||||
|
||||
func formUrls(paths []string) []string {
|
||||
result := []string{}
|
||||
for _, path := range paths {
|
||||
url := fmt.Sprintf("https://%s/%s", defaultStorageEndpoint, strings.TrimPrefix(path, "s3://"))
|
||||
result = append(result, url)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func validateServiceAccount(ctx context.Context, ycsdk *ycsdk.SDK, serviceAccountID string) error {
|
||||
_, err := ycsdk.IAM().ServiceAccount().Get(ctx, &iam.GetServiceAccountRequest{
|
||||
ServiceAccountId: serviceAccountID,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func chooseBetterDiskSize(ctx context.Context, minSizeBytes, oldSizeGB int) int {
|
||||
max := math.Max(float64(minSizeBytes), float64((datasize.GB * datasize.ByteSize(oldSizeGB)).Bytes()))
|
||||
return int(math.Ceil(datasize.ByteSize(max).GBytes()))
|
||||
}
|
|
@ -1,186 +0,0 @@
|
|||
// Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT.
|
||||
|
||||
package yandexexport
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/communicator"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
|
||||
type FlatConfig struct {
|
||||
PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
|
||||
PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
|
||||
PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
|
||||
PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
|
||||
PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
|
||||
PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
|
||||
PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
|
||||
PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
|
||||
Endpoint *string `mapstructure:"endpoint" required:"false" cty:"endpoint" hcl:"endpoint"`
|
||||
ServiceAccountKeyFile *string `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file" hcl:"service_account_key_file"`
|
||||
Token *string `mapstructure:"token" required:"true" cty:"token" hcl:"token"`
|
||||
MaxRetries *int `mapstructure:"max_retries" cty:"max_retries" hcl:"max_retries"`
|
||||
SerialLogFile *string `mapstructure:"serial_log_file" required:"false" cty:"serial_log_file" hcl:"serial_log_file"`
|
||||
StateTimeout *string `mapstructure:"state_timeout" required:"false" cty:"state_timeout" hcl:"state_timeout"`
|
||||
InstanceCores *int `mapstructure:"instance_cores" required:"false" cty:"instance_cores" hcl:"instance_cores"`
|
||||
InstanceGpus *int `mapstructure:"instance_gpus" required:"false" cty:"instance_gpus" hcl:"instance_gpus"`
|
||||
InstanceMemory *int `mapstructure:"instance_mem_gb" required:"false" cty:"instance_mem_gb" hcl:"instance_mem_gb"`
|
||||
InstanceName *string `mapstructure:"instance_name" required:"false" cty:"instance_name" hcl:"instance_name"`
|
||||
PlatformID *string `mapstructure:"platform_id" required:"false" cty:"platform_id" hcl:"platform_id"`
|
||||
Labels map[string]string `mapstructure:"labels" required:"false" cty:"labels" hcl:"labels"`
|
||||
Metadata map[string]string `mapstructure:"metadata" required:"false" cty:"metadata" hcl:"metadata"`
|
||||
MetadataFromFile map[string]string `mapstructure:"metadata_from_file" cty:"metadata_from_file" hcl:"metadata_from_file"`
|
||||
Preemptible *bool `mapstructure:"preemptible" cty:"preemptible" hcl:"preemptible"`
|
||||
DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name" hcl:"disk_name"`
|
||||
DiskSizeGb *int `mapstructure:"disk_size_gb" required:"false" cty:"disk_size_gb" hcl:"disk_size_gb"`
|
||||
DiskType *string `mapstructure:"disk_type" required:"false" cty:"disk_type" hcl:"disk_type"`
|
||||
DiskLabels map[string]string `mapstructure:"disk_labels" required:"false" cty:"disk_labels" hcl:"disk_labels"`
|
||||
SubnetID *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"`
|
||||
Zone *string `mapstructure:"zone" required:"false" cty:"zone" hcl:"zone"`
|
||||
UseIPv4Nat *bool `mapstructure:"use_ipv4_nat" required:"false" cty:"use_ipv4_nat" hcl:"use_ipv4_nat"`
|
||||
UseIPv6 *bool `mapstructure:"use_ipv6" required:"false" cty:"use_ipv6" hcl:"use_ipv6"`
|
||||
UseInternalIP *bool `mapstructure:"use_internal_ip" required:"false" cty:"use_internal_ip" hcl:"use_internal_ip"`
|
||||
FolderID *string `mapstructure:"folder_id" required:"true" cty:"folder_id" hcl:"folder_id"`
|
||||
ServiceAccountID *string `mapstructure:"service_account_id" required:"true" cty:"service_account_id" hcl:"service_account_id"`
|
||||
SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"`
|
||||
SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"`
|
||||
SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"`
|
||||
SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"`
|
||||
SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"`
|
||||
SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"`
|
||||
SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"`
|
||||
SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" hcl:"temporary_key_pair_bits"`
|
||||
SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"`
|
||||
SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"`
|
||||
SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"`
|
||||
SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
|
||||
SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"`
|
||||
SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"`
|
||||
SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"`
|
||||
SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"`
|
||||
SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"`
|
||||
SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"`
|
||||
SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"`
|
||||
SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"`
|
||||
SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"`
|
||||
SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"`
|
||||
SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"`
|
||||
SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"`
|
||||
SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"`
|
||||
SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"`
|
||||
SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"`
|
||||
SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"`
|
||||
SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"`
|
||||
SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"`
|
||||
SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"`
|
||||
SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"`
|
||||
SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"`
|
||||
SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"`
|
||||
SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"`
|
||||
SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"`
|
||||
SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"`
|
||||
SSHPrivateKey []byte `mapstructure:"ssh_private_key" undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"`
|
||||
Config *communicator.FlatConfig `mapstructure:"-" cty:"-" hcl:"-"`
|
||||
Paths []string `mapstructure:"paths" required:"true" cty:"paths" hcl:"paths"`
|
||||
SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"`
|
||||
SourceImageFamily *string `mapstructure:"source_image_family" required:"false" cty:"source_image_family" hcl:"source_image_family"`
|
||||
SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"`
|
||||
SourceDiskExtraSize *int `mapstructure:"source_disk_extra_size" required:"false" cty:"source_disk_extra_size" hcl:"source_disk_extra_size"`
|
||||
}
|
||||
|
||||
// FlatMapstructure returns a new FlatConfig.
|
||||
// FlatConfig is an auto-generated flat version of Config.
|
||||
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
|
||||
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
|
||||
return new(FlatConfig)
|
||||
}
|
||||
|
||||
// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
// NOTE: auto-generated mapping of HCL attribute names to cty types —
// regenerate rather than editing by hand.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		// Core Packer template options (squashed common config).
		"packer_build_name":            &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
		"packer_builder_type":          &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
		"packer_core_version":          &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
		"packer_debug":                 &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
		"packer_force":                 &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
		"packer_on_error":              &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
		"packer_user_variables":        &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
		"packer_sensitive_variables":   &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
		// Yandex.Cloud API access options.
		"endpoint":                     &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false},
		"service_account_key_file":     &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false},
		"token":                        &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
		"max_retries":                  &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
		// Instance, disk and network options.
		"serial_log_file":              &hcldec.AttrSpec{Name: "serial_log_file", Type: cty.String, Required: false},
		"state_timeout":                &hcldec.AttrSpec{Name: "state_timeout", Type: cty.String, Required: false},
		"instance_cores":               &hcldec.AttrSpec{Name: "instance_cores", Type: cty.Number, Required: false},
		"instance_gpus":                &hcldec.AttrSpec{Name: "instance_gpus", Type: cty.Number, Required: false},
		"instance_mem_gb":              &hcldec.AttrSpec{Name: "instance_mem_gb", Type: cty.Number, Required: false},
		"instance_name":                &hcldec.AttrSpec{Name: "instance_name", Type: cty.String, Required: false},
		"platform_id":                  &hcldec.AttrSpec{Name: "platform_id", Type: cty.String, Required: false},
		"labels":                       &hcldec.AttrSpec{Name: "labels", Type: cty.Map(cty.String), Required: false},
		"metadata":                     &hcldec.AttrSpec{Name: "metadata", Type: cty.Map(cty.String), Required: false},
		"metadata_from_file":           &hcldec.AttrSpec{Name: "metadata_from_file", Type: cty.Map(cty.String), Required: false},
		"preemptible":                  &hcldec.AttrSpec{Name: "preemptible", Type: cty.Bool, Required: false},
		"disk_name":                    &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false},
		"disk_size_gb":                 &hcldec.AttrSpec{Name: "disk_size_gb", Type: cty.Number, Required: false},
		"disk_type":                    &hcldec.AttrSpec{Name: "disk_type", Type: cty.String, Required: false},
		"disk_labels":                  &hcldec.AttrSpec{Name: "disk_labels", Type: cty.Map(cty.String), Required: false},
		"subnet_id":                    &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false},
		"zone":                         &hcldec.AttrSpec{Name: "zone", Type: cty.String, Required: false},
		"use_ipv4_nat":                 &hcldec.AttrSpec{Name: "use_ipv4_nat", Type: cty.Bool, Required: false},
		"use_ipv6":                     &hcldec.AttrSpec{Name: "use_ipv6", Type: cty.Bool, Required: false},
		"use_internal_ip":              &hcldec.AttrSpec{Name: "use_internal_ip", Type: cty.Bool, Required: false},
		"folder_id":                    &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false},
		"service_account_id":           &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
		// Communicator (SSH) options from the squashed communicator config.
		"ssh_host":                     &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false},
		"ssh_port":                     &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false},
		"ssh_username":                 &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
		"ssh_password":                 &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false},
		"ssh_keypair_name":             &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false},
		"temporary_key_pair_name":      &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false},
		"temporary_key_pair_type":      &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false},
		"temporary_key_pair_bits":      &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false},
		"ssh_ciphers":                  &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false},
		"ssh_clear_authorized_keys":    &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false},
		"ssh_key_exchange_algorithms":  &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false},
		"ssh_private_key_file":         &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
		"ssh_certificate_file":         &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false},
		"ssh_pty":                      &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false},
		"ssh_timeout":                  &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false},
		"ssh_wait_timeout":             &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false},
		"ssh_agent_auth":               &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false},
		"ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false},
		"ssh_handshake_attempts":       &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false},
		"ssh_bastion_host":             &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false},
		"ssh_bastion_port":             &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false},
		"ssh_bastion_agent_auth":       &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false},
		"ssh_bastion_username":         &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false},
		"ssh_bastion_password":         &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false},
		"ssh_bastion_interactive":      &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false},
		"ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false},
		"ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false},
		"ssh_file_transfer_method":     &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false},
		"ssh_proxy_host":               &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false},
		"ssh_proxy_port":               &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false},
		"ssh_proxy_username":           &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false},
		"ssh_proxy_password":           &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false},
		"ssh_keep_alive_interval":      &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false},
		"ssh_read_write_timeout":       &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false},
		"ssh_remote_tunnels":           &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false},
		"ssh_local_tunnels":            &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false},
		// []byte fields are encoded as lists of numbers by the generator.
		"ssh_public_key":               &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false},
		"ssh_private_key":              &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false},
		// Block spec emitted for the embedded communicator FlatConfig field
		// that is tagged mapstructure:"-" (generator artifact).
		"-":                            &hcldec.BlockSpec{TypeName: "-", Nested: hcldec.ObjectSpec((*communicator.FlatConfig)(nil).HCL2Spec())},
		// Export post-processor specific options.
		"paths":                        &hcldec.AttrSpec{Name: "paths", Type: cty.List(cty.String), Required: false},
		"source_image_folder_id":       &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false},
		"source_image_family":          &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false},
		"source_image_id":              &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false},
		"source_disk_extra_size":       &hcldec.AttrSpec{Name: "source_disk_extra_size", Type: cty.Number, Required: false},
	}
	return s
}
|
|
@ -1,183 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
)
|
||||
|
||||
// TestPostProcessor_Configure exercises PostProcessor.Configure with various
// credential/ID combinations and checks only whether an error is expected:
//   - no credentials at all is accepted (env-based auth is presumably
//     resolved later — TODO confirm against AccessConfig.Prepare),
//   - supplying both a token and a service-account key file is rejected,
//   - service_account_id and folder_id are required.
func TestPostProcessor_Configure(t *testing.T) {
	type fields struct {
		config Config
		runner multistep.Runner
	}
	type args struct {
		raws []interface{}
	}
	tests := []struct {
		name    string
		fields  fields
		args    args
		wantErr bool
	}{
		{
			name: "no one creds",
			fields: fields{
				config: Config{
					AccessConfig: yandex.AccessConfig{
						Token:                 "",
						ServiceAccountKeyFile: "",
					},
					ExchangeConfig: ExchangeConfig{
						ServiceAccountID: "some-srv-acc-id",
					},
					CommonConfig: yandex.CommonConfig{
						CloudConfig: yandex.CloudConfig{
							FolderID: "some-folder-id",
						},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "both token and sa key file",
			fields: fields{
				config: Config{
					AccessConfig: yandex.AccessConfig{
						Token: "some-value",
						// Deliberately non-existent: the conflict should be
						// detected before the file is read.
						ServiceAccountKeyFile: "path/not-exist.file",
					},
					ExchangeConfig: ExchangeConfig{
						ServiceAccountID: "some-srv-acc-id",
					},
					CommonConfig: yandex.CommonConfig{
						CloudConfig: yandex.CloudConfig{
							FolderID: "some-folder-id",
						},
					},
				},
			},
			wantErr: true,
		},
		{
			name: "use sa key file",
			fields: fields{
				config: Config{
					AccessConfig: yandex.AccessConfig{
						Token:                 "",
						ServiceAccountKeyFile: "testdata/fake-sa-key.json",
					},
					ExchangeConfig: ExchangeConfig{
						ServiceAccountID: "some-srv-acc-id",
					},
					CommonConfig: yandex.CommonConfig{
						CloudConfig: yandex.CloudConfig{
							FolderID: "some-folder-id",
						},
					},
				},
			},
			wantErr: false,
		},
		{
			name: "service_account_id required",
			fields: fields{
				config: Config{
					AccessConfig: yandex.AccessConfig{
						Token: "some token",
					},
					ExchangeConfig: ExchangeConfig{
						ServiceAccountID: "",
					},
					CommonConfig: yandex.CommonConfig{
						CloudConfig: yandex.CloudConfig{
							FolderID: "some-folder-id",
						},
					},
				},
			},
			wantErr: true,
		},
		{
			name: "folderID required",
			fields: fields{
				config: Config{
					AccessConfig: yandex.AccessConfig{
						Token: "some token",
					},
					ExchangeConfig: ExchangeConfig{
						ServiceAccountID: "some-srv-acc-id",
					},
					CommonConfig: yandex.CommonConfig{
						CloudConfig: yandex.CloudConfig{
							FolderID: "",
						},
					},
				},
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.fields.config.Paths = []string{"some-path"} // make Paths not empty
			p := &PostProcessor{
				config: tt.fields.config,
				runner: tt.fields.runner,
			}
			if err := p.Configure(tt.args.raws...); (err != nil) != tt.wantErr {
				t.Errorf("Configure() error = %v, wantErr %v", err, tt.wantErr)
			}
		})
	}
}
|
||||
|
||||
func Test_formUrls(t *testing.T) {
|
||||
type args struct {
|
||||
paths []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantResult []string
|
||||
}{
|
||||
{
|
||||
name: "empty list",
|
||||
args: args{
|
||||
paths: []string{},
|
||||
},
|
||||
wantResult: []string{},
|
||||
},
|
||||
{
|
||||
name: "one element",
|
||||
args: args{
|
||||
paths: []string{"s3://bucket1/object1"},
|
||||
},
|
||||
wantResult: []string{"https://" + defaultStorageEndpoint + "/bucket1/object1"},
|
||||
},
|
||||
{
|
||||
name: "several elements",
|
||||
args: args{
|
||||
paths: []string{
|
||||
"s3://bucket1/object1",
|
||||
"s3://bucket-name/object-with/prefix/filename.blob",
|
||||
"s3://bucket-too/foo/bar.test",
|
||||
},
|
||||
},
|
||||
wantResult: []string{
|
||||
"https://" + defaultStorageEndpoint + "/bucket1/object1",
|
||||
"https://" + defaultStorageEndpoint + "/bucket-name/object-with/prefix/filename.blob",
|
||||
"https://" + defaultStorageEndpoint + "/bucket-too/foo/bar.test",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
require.Equal(t, tt.wantResult, formUrls(tt.args.paths))
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,102 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/c2h5oh/datasize"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
)
|
||||
|
||||
// StepAttachDisk creates a secondary disk from the source image and attaches
// it to the export instance (see Run for the device-name contract).
type StepAttachDisk struct {
	yandex.CommonConfig
	// ImageID is the source image the disk is created from.
	ImageID string
	// ExtraSize is additional capacity, in GB, added on top of the image's
	// minimum disk size.
	ExtraSize int
}
|
||||
|
||||
func (c *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
driver := state.Get("driver").(yandex.Driver)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
instanceID := state.Get("instance_id").(string)
|
||||
|
||||
ui.Say("Create secondary disk from image for export...")
|
||||
|
||||
imageDesc, err := driver.SDK().Compute().Image().Get(ctx, &compute.GetImageRequest{
|
||||
ImageId: c.ImageID,
|
||||
})
|
||||
if err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
|
||||
op, err := driver.SDK().WrapOperation(driver.SDK().Compute().Disk().Create(ctx, &compute.CreateDiskRequest{
|
||||
Source: &compute.CreateDiskRequest_ImageId{
|
||||
ImageId: c.ImageID,
|
||||
},
|
||||
Name: fmt.Sprintf("export-%s-disk", instanceID),
|
||||
Size: int64(datasize.ByteSize(c.ExtraSize)*datasize.GB) + imageDesc.GetMinDiskSize(),
|
||||
ZoneId: c.Zone,
|
||||
FolderId: c.FolderID,
|
||||
TypeId: c.DiskType,
|
||||
Description: "Temporary disk for exporting",
|
||||
}))
|
||||
if op == nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
protoMD, err := op.Metadata()
|
||||
if err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
md, ok := protoMD.(*compute.CreateDiskMetadata)
|
||||
if !ok {
|
||||
return yandex.StepHaltWithError(state, fmt.Errorf("could not get Disk ID from create operation metadata"))
|
||||
}
|
||||
state.Put("secondary_disk_id", md.GetDiskId())
|
||||
|
||||
if err := op.Wait(ctx); err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
|
||||
ui.Say("Attach secondary disk to instance...")
|
||||
|
||||
op, err = driver.SDK().WrapOperation(driver.SDK().Compute().Instance().AttachDisk(ctx, &compute.AttachInstanceDiskRequest{
|
||||
InstanceId: instanceID,
|
||||
AttachedDiskSpec: &compute.AttachedDiskSpec{
|
||||
AutoDelete: true,
|
||||
DeviceName: "doexport",
|
||||
Disk: &compute.AttachedDiskSpec_DiskId{
|
||||
DiskId: md.GetDiskId(),
|
||||
},
|
||||
},
|
||||
}))
|
||||
if err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
ui.Message("Wait attached disk...")
|
||||
if err := op.Wait(ctx); err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
|
||||
state.Remove("secondary_disk_id")
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepAttachDisk) Cleanup(state multistep.StateBag) {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
driver := state.Get("driver").(yandex.Driver)
|
||||
if diskID, ok := state.GetOk("secondary_disk_id"); ok {
|
||||
ui.Say("Remove the secondary disk...")
|
||||
op, err := driver.SDK().WrapOperation(driver.SDK().Compute().Disk().Delete(context.Background(), &compute.DeleteDiskRequest{
|
||||
DiskId: diskID.(string),
|
||||
}))
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
return
|
||||
}
|
||||
if err := op.Wait(context.Background()); err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,154 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility"
|
||||
)
|
||||
|
||||
// StepCreateS3Keys provisions a temporary AWS-compatible access key for the
// given service account and verifies it can write to every target path.
type StepCreateS3Keys struct {
	// ServiceAccountID owns the temporary static access key.
	ServiceAccountID string
	// Paths are the destination object-storage URLs that must be writable.
	Paths []string
}
|
||||
|
||||
func (c *StepCreateS3Keys) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
driver := state.Get("driver").(yandex.Driver)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
ui.Say("Create temporary storage Access Key")
|
||||
// Create temporary storage Access Key
|
||||
respWithKey, err := driver.SDK().IAM().AWSCompatibility().AccessKey().Create(ctx, &awscompatibility.CreateAccessKeyRequest{
|
||||
ServiceAccountId: c.ServiceAccountID,
|
||||
Description: "this temporary key is for upload image to storage; created by Packer",
|
||||
})
|
||||
if err != nil {
|
||||
err := fmt.Errorf("Error waiting for cloud-init script to finish: %s", err)
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
state.Put("s3_secret", respWithKey)
|
||||
|
||||
ui.Say("Verify access to paths")
|
||||
if err := verfiyAccess(respWithKey.GetAccessKey().GetKeyId(), respWithKey.Secret, c.Paths); err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
func (s *StepCreateS3Keys) Cleanup(state multistep.StateBag) {
|
||||
driver := state.Get("driver").(yandex.Driver)
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
|
||||
if val, ok := state.GetOk("s3_secret"); ok {
|
||||
ui.Say("S3 secrets have been found")
|
||||
s3Secret := val.(*awscompatibility.CreateAccessKeyResponse)
|
||||
|
||||
ui.Message("Cleanup empty objects...")
|
||||
cleanUpEmptyObjects(s3Secret.GetAccessKey().GetKeyId(), s3Secret.GetSecret(), s.Paths)
|
||||
|
||||
ui.Say("Delete S3 secrets...")
|
||||
_, err := driver.SDK().IAM().AWSCompatibility().AccessKey().Delete(context.Background(), &awscompatibility.DeleteAccessKeyRequest{
|
||||
AccessKeyId: s3Secret.GetAccessKey().GetId(),
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ui.Error(err.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func verfiyAccess(keyID, secret string, paths []string) error {
|
||||
newSession, err := session.NewSession(&aws.Config{
|
||||
Endpoint: aws.String(defaultStorageEndpoint),
|
||||
Region: aws.String(defaultStorageRegion),
|
||||
Credentials: credentials.NewStaticCredentials(
|
||||
keyID, secret, "",
|
||||
),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s3Conn := s3.New(newSession)
|
||||
|
||||
for _, path := range paths {
|
||||
u, err := url.Parse(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := u.Path
|
||||
if strings.HasSuffix(key, "/") {
|
||||
key = filepath.Join(key, "disk.qcow2")
|
||||
}
|
||||
_, err = s3Conn.PutObject(&s3.PutObjectInput{
|
||||
Body: aws.ReadSeekCloser(strings.NewReader("")),
|
||||
Bucket: aws.String(u.Host),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanUpEmptyObjects(keyID, secret string, paths []string) {
|
||||
newSession, err := session.NewSession(&aws.Config{
|
||||
Endpoint: aws.String(defaultStorageEndpoint),
|
||||
Region: aws.String(defaultStorageRegion),
|
||||
Credentials: credentials.NewStaticCredentials(
|
||||
keyID, secret, "",
|
||||
),
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[WARN] %s", err)
|
||||
return
|
||||
}
|
||||
s3Conn := s3.New(newSession)
|
||||
|
||||
for _, path := range paths {
|
||||
u, err := url.Parse(path)
|
||||
if err != nil {
|
||||
log.Printf("[WARN] %s", err)
|
||||
continue
|
||||
}
|
||||
key := u.Path
|
||||
if strings.HasSuffix(key, "/") {
|
||||
key = filepath.Join(key, "disk.qcow2")
|
||||
}
|
||||
|
||||
log.Printf("Check object: '%s'", path)
|
||||
respHead, err := s3Conn.HeadObject(&s3.HeadObjectInput{
|
||||
Bucket: aws.String(u.Host),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[WARN] %s", err)
|
||||
continue
|
||||
}
|
||||
if *respHead.ContentLength > 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Delete object: '%s'", path)
|
||||
_, err = s3Conn.DeleteObject(&s3.DeleteObjectInput{
|
||||
Bucket: aws.String(u.Host),
|
||||
Key: aws.String(key),
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("[WARN] %s", err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,94 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
)
|
||||
|
||||
// StepDump converts the attached export disk into a qcow2 file ("disk.qcow2")
// on the instance using qemu-img.
type StepDump struct {
	// ExtraSize, when true (and losetup is available), maps the source device
	// through a read-only loop device limited to SizeLimit bytes first.
	ExtraSize bool
	// SizeLimit is the loop-device size limit in bytes.
	SizeLimit int64
}

const (
	// dumpCommand is the qemu-img convert invocation; the two %s slots are an
	// optional "sudo " prefix and the source device path. stderr is folded
	// into stdout so progress/errors reach the UI.
	dumpCommand = "%sqemu-img convert -O qcow2 -o cluster_size=2M %s disk.qcow2 2>&1"
)
|
||||
|
||||
// Run dumps the attached export disk to disk.qcow2 with qemu-img.
// (The previous comment about reading cloud-init metadata was copy-pasted
// from another step and did not describe this code.)
//
// Flow: probe whether the device is readable without sudo; optionally remap
// it through a size-limited read-only loop device; then run qemu-img convert
// while a background goroutine periodically signals qemu-img.
func (s *StepDump) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)
	comm := state.Get("communicator").(packersdk.Communicator)

	device := "/dev/disk/by-id/virtio-doexport"
	// If plain `qemu-img info` on the device fails, assume we need sudo for
	// all subsequent device access.
	cmdDumpCheckAccess := &packersdk.RemoteCmd{
		Command: fmt.Sprintf("qemu-img info %s", device),
	}
	if err := comm.Start(ctx, cmdDumpCheckAccess); err != nil {
		return yandex.StepHaltWithError(state, err)
	}
	sudo := ""
	if cmdDumpCheckAccess.Wait() != 0 {
		sudo = "sudo "
	}

	if s.ExtraSize && which(ctx, comm, "losetup") == nil {
		// Expose only the first SizeLimit bytes of the device, read-only,
		// through a loop device; dump from that instead.
		ui.Say("Map loop device...")
		buff := new(bytes.Buffer)
		cmd := &packersdk.RemoteCmd{
			Command: fmt.Sprintf("%slosetup --show -r --sizelimit %d -f %s", sudo, s.SizeLimit, device),
			Stdout:  buff,
		}
		if err := comm.Start(ctx, cmd); err != nil {
			return yandex.StepHaltWithError(state, err)
		}
		if cmd.Wait() != 0 {
			return yandex.StepHaltWithError(state, fmt.Errorf("Cannot losetup: %d", cmd.ExitStatus()))
		}
		// losetup --show prints the allocated loop device path on stdout.
		device = strings.TrimSpace(buff.String())
		if device == "" {
			return yandex.StepHaltWithError(state, fmt.Errorf("Bad lo device"))
		}
	}
	wg := new(sync.WaitGroup)
	defer wg.Wait()
	ctxWithCancel, cancel := context.WithCancel(ctx)
	defer cancel()
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Send SIGUSR1 to qemu-img every 3s while the dump runs — presumably
		// to make qemu-img report progress (TODO confirm); cancelled via
		// ctxWithCancel when Run returns.
		cmd := &packersdk.RemoteCmd{
			Command: "while true ; do sleep 3; sudo kill -s SIGUSR1 $(pidof qemu-img); done",
		}

		err := cmd.RunWithUi(ctxWithCancel, comm, ui)
		if err != nil && !errors.Is(err, context.Canceled) {
			ui.Error("qemu-img signal sender error: " + err.Error())
			return
		}
	}()

	cmdDump := &packersdk.RemoteCmd{
		Command: fmt.Sprintf(dumpCommand, sudo, device),
	}
	ui.Say("Dumping...")
	if err := cmdDump.RunWithUi(ctx, comm, ui); err != nil {
		return yandex.StepHaltWithError(state, err)
	}
	if cmdDump.ExitStatus() != 0 {
		return yandex.StepHaltWithError(state, fmt.Errorf("Cannot dump disk, exit code: %d", cmdDump.ExitStatus()))
	}

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup nothing — the dump only leaves files on the temporary instance.
func (s *StepDump) Cleanup(state multistep.StateBag) {}
|
|
@ -1,150 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
)
|
||||
|
||||
// StepPrepareTools makes sure qemu-img and the aws CLI are present on the
// export instance, installing them via the detected package manager.
type StepPrepareTools struct{}

// Run installs qemu-img and aws when they are missing from the remote PATH.
// (The previous comment about reading cloud-init metadata was copy-pasted
// from another step and did not describe this code.)
// The package-manager detection error is deliberately surfaced only when
// something actually has to be installed.
func (s *StepPrepareTools) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	ui := state.Get("ui").(packersdk.Ui)
	comm := state.Get("communicator").(packersdk.Communicator)
	pkgManager, errPkgManager := detectPkgManager(ctx, comm)

	if which(ctx, comm, "qemu-img") != nil {
		if errPkgManager != nil {
			return yandex.StepHaltWithError(state, errPkgManager)
		}
		ui.Message("Install qemu-img...")
		if err := pkgManager.InstallQemuIMG(ctx, comm); err != nil {
			return yandex.StepHaltWithError(state, err)
		}
	}
	if which(ctx, comm, "aws") != nil {
		if errPkgManager != nil {
			return yandex.StepHaltWithError(state, errPkgManager)
		}
		ui.Message("Install aws...")
		if err := pkgManager.InstallAWS(ctx, comm); err != nil {
			return yandex.StepHaltWithError(state, err)
		}
	}

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup nothing — installed packages stay on the temporary instance.
func (s *StepPrepareTools) Cleanup(state multistep.StateBag) {}
|
||||
|
||||
func detectPkgManager(ctx context.Context, comm packersdk.Communicator) (pkgManager, error) {
|
||||
if err := which(ctx, comm, "apt"); err == nil {
|
||||
return &apt{}, nil
|
||||
}
|
||||
if err := which(ctx, comm, "yum"); err == nil {
|
||||
return &yum{}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Cannot detect package manager")
|
||||
}
|
||||
|
||||
func which(ctx context.Context, comm packersdk.Communicator, what string) error {
|
||||
cmdCheckAPT := &packersdk.RemoteCmd{
|
||||
Command: fmt.Sprintf("which %s", what),
|
||||
}
|
||||
if err := comm.Start(ctx, cmdCheckAPT); err != nil {
|
||||
return err
|
||||
}
|
||||
if cmdCheckAPT.Wait() == 0 {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Not found: %s", what)
|
||||
}
|
||||
|
||||
// pkgManager abstracts the remote host's package manager; it is used to
// install the tooling (qemu-img, aws CLI) required for the dump and upload.
type pkgManager interface {
	InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error
	InstallAWS(ctx context.Context, comm packersdk.Communicator) error
}

// apt is the Debian/Ubuntu implementation of pkgManager.
type apt struct {
	// updated is set after the first successful `apt update`, so the package
	// index is refreshed at most once.
	updated bool
}
|
||||
|
||||
func (p *apt) InstallAWS(ctx context.Context, comm packersdk.Communicator) error {
|
||||
if err := p.Update(ctx, comm); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := execCMDWithSudo(ctx, comm, "apt install -y awscli"); err != nil {
|
||||
return fmt.Errorf("Cannot install awscli")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *apt) InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error {
|
||||
if err := p.Update(ctx, comm); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := execCMDWithSudo(ctx, comm, "apt install -y qemu-utils"); err != nil {
|
||||
return fmt.Errorf("Cannot install qemu-utils")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (p *apt) Update(ctx context.Context, comm packersdk.Communicator) error {
|
||||
if p.updated {
|
||||
return nil
|
||||
}
|
||||
if err := execCMDWithSudo(ctx, comm, "apt update"); err != nil {
|
||||
return fmt.Errorf("Cannot update: %s", err)
|
||||
}
|
||||
p.updated = true
|
||||
return nil
|
||||
}
|
||||
|
||||
type yum struct{}
|
||||
|
||||
func (p *yum) InstallAWS(ctx context.Context, comm packersdk.Communicator) error {
|
||||
if which(ctx, comm, "pip3") != nil {
|
||||
if err := execCMDWithSudo(ctx, comm, "yum install -y python3-pip"); err != nil {
|
||||
return fmt.Errorf("Cannot install qemu-img: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := execCMDWithSudo(ctx, comm, "pip3 install awscli"); err != nil {
|
||||
return fmt.Errorf("Install awscli: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *yum) InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error {
|
||||
if err := execCMDWithSudo(ctx, comm, "yum install -y libgcrypt qemu-img"); err != nil {
|
||||
return fmt.Errorf("Cannot install qemu-img: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func execCMDWithSudo(ctx context.Context, comm packersdk.Communicator, cmdStr string) error {
|
||||
cmd := &packersdk.RemoteCmd{
|
||||
Command: cmdStr,
|
||||
}
|
||||
if err := comm.Start(ctx, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if cmd.Wait() != 0 {
|
||||
cmd := &packersdk.RemoteCmd{
|
||||
Command: fmt.Sprintf("sudo %s", cmdStr),
|
||||
}
|
||||
if err := comm.Start(ctx, cmd); err != nil {
|
||||
return err
|
||||
}
|
||||
if cmd.Wait() != 0 {
|
||||
return fmt.Errorf("Bad exit code: %d", cmd.ExitStatus())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility"
|
||||
)
|
||||
|
||||
// StepUploadSecrets writes the temporary AWS-compatible credentials onto the
// export instance so the aws CLI there can upload the image.
type StepUploadSecrets struct{}

const (
	// sharedAWSCredFile is where the credentials file lands on the instance;
	// the upload step points AWS_SHARED_CREDENTIALS_FILE at it.
	sharedAWSCredFile = "/tmp/aws-credentials"
)

// Run renders an AWS shared-credentials file from the "s3_secret" access key
// in state and uploads it to the instance.
// (The previous comment about reading cloud-init metadata was copy-pasted
// from another step and did not describe this code.)
func (s *StepUploadSecrets) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
	// Assert (and discard) presence/type of config and driver in state;
	// a missing or mistyped entry panics early here.
	_ = state.Get("config").(*yandex.Config)
	_ = state.Get("driver").(yandex.Driver)
	ui := state.Get("ui").(packersdk.Ui)
	comm := state.Get("communicator").(packersdk.Communicator)
	s3Secret := state.Get("s3_secret").(*awscompatibility.CreateAccessKeyResponse)

	ui.Say("Upload secrets..")
	creds := fmt.Sprintf(
		"[default]\naws_access_key_id = %s\naws_secret_access_key = %s\n",
		s3Secret.GetAccessKey().GetKeyId(),
		s3Secret.GetSecret())

	err := comm.Upload(sharedAWSCredFile, strings.NewReader(creds), nil)
	if err != nil {
		return yandex.StepHaltWithError(state, err)
	}
	ui.Message("Secrets has been uploaded")

	return multistep.ActionContinue
}
|
||||
|
||||
// Cleanup is a no-op: the credentials file dies with the temporary instance,
// and the access key itself is revoked by StepCreateS3Keys.Cleanup.
func (s *StepUploadSecrets) Cleanup(state multistep.StateBag) {}
|
|
@ -1,101 +0,0 @@
|
|||
package yandexexport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/packer-plugin-sdk/multistep"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
)
|
||||
|
||||
// StepUploadToS3 copies the dumped disk.qcow2 from the instance to the
// configured object-storage destinations.
type StepUploadToS3 struct {
	// Paths are the destination s3:// URLs; per Run, the first is uploaded
	// directly and the remainder are copies of it.
	Paths []string
}
|
||||
|
||||
// Run reads the instance metadata and looks for the log entry
|
||||
// indicating the cloud-init script finished.
|
||||
func (s *StepUploadToS3) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
|
||||
ui := state.Get("ui").(packersdk.Ui)
|
||||
comm := state.Get("communicator").(packersdk.Communicator)
|
||||
|
||||
cmdUploadToS3 := &packersdk.RemoteCmd{
|
||||
Command: fmt.Sprintf(
|
||||
"%s=%s aws s3 --region=%s --endpoint-url=https://%s cp disk.qcow2 %s",
|
||||
"AWS_SHARED_CREDENTIALS_FILE",
|
||||
sharedAWSCredFile,
|
||||
defaultStorageRegion,
|
||||
defaultStorageEndpoint,
|
||||
s.Paths[0],
|
||||
),
|
||||
}
|
||||
ui.Say("Upload to S3...")
|
||||
if err := cmdUploadToS3.RunWithUi(ctx, comm, ui); err != nil {
|
||||
return yandex.StepHaltWithError(state, err)
|
||||
}
|
||||
if cmdUploadToS3.ExitStatus() != 0 {
|
||||
return yandex.StepHaltWithError(state, fmt.Errorf("Cannout upload to S3, exit code %d", cmdUploadToS3.ExitStatus()))
|
||||
}
|
||||
|
||||
versionExtraFlags, err := getVersionExtraFlags(ctx, comm)
|
||||
if err != nil {
|
||||
ui.Message(fmt.Sprintf("[WARN] Cannot upload to other storage: %s", err))
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
wg := new(sync.WaitGroup)
|
||||
defer wg.Wait()
|
||||
for _, path := range s.Paths[1:] {
|
||||
|
||||
wg.Add(1)
|
||||
go func(path string) {
|
||||
defer wg.Done()
|
||||
ui.Message(fmt.Sprintf("Start copy %s to %s...", s.Paths[0], path))
|
||||
cmd := &packersdk.RemoteCmd{
|
||||
Command: fmt.Sprintf(
|
||||
"%s=%s aws s3 --region=%s --endpoint-url=https://%s cp %s %s %s",
|
||||
"AWS_SHARED_CREDENTIALS_FILE",
|
||||
sharedAWSCredFile,
|
||||
defaultStorageRegion,
|
||||
defaultStorageEndpoint,
|
||||
versionExtraFlags,
|
||||
s.Paths[0],
|
||||
path,
|
||||
),
|
||||
}
|
||||
if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
|
||||
ui.Message(fmt.Sprintf("[WARN] Failed upload to %s", path))
|
||||
}
|
||||
if cmd.ExitStatus() != 0 {
|
||||
ui.Message(fmt.Sprintf("[WARN] Failed upload to %s", path))
|
||||
}
|
||||
}(path)
|
||||
}
|
||||
|
||||
return multistep.ActionContinue
|
||||
}
|
||||
|
||||
// Cleanup is a no-op; this step creates nothing that needs tearing down.
func (s *StepUploadToS3) Cleanup(state multistep.StateBag) {}
|
||||
|
||||
func getVersionExtraFlags(ctx context.Context, comm packersdk.Communicator) (string, error) {
|
||||
buff := new(bytes.Buffer)
|
||||
cmd := &packersdk.RemoteCmd{
|
||||
Command: "aws --version",
|
||||
Stdout: buff,
|
||||
}
|
||||
if err := comm.Start(ctx, cmd); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if cmd.Wait() != 0 {
|
||||
return "", fmt.Errorf("Cannot detect aws version")
|
||||
}
|
||||
vsn := buff.String()
|
||||
switch {
|
||||
case strings.HasPrefix(vsn, "aws-cli/2."):
|
||||
return "--copy-props metadata-directive", nil
|
||||
}
|
||||
return "", nil
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
{
|
||||
"id": "ajeboa0du6edu6m43c3t",
|
||||
"service_account_id": "ajeq7dsmihqple6761c5",
|
||||
"created_at": "2018-11-19T13:38:09Z",
|
||||
"description": "description",
|
||||
"key_algorithm": "RSA_4096",
|
||||
"public_key": "-----BEGIN PUBLIC KEY-----\nMIICCgKCAgEAo/s1lN5vFpFNJvS/l+yRilQHAPDeC3JqBwpLstbqJXW4kAUaKKoe\nxkIuJuPUKOUcd/JE3LXOEt/LOFb9mkCRdpjaIW7Jd5Fw0kTHIZ5rDoq7DZx0LV9b\nGJNskdccd6M6stb1GEqVuGpVcyXMCH8tMSG3c85DkcAg0cxXgyrirAzHMPiWSTpj\nJjICkxXRVj01Xq7dIDqL2LSMrZ2kLda5m+CnfscUbwnGRPPoEg20jLiEgBM2o43e\nhpWko1NStRR5fMQcQSUBbdtvbfPracjZz2/fq4fZfqlnObgq3WpYpdGynniLH3i5\nbxPM3ufYL3HY2w5aIOY6KIwMKLf3WYlug90ieviMYAvCukrCASwyqBQlt3MKCHlN\nIcebZXJDQ1VSBuEs+4qXYlhG1p+5C07zahzigNNTm6rEo47FFfClF04mv2uJN42F\nfWlEPR+V9JHBcfcBCdvyhiGzftl/vDo2NdO751ETIhyNKzxM/Ve2PR9h/qcuEatC\nLlXUA+40epNNHbSxAauxcngyrtkn7FZAEhdjyTtx46sELyb90Z56WgnbNUUGnsS/\nHBnBy5z8RyCmI5MjTC2NtplVqtAWkG+x59mU3GoCeuI8EaNtu2YPXhl1ovRkS4NB\n1G0F4c5FiJ27/E2MbNKlV5iw9ICcDforATYTeqiXbkkEKqIIiZYZWOsCAwEAAQ==\n-----END PUBLIC KEY-----\n",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIJKQIBAAKCAgEAo/s1lN5vFpFNJvS/l+yRilQHAPDeC3JqBwpLstbqJXW4kAUa\nKKoexkIuJuPUKOUcd/JE3LXOEt/LOFb9mkCRdpjaIW7Jd5Fw0kTHIZ5rDoq7DZx0\nLV9bGJNskdccd6M6stb1GEqVuGpVcyXMCH8tMSG3c85DkcAg0cxXgyrirAzHMPiW\nSTpjJjICkxXRVj01Xq7dIDqL2LSMrZ2kLda5m+CnfscUbwnGRPPoEg20jLiEgBM2\no43ehpWko1NStRR5fMQcQSUBbdtvbfPracjZz2/fq4fZfqlnObgq3WpYpdGynniL\nH3i5bxPM3ufYL3HY2w5aIOY6KIwMKLf3WYlug90ieviMYAvCukrCASwyqBQlt3MK\nCHlNIcebZXJDQ1VSBuEs+4qXYlhG1p+5C07zahzigNNTm6rEo47FFfClF04mv2uJ\nN42FfWlEPR+V9JHBcfcBCdvyhiGzftl/vDo2NdO751ETIhyNKzxM/Ve2PR9h/qcu\nEatCLlXUA+40epNNHbSxAauxcngyrtkn7FZAEhdjyTtx46sELyb90Z56WgnbNUUG\nnsS/HBnBy5z8RyCmI5MjTC2NtplVqtAWkG+x59mU3GoCeuI8EaNtu2YPXhl1ovRk\nS4NB1G0F4c5FiJ27/E2MbNKlV5iw9ICcDforATYTeqiXbkkEKqIIiZYZWOsCAwEA\nAQKCAgEAihT1L6CGhshf4VfjJfktLQBIzYAGWjlEEx2WVMgobtbMTWoedvOZ6nS8\nDD943d7ftBkr53aoSrhslcqazpNkaiuYMuLpf2fXSxhjXmnZ2Gr1zCZcpgBP40fw\n+nXbINswiHv98zCLFrljrwy63MTKtz6fDkM4HrlcaY3aezdXnG0+JnyNgKhL6VPf\nWx/aIPZ1xH8W8RabwCV4+JFwOLFBpoLsSBM3n7DpZhLE7r7ftEeEO5zyO5MxOL81\n3dpCIP1Wt7sj169jnrBTCpGFQJTC5Kxd+kDw4nmf1LjCT6RHdYo5ELyM2jl8XI6d\ny24LWxhQ9VUGjAGSI6aabodLH/hcOBB2wG1tnO+n5y85GnKKOJgxCxaj1yR/LAcT\nFvZgbDGwAMd7h7+fU46Yj5BILk6mRvBNL6Mk2VAlBzUatGduU+Xxha3JkGxIJY4G\np1qPLNiP7as90mXXMgNEtsP2zXtyi+9q7XBOBnfL3ftHWQmu7MKQCHIKcNRchFJ4\nS1LtndjXtNchzDhbXru2qsRiASmL9u4CgZn/lM3kDHs+v2JI+V8cPk5XZhoPrrpP\nZ0SPeoLZEJ5/TtlTWAXXqP6F24rziBqnEJgpNCkeBnQYx2Rs9OKVsrlDk8cf3KkL\nH8qQ/86HYz9cEtFnVKAYOV5GtQsJRyzipMy7R/cegdtWJ8ScuiECggEBANOT7lBX\nRYw+k53TRpkk7NlWuQogKKEQx4PEf8A6HQj3SseH8u+tt3HfTFJktzWs/9EQerLS\nJky9bSPxBvDq0Zfj+IPamiY+c2w5a9WbLxk8UHCaUHcSUeWoWQwmCZqzXeUNj9f5\nQOfF+ajsqhaXE68/HuIj+dgOOn/XYyqNkxlidXa9U3gUanuftwRSephsGcsaEGTe\nep2My4Jj3hPH/9Qoith0X18atRru6RanK63bDl0FqAU/1uUycQr+h0hEwQHWoRiq\nNVXI1uxfi5/2pxK0w1MOzZLitwEQ/veCv6CZwNPf1SW1U8j70SvKVR8Z7gGDIPjS\n8klW2Z9g6gxPQ1MCggEBAMZpBFa4mEnsmt+paEFCGUtoeBapjZF94PBtdxII/T5t\ne5z4Iz7RMl+ixLhNepQu+0t+v1iDVJgDJuUjCsSF69jEca7gzmsWhs9d+gDU5Knm\n18ChbQyeaDvmqINCs2t45pA/mVIQHbA8L8n/ToI5P
63ZELDUFVzZo9kerZu1ALNB\nRoG0PhIHrGkZKwL8oE72nrZmWtfjROsZBhu7FqJ0i7va/6fgNMuMtBC/abOC7yVT\nir5XP+ZGF8XNyIZ3Ic0X8xc+XqagYsf+XobHGmbSct/ZaDP3g1z4B/7JZcbYjuTZ\nMJ3s5T+6l/qo0dfDuaVBJFJrnw8YfahX/Bn4OQ2TuQkCggEBALfhs5dDogA3Spg6\nTPtAalCh3IP+WxFQwfW1S8pHN4DZW7Z6YxsHgY2IIo7hVZFi35pVli3gEsVTRI2e\nJwgvLSWzTgNac+qVED+Y0C1/h7mI/+g9VX2HAIJ2g53ZWTOIfCxcUw3DZTOKjmbP\n+StU9hiy5SZpWfT6uMDu8xLCpHvFZI1kEi0koT78GlW5US8zlF8+Mc1YxnwzJ5QV\nM6dBhQhgi/t/eHvxfEECLrYvZ/jbj2otRk/5oczkv/ZsLCsVBiGQ5cXH+D6sJI6e\no3zNI3tQewmurd/hBmf4239FtUHhHwOFX3w8Uas1oB9M5Bn5sS7DRl67BzPSNaUc\n140HPl0CggEAX1+13TXoxog8vkzBt7TdUdlK+KHSUmCvEwObnAjEKxEXvZGt55FJ\n5JzqcSmVRcv7sgOgWRzwOg4x0S1yDJvPjiiH+SdJMkLm1KF4/pNXw7AagBdYwxsW\nQc0Trd0PQBcixa48tizXCJM16aSXCZQZXykbk9Su3C4mS8UqcNGmH4S+LrUErUgR\nAYg+m7XyHWMBUe6LtoEh7Nzfic76B2d8j/WqtPjaiAn/uJk6ZzcGW+v3op1wMvH4\nlXXg8XosvljH2qF5gCFSuo40xBbLQyfgXmg0Zd6Rv8velAQdr2MD9U/NxexNGsBI\nNA6YqF4GTECvBAuFrwz3wkdhAN7IFhWveQKCAQBdfdHB3D+m+b/hZoEIv0nPcgQf\ncCOPPNO/ufObjWed2jTL3RjoDT337Mp3mYkoP4GE9n6cl7mjlcrf7KQeRG8k35fv\n3nMoMOp21qj9J66UgGf1/RHsV/+ljcu87ggYDCVKd8uGzkspRIQIsD77He/TwZNa\nyWL4fa1EvRU6STwi7CZFfhWhMF3rBGAPshABoyJZh6Z14cioAKSR0Sl6XZ5dcB9B\naoJM8sISSlOqMIJyNnyMtdE55Ag+P7LyMe2grxlwVTv3h0o5mHSzWnjSHVYvN4q5\n6h5UUopLtyVMGCwOJz+zNT7zFqi4XIGU8a8Lg1iiKtfjgHB2X8ZWZuXBdrTj\n-----END PRIVATE KEY-----\n"
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
package version
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/version"
|
||||
packerVersion "github.com/hashicorp/packer/version"
|
||||
)
|
||||
|
||||
var YandexExportPluginVersion *version.PluginVersion
|
||||
|
||||
func init() {
|
||||
YandexExportPluginVersion = version.InitializePluginVersion(
|
||||
packerVersion.Version, packerVersion.VersionPrerelease)
|
||||
}
|
|
@ -1,45 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// BuilderId uniquely identifies artifacts produced by the yandex-import
// post-processor.
const BuilderId = "packer.post-processor.yandex-import"

// Artifact describes the Yandex Compute Image created by the yandex-import
// post-processor.
type Artifact struct {
	imageID    string // ID of the created compute image
	sourceType string // kind of source the image was created from
	sourceID   string // source identifier: image ID or object URL

	// StateData should store data such as GeneratedData
	// to be shared with post-processors
	StateData map[string]interface{}
}
|
||||
|
||||
// BuilderId returns the identifier of the post-processor that produced
// this artifact.
func (*Artifact) BuilderId() string {
	return BuilderId
}

// Id returns the ID of the created compute image.
func (a *Artifact) Id() string {
	return a.imageID
}

// Files returns nil: the artifact is a cloud image, not local files.
func (a *Artifact) Files() []string {
	return nil
}

// String renders a human-readable description of the artifact.
func (a *Artifact) String() string {
	return fmt.Sprintf("Create image %v from source type %v with ID/URL %v", a.imageID, a.sourceType, a.sourceID)
}
|
||||
|
||||
func (a *Artifact) State(name string) interface{} {
|
||||
if _, ok := a.StateData[name]; ok {
|
||||
return a.StateData[name]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy is a no-op: the created compute image is left in place.
func (a *Artifact) Destroy() error {
	return nil
}
|
|
@ -1,27 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestArtifactState_StateData exercises Artifact.State for a present key, a
// missing key, and a nil StateData map (which must return nil, not panic).
func TestArtifactState_StateData(t *testing.T) {
	expectedData := "this is the data"
	artifact := &Artifact{
		StateData: map[string]interface{}{"state_data": expectedData},
	}

	// Valid state
	result := artifact.State("state_data")
	require.Equal(t, expectedData, result)

	// Invalid state
	result = artifact.State("invalid_key")
	require.Equal(t, nil, result)

	// Nil StateData should not fail and should return nil
	artifact = &Artifact{}
	result = artifact.State("key")
	require.Equal(t, nil, result)
}
|
|
@ -1,219 +0,0 @@
|
|||
//go:generate packer-sdc struct-markdown
|
||||
//go:generate packer-sdc mapstructure-to-hcl2 -type Config
|
||||
|
||||
package yandeximport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/hashicorp/packer-plugin-sdk/common"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/config"
|
||||
"github.com/hashicorp/packer-plugin-sdk/template/interpolate"
|
||||
"github.com/hashicorp/packer/builder/file"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/hashicorp/packer/post-processor/artifice"
|
||||
"github.com/hashicorp/packer/post-processor/compress"
|
||||
yandexexport "github.com/hashicorp/packer/post-processor/yandex-export"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1/awscompatibility"
|
||||
)
|
||||
|
||||
// Config is the configuration for the yandex-import post-processor. The
// common Packer config and the Yandex access/cloud/exchange/image configs
// are squashed into one flat option namespace.
type Config struct {
	common.PackerConfig         `mapstructure:",squash"`
	yandex.AccessConfig         `mapstructure:",squash"`
	yandex.CloudConfig          `mapstructure:",squash"`
	yandexexport.ExchangeConfig `mapstructure:",squash"`
	yandex.ImageConfig          `mapstructure:",squash"`

	// The name of the bucket where the qcow2 file will be uploaded to for import.
	// This bucket must exist when the post-processor is run.
	//
	// If import occurred after Yandex-Export post-processor, artifact already
	// in storage service and first paths (URL) is used to, so no need to set this param.
	Bucket string `mapstructure:"bucket" required:"false"`
	// The name of the object key in `bucket` where the qcow2 file will be copied to import.
	// This is a [template engine](/docs/templates/legacy_json_templates/engine).
	// Therefore, you may use user variables and template functions in this field.
	ObjectName string `mapstructure:"object_name" required:"false"`
	// Whether skip removing the qcow2 file uploaded to Storage
	// after the import process has completed. Possible values are: `true` to
	// leave it in the bucket, `false` to remove it. Default is `false`.
	SkipClean bool `mapstructure:"skip_clean" required:"false"`

	// ctx holds interpolation state used when rendering object_name.
	ctx interpolate.Context
}
|
||||
|
||||
// PostProcessor imports a build artifact into a Yandex Compute Image.
type PostProcessor struct {
	config Config
}

// ConfigSpec returns the HCL2 object spec of this post-processor's config.
func (p *PostProcessor) ConfigSpec() hcldec.ObjectSpec { return p.config.FlatMapstructure().HCL2Spec() }
||||
|
||||
// Configure decodes the raw configuration and validates it. object_name is
// excluded from interpolation here because it may contain template functions
// (e.g. {{timestamp}}) that are rendered later, in PostProcess.
func (p *PostProcessor) Configure(raws ...interface{}) error {
	err := config.Decode(&p.config, &config.DecodeOpts{
		PluginType:         BuilderId,
		Interpolate:        true,
		InterpolateContext: &p.config.ctx,
		InterpolateFilter: &interpolate.RenderFilter{
			Exclude: []string{
				"object_name",
			},
		},
	}, raws...)
	if err != nil {
		return err
	}

	// Accumulate any errors from the sub-config Prepare calls.
	var errs *packersdk.MultiError

	errs = packersdk.MultiErrorAppend(errs, p.config.AccessConfig.Prepare(&p.config.ctx)...)
	errs = p.config.CloudConfig.Prepare(errs)
	errs = p.config.ImageConfig.Prepare(errs)
	errs = p.config.ExchangeConfig.Prepare(errs)

	// Set defaults
	if p.config.ObjectName == "" {
		p.config.ObjectName = "packer-import-{{timestamp}}.qcow2"
	}

	// Check that object_name is a renderable template (actual rendering
	// happens in PostProcess once generated data is available).
	if err = interpolate.Validate(p.config.ObjectName, &p.config.ctx); err != nil {
		errs = packersdk.MultiErrorAppend(
			errs, fmt.Errorf("error parsing object_name template: %s", err))
	}

	if len(errs.Errors) > 0 {
		return errs
	}

	return nil
}
|
||||
|
||||
// PostProcess turns the incoming artifact into a Yandex Compute Image.
// Depending on the artifact's builder, the source is either a local file
// uploaded to Object Storage first, an object already in storage (presigned
// URL), or an existing Compute Image referenced by ID. A temporary
// AWS-compatible access key is created for the storage operations and
// deleted before returning. Returns the new image artifact; keep/forceOverride
// flags are always false.
func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifact packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {
	var imageSrc cloudImageSource
	var fileSource bool
	var err error

	generatedData := artifact.State("generated_data")
	if generatedData == nil {
		// Make sure it's not a nil map so we can assign to it later.
		generatedData = make(map[string]interface{})
	}
	p.config.ctx.Data = generatedData

	// Render object_name now; it was deliberately excluded from
	// interpolation during Configure.
	p.config.ObjectName, err = interpolate.Render(p.config.ObjectName, &p.config.ctx)
	if err != nil {
		return nil, false, false, fmt.Errorf("error rendering object_name template: %s", err)
	}

	client, err := yandex.NewDriverYC(ui, &p.config.AccessConfig)

	if err != nil {
		return nil, false, false, err
	}

	// Create temporary storage Access Key
	respWithKey, err := client.SDK().IAM().AWSCompatibility().AccessKey().Create(ctx, &awscompatibility.CreateAccessKeyRequest{
		ServiceAccountId: p.config.ServiceAccountID,
		Description:      "this temporary key is for upload image to storage; created by Packer",
	})
	if err != nil {
		return nil, false, false, err
	}

	storageClient, err := newYCStorageClient("", respWithKey.GetAccessKey().GetKeyId(), respWithKey.GetSecret())
	if err != nil {
		return nil, false, false, fmt.Errorf("error create object storage client: %s", err)
	}

	// Pick the image source based on which builder/post-processor produced
	// the incoming artifact.
	switch artifact.BuilderId() {
	case compress.BuilderId, artifice.BuilderId, file.BuilderId:
		// Artifact as a file, need to be uploaded to storage before create Compute Image
		fileSource = true

		// As `bucket` option validate input here
		// NOTE(review): message below has a typo ("artfact"); fix in a
		// behavior-changing commit, not here.
		if p.config.Bucket == "" {
			return nil, false, false, fmt.Errorf("To upload artfact you need to specify `bucket` value")
		}

		imageSrc, err = uploadToBucket(storageClient, ui, artifact, p.config.Bucket, p.config.ObjectName)
		if err != nil {
			return nil, false, false, err
		}

	case yandexexport.BuilderId:
		// Artifact already in storage, just get URL
		imageSrc, err = presignUrl(storageClient, ui, artifact.Id())
		if err != nil {
			return nil, false, false, err
		}

	case yandex.BuilderID:
		// Artifact is plain Yandex Compute Image, just create new one based on provided
		imageSrc = &imageSource{
			imageID: artifact.Id(),
		}
	case BuilderId:
		// Artifact from prev yandex-import PP, reuse URL or Cloud Image ID
		imageSrc, err = chooseSource(artifact)
		if err != nil {
			return nil, false, false, err
		}

	default:
		err := fmt.Errorf(
			"Unknown artifact type: %s\nCan only import from Yandex-Export, Yandex-Import, Compress, Artifice and File post-processor artifacts.",
			artifact.BuilderId())
		return nil, false, false, err
	}

	ycImage, err := createYCImage(ctx, client, ui, imageSrc, &p.config)
	if err != nil {
		return nil, false, false, err
	}

	// Remove the uploaded object unless the user asked to keep it.
	if fileSource && !p.config.SkipClean {
		err = deleteFromBucket(storageClient, ui, imageSrc)
		if err != nil {
			return nil, false, false, err
		}
	}

	// Delete temporary storage Access Key
	_, err = client.SDK().IAM().AWSCompatibility().AccessKey().Delete(ctx, &awscompatibility.DeleteAccessKeyRequest{
		AccessKeyId: respWithKey.GetAccessKey().GetId(),
	})
	if err != nil {
		return nil, false, false, fmt.Errorf("error delete static access key: %s", err)
	}

	return &Artifact{
		imageID: ycImage.GetId(),
		StateData: map[string]interface{}{
			"source_type": imageSrc.GetSourceType(),
			"source_id":   imageSrc.GetSourceID(),
		},
	}, false, false, nil
}
|
||||
|
||||
func chooseSource(a packersdk.Artifact) (cloudImageSource, error) {
|
||||
st := a.State("source_type").(string)
|
||||
if st == "" {
|
||||
return nil, fmt.Errorf("could not determine source type of yandex-import artifact: %v", a)
|
||||
}
|
||||
switch st {
|
||||
case sourceType_IMAGE:
|
||||
return &imageSource{
|
||||
imageID: a.State("source_id").(string),
|
||||
}, nil
|
||||
|
||||
case sourceType_OBJECT:
|
||||
return &objectSource{
|
||||
url: a.State("source_id").(string),
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknow source type of yandex-import artifact: %s", st)
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
// Code generated by "packer-sdc mapstructure-to-hcl2"; DO NOT EDIT.
|
||||
|
||||
package yandeximport
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/hcl/v2/hcldec"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// FlatConfig is an auto-generated flat version of Config.
// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
// Do not edit by hand: regenerate via the packer-sdc
// mapstructure-to-hcl2 go:generate directive declared on Config's file.
type FlatConfig struct {
	PackerBuildName       *string           `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"`
	PackerBuilderType     *string           `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"`
	PackerCoreVersion     *string           `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"`
	PackerDebug           *bool             `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"`
	PackerForce           *bool             `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"`
	PackerOnError         *string           `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"`
	PackerUserVars        map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"`
	PackerSensitiveVars   []string          `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"`
	Endpoint              *string           `mapstructure:"endpoint" required:"false" cty:"endpoint" hcl:"endpoint"`
	ServiceAccountKeyFile *string           `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file" hcl:"service_account_key_file"`
	Token                 *string           `mapstructure:"token" required:"true" cty:"token" hcl:"token"`
	MaxRetries            *int              `mapstructure:"max_retries" cty:"max_retries" hcl:"max_retries"`
	FolderID              *string           `mapstructure:"folder_id" required:"true" cty:"folder_id" hcl:"folder_id"`
	ServiceAccountID      *string           `mapstructure:"service_account_id" required:"true" cty:"service_account_id" hcl:"service_account_id"`
	ImageName             *string           `mapstructure:"image_name" required:"false" cty:"image_name" hcl:"image_name"`
	ImageDescription      *string           `mapstructure:"image_description" required:"false" cty:"image_description" hcl:"image_description"`
	ImageFamily           *string           `mapstructure:"image_family" required:"false" cty:"image_family" hcl:"image_family"`
	ImageLabels           map[string]string `mapstructure:"image_labels" required:"false" cty:"image_labels" hcl:"image_labels"`
	ImageMinDiskSizeGb    *int              `mapstructure:"image_min_disk_size_gb" required:"false" cty:"image_min_disk_size_gb" hcl:"image_min_disk_size_gb"`
	ImageProductIDs       []string          `mapstructure:"image_product_ids" required:"false" cty:"image_product_ids" hcl:"image_product_ids"`
	Bucket                *string           `mapstructure:"bucket" required:"false" cty:"bucket" hcl:"bucket"`
	ObjectName            *string           `mapstructure:"object_name" required:"false" cty:"object_name" hcl:"object_name"`
	SkipClean             *bool             `mapstructure:"skip_clean" required:"false" cty:"skip_clean" hcl:"skip_clean"`
}

// FlatMapstructure returns a new FlatConfig.
// FlatConfig is an auto-generated flat version of Config.
// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up.
func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } {
	return new(FlatConfig)
}

// HCL2Spec returns the hcl spec of a Config.
// This spec is used by HCL to read the fields of Config.
// The decoded values from this spec will then be applied to a FlatConfig.
func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
	s := map[string]hcldec.Spec{
		"packer_build_name":          &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false},
		"packer_builder_type":        &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false},
		"packer_core_version":        &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false},
		"packer_debug":               &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false},
		"packer_force":               &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false},
		"packer_on_error":            &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false},
		"packer_user_variables":      &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false},
		"packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false},
		"endpoint":                   &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false},
		"service_account_key_file":   &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false},
		"token":                      &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false},
		"max_retries":                &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false},
		"folder_id":                  &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false},
		"service_account_id":         &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
		"image_name":                 &hcldec.AttrSpec{Name: "image_name", Type: cty.String, Required: false},
		"image_description":          &hcldec.AttrSpec{Name: "image_description", Type: cty.String, Required: false},
		"image_family":               &hcldec.AttrSpec{Name: "image_family", Type: cty.String, Required: false},
		"image_labels":               &hcldec.AttrSpec{Name: "image_labels", Type: cty.Map(cty.String), Required: false},
		"image_min_disk_size_gb":     &hcldec.AttrSpec{Name: "image_min_disk_size_gb", Type: cty.Number, Required: false},
		"image_product_ids":          &hcldec.AttrSpec{Name: "image_product_ids", Type: cty.List(cty.String), Required: false},
		"bucket":                     &hcldec.AttrSpec{Name: "bucket", Type: cty.String, Required: false},
		"object_name":                &hcldec.AttrSpec{Name: "object_name", Type: cty.String, Required: false},
		"skip_clean":                 &hcldec.AttrSpec{Name: "skip_clean", Type: cty.Bool, Required: false},
	}
	return s
}
|
|
@ -1,44 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import "fmt"
|
||||
|
||||
// Source-type discriminators recorded in artifact state.
const sourceType_IMAGE = "image"
const sourceType_OBJECT = "object"

// cloudImageSource abstracts what a Compute Image is created from: an
// existing image (referenced by ID) or an object in storage (referenced
// by URL).
type cloudImageSource interface {
	GetSourceID() string
	GetSourceType() string
	Description() string
}

// imageSource references an existing Compute Image by ID.
type imageSource struct {
	imageID string
}

// GetSourceID returns the source image ID.
func (s *imageSource) GetSourceID() string {
	return s.imageID
}

// GetSourceType reports this source as an image.
func (s *imageSource) GetSourceType() string {
	return sourceType_IMAGE
}

// Description renders the source for log output.
func (s *imageSource) Description() string {
	return fmt.Sprintf("%s source, id: %s", s.GetSourceType(), s.imageID)
}

// objectSource references an object in storage by URL.
type objectSource struct {
	url string
}

// GetSourceID returns the object URL.
func (o *objectSource) GetSourceID() string {
	return o.url
}

// GetSourceType reports this source as a storage object.
func (o *objectSource) GetSourceType() string {
	return sourceType_OBJECT
}

// Description renders the source for log output.
func (o *objectSource) Description() string {
	return fmt.Sprintf("%s source, url: %s", o.GetSourceType(), o.url)
}
|
|
@ -1,98 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
"github.com/aws/aws-sdk-go/aws/session"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
)
|
||||
|
||||
// Defaults for Yandex Object Storage's S3-compatible API.
const defaultS3Region = "ru-central1"
const defaultStorageEndpoint = "storage.yandexcloud.net"
|
||||
|
||||
func newYCStorageClient(storageEndpoint, accessKey, secretKey string) (*s3.S3, error) {
|
||||
var creds *credentials.Credentials
|
||||
|
||||
if storageEndpoint == "" {
|
||||
storageEndpoint = defaultStorageEndpoint
|
||||
}
|
||||
|
||||
s3Config := &aws.Config{
|
||||
Endpoint: aws.String(storageEndpoint),
|
||||
Region: aws.String(defaultS3Region),
|
||||
}
|
||||
|
||||
switch {
|
||||
case accessKey != "" && secretKey != "":
|
||||
creds = credentials.NewStaticCredentials(accessKey, secretKey, "")
|
||||
default:
|
||||
return nil, fmt.Errorf("either access or secret key not provided")
|
||||
}
|
||||
|
||||
s3Config.Credentials = creds
|
||||
newSession, err := session.NewSession(s3Config)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return s3.New(newSession), nil
|
||||
}
|
||||
|
||||
// presignUrl takes the storage URL of an existing object and returns an
// objectSource holding a 30-minute presigned GET URL for it, so the Compute
// service can read the object during image creation.
func presignUrl(s3conn *s3.S3, ui packersdk.Ui, fullUrl string) (cloudImageSource, error) {
	bucket, key, err := s3URLToBucketKey(fullUrl)
	if err != nil {
		return nil, err
	}

	req, _ := s3conn.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})

	// Compute service allow only `https://storage.yandexcloud.net/...` URLs for Image create process
	req.Config.S3ForcePathStyle = aws.Bool(true)

	urlStr, _, err := req.PresignRequest(30 * time.Minute)
	if err != nil {
		ui.Say(fmt.Sprintf("Failed to presign url: %s", err))
		return nil, err
	}

	return &objectSource{
		urlStr,
	}, nil
}
|
||||
|
||||
func s3URLToBucketKey(storageURL string) (bucket string, key string, err error) {
|
||||
u, err := url.Parse(storageURL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if u.Scheme == "s3" {
|
||||
// s3://bucket/key
|
||||
bucket = u.Host
|
||||
key = strings.TrimLeft(u.Path, "/")
|
||||
} else if u.Scheme == "https" {
|
||||
// https://***.storage.yandexcloud.net/...
|
||||
if u.Host == defaultStorageEndpoint {
|
||||
// No bucket name in the host part
|
||||
path := strings.SplitN(u.Path, "/", 3)
|
||||
bucket = path[1]
|
||||
key = path[2]
|
||||
} else {
|
||||
// Bucket name in host
|
||||
bucket = strings.TrimSuffix(u.Host, "."+defaultStorageEndpoint)
|
||||
key = strings.TrimLeft(u.Path, "/")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,57 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// Test_s3URLToBucketKey covers both URL shapes the splitter supports:
// path-style (bucket in the path) and host-style (bucket in the host),
// including bucket names containing dots and dashes.
func Test_s3URLToBucketKey(t *testing.T) {
	tests := []struct {
		name       string
		storageURL string
		wantBucket string
		wantKey    string
		wantErr    bool
	}{
		{
			name:       "path-style url #1",
			storageURL: "https://storage.yandexcloud.net/bucket1/key1/foobar.txt",
			wantBucket: "bucket1",
			wantKey:    "key1/foobar.txt",
			wantErr:    false,
		},
		{
			name:       "path-style url #2",
			storageURL: "https://storage.yandexcloud.net/bucket1.with.dots/key1/foobar.txt",
			wantBucket: "bucket1.with.dots",
			wantKey:    "key1/foobar.txt",
			wantErr:    false,
		},
		{
			name:       "host-style url #1",
			storageURL: "https://bucket1.with.dots.storage.yandexcloud.net/key1/foobar.txt",
			wantBucket: "bucket1.with.dots",
			wantKey:    "key1/foobar.txt",
			wantErr:    false,
		},
		{
			name:       "host-style url #2",
			storageURL: "https://bucket-with-dash.storage.yandexcloud.net/key2/foobar.txt",
			wantBucket: "bucket-with-dash",
			wantKey:    "key2/foobar.txt",
			wantErr:    false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotBucket, gotKey, err := s3URLToBucketKey(tt.storageURL)
			if (err != nil) != tt.wantErr {
				t.Errorf("s3URLToBucketKey() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			assert.Equal(t, tt.wantBucket, gotBucket)
			assert.Equal(t, tt.wantKey, gotKey)
		})
	}
}
|
|
@ -1,154 +0,0 @@
|
|||
package yandeximport
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/service/s3"
|
||||
packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
|
||||
"github.com/hashicorp/packer/builder/yandex"
|
||||
"github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
|
||||
)
|
||||
|
||||
func uploadToBucket(s3conn *s3.S3, ui packersdk.Ui, artifact packersdk.Artifact, bucket string, objectName string) (cloudImageSource, error) {
|
||||
ui.Say("Looking for qcow2 file in list of artifacts...")
|
||||
source := ""
|
||||
for _, path := range artifact.Files() {
|
||||
ui.Say(fmt.Sprintf("Found artifact %v...", path))
|
||||
if strings.HasSuffix(path, ".qcow2") {
|
||||
source = path
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if source == "" {
|
||||
return nil, fmt.Errorf("no qcow2 file found in list of artifacts")
|
||||
}
|
||||
|
||||
artifactFile, err := os.Open(source)
|
||||
if err != nil {
|
||||
err := fmt.Errorf("error opening %v", source)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Uploading file %v to bucket %v/%v...", source, bucket, objectName))
|
||||
|
||||
_, err = s3conn.PutObject(&s3.PutObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectName),
|
||||
Body: artifactFile,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Failed to upload: %v", objectName))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req, _ := s3conn.GetObjectRequest(&s3.GetObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectName),
|
||||
})
|
||||
|
||||
// Compute service allow only `https://storage.yandexcloud.net/...` URLs for Image create process
|
||||
req.Config.S3ForcePathStyle = aws.Bool(true)
|
||||
|
||||
err = req.Build()
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Failed to build S3 request: %v", err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &objectSource{
|
||||
url: req.HTTPRequest.URL.String(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// createYCImage creates a Yandex Compute Image in the folder configured in c,
// sourced either from an existing cloud image ID or from an Object Storage
// URL (depending on the concrete type of imageSrc). It blocks until the
// create operation completes and returns the resulting image fetched from
// the Compute service.
func createYCImage(ctx context.Context, driver yandex.Driver, ui packersdk.Ui, imageSrc cloudImageSource, c *Config) (*compute.Image, error) {
	req := &compute.CreateImageRequest{
		FolderId:    c.CloudConfig.FolderID,
		Name:        c.ImageConfig.ImageName,
		Description: c.ImageConfig.ImageDescription,
		Labels:      c.ImageConfig.ImageLabels,
		Family:      c.ImageConfig.ImageFamily,
		MinDiskSize: int64(c.ImageMinDiskSizeGb),
		ProductIds:  c.ImageProductIDs,
	}

	// switch on cloudImageSource type: cloud image id or storage URL
	switch v := imageSrc.(type) {
	case *imageSource:
		req.Source = &compute.CreateImageRequest_ImageId{ImageId: v.imageID}
	case *objectSource:
		req.Source = &compute.CreateImageRequest_Uri{Uri: v.url}
	}

	// Start the create operation and wrap it so we can wait on it below.
	op, err := driver.SDK().WrapOperation(driver.SDK().Compute().Image().Create(ctx, req))
	if err != nil {
		ui.Say("Error creating Yandex Compute Image")
		return nil, err
	}

	ui.Say(fmt.Sprintf("Source of Image creation: %s", imageSrc.Description()))

	ui.Say(fmt.Sprintf("Creating Yandex Compute Image %v within operation %#v", c.ImageName, op.Id()))

	ui.Say("Waiting for Yandex Compute Image creation operation to complete...")
	err = op.Wait(ctx)

	// fail if image creation operation has an error
	if err != nil {
		return nil, fmt.Errorf("failed to create Yandex Compute Image: %s", err)
	}

	// The operation metadata carries the ID of the image that was created.
	protoMetadata, err := op.Metadata()
	if err != nil {
		return nil, fmt.Errorf("error while get image create operation metadata: %s", err)
	}

	md, ok := protoMetadata.(*compute.CreateImageMetadata)
	if !ok {
		return nil, fmt.Errorf("could not get Image ID from create operation metadata")
	}

	// Fetch the full image record so callers get the final state, not just the ID.
	image, err := driver.SDK().Compute().Image().Get(ctx, &compute.GetImageRequest{
		ImageId: md.ImageId,
	})
	if err != nil {
		return nil, fmt.Errorf("error while image get request: %s", err)
	}

	return image, nil

}
|
||||
|
||||
func deleteFromBucket(s3conn *s3.S3, ui packersdk.Ui, imageSrc cloudImageSource) error {
|
||||
var url string
|
||||
// switch on cloudImageSource type: cloud image id or storage URL
|
||||
switch v := imageSrc.(type) {
|
||||
case *objectSource:
|
||||
url = v.GetSourceID()
|
||||
case *imageSource:
|
||||
return fmt.Errorf("invalid argument for `deleteFromBucket` method: %v", v)
|
||||
}
|
||||
|
||||
bucket, objectName, err := s3URLToBucketKey(url)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ui.Say(fmt.Sprintf("Deleting import source from Object Storage %s/%s...", bucket, objectName))
|
||||
|
||||
_, err = s3conn.DeleteObject(&s3.DeleteObjectInput{
|
||||
Bucket: aws.String(bucket),
|
||||
Key: aws.String(objectName),
|
||||
})
|
||||
if err != nil {
|
||||
ui.Say(fmt.Sprintf("Failed to delete: %v/%v", bucket, objectName))
|
||||
return fmt.Errorf("error deleting storage object %q in bucket %q: %s ", objectName, bucket, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
package version
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/packer-plugin-sdk/version"
|
||||
packerVersion "github.com/hashicorp/packer/version"
|
||||
)
|
||||
|
||||
// YandexImportPluginVersion holds the plugin version for the yandex-import
// post-processor, derived from Packer's core version information.
var YandexImportPluginVersion *version.PluginVersion

// init populates YandexImportPluginVersion from Packer's version and
// prerelease tag at package load time.
func init() {
	YandexImportPluginVersion = version.InitializePluginVersion(
		packerVersion.Version, packerVersion.VersionPrerelease)
}
|
|
@ -1,176 +0,0 @@
|
|||
---
|
||||
description: |
|
||||
The yandex Packer builder is able to create images for use with
|
||||
Yandex.Cloud based on existing images.
|
||||
page_title: Yandex Compute - Builders
|
||||
---
|
||||
|
||||
# Yandex Compute Builder
|
||||
|
||||
Type: `yandex`
|
||||
Artifact BuilderId: `packer.yandex`
|
||||
|
||||
The `yandex` Packer builder is able to create
|
||||
[images](https://cloud.yandex.com/docs/compute/concepts/images) for use with
|
||||
[Yandex Compute Cloud](https://cloud.yandex.com/docs/compute/)
|
||||
based on existing images.
|
||||
|
||||
## Authentication
|
||||
|
||||
Builder can authenticate with Yandex.Cloud using one of the following methods:
|
||||
|
||||
- OAuth token
|
||||
- IAM token
|
||||
- File with Service Account Key
|
||||
- Service Account assigned to Compute Instance
|
||||
|
||||
### Authentication Using Token
|
||||
|
||||
To authenticate with an OAuth token or IAM token only `token` config key is needed.
|
||||
Or use the `YC_TOKEN` environment variable with proper value.
|
||||
Token you could get [here](https://oauth.yandex.ru/authorize?response_type=token&client_id=1a6990aa636648e9b2ef855fa7bec2fb).
|
||||
Check [documentation](https://cloud.yandex.ru/docs/iam/operations/iam-token/create) about how to create IAM token.
|
||||
|
||||
### Authentication Using Service Account Key File
|
||||
|
||||
To authenticate with a service account credential, only `service_account_key_file` is needed.
|
||||
Or use the `YC_SERVICE_ACCOUNT_KEY_FILE` environment variable.
|
||||
The parameter expects a value path to the service account file in json format. Check [documentation](https://cloud.yandex.com/docs/cli/operations/authentication/service-account#auth-as-sa)
|
||||
about how to create SA and its key.
|
||||
|
||||
### Authentication Using a Compute Instance Service Account
|
||||
|
||||
Authentication with a Service Account on an instance happens when neither the `token` nor the `service_account_key_file` parameter is set.
|
||||
To get more information about this kind of authentication check the [documentation](https://cloud.yandex.com/docs/compute/operations/vm-connect/auth-inside-vm).
|
||||
|
||||
-> **NB** Do not forget to assign the proper roles to the Service Account if you use this type of auth.
|
||||
|
||||
## Basic Example
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "yandex",
|
||||
"token": "YOUR OAUTH TOKEN",
|
||||
"folder_id": "YOUR FOLDER ID",
|
||||
"source_image_family": "ubuntu-1804-lts",
|
||||
"ssh_username": "ubuntu",
|
||||
"use_ipv4_nat": "true"
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration Reference
|
||||
|
||||
Configuration options are organized below into two categories: required and
|
||||
optional. Within each category, the available options are alphabetized and
|
||||
described.
|
||||
|
||||
In addition to the options listed here, a [communicator](/docs/templates/legacy_json_templates/communicator)
|
||||
can be configured for this builder. In addition to the options defined there, a private key file
|
||||
can also be supplied to override the typical auto-generated key:
|
||||
|
||||
@include 'packer-plugin-sdk/communicator/SSH-Private-Key-File-not-required.mdx'
|
||||
|
||||
### Required:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-required.mdx'
|
||||
|
||||
#### Builder
|
||||
|
||||
@include 'builder/yandex/Config-required.mdx'
|
||||
|
||||
@include 'builder/yandex/SourceImageConfig-required.mdx'
|
||||
|
||||
#### Common
|
||||
|
||||
@include 'builder/yandex/CloudConfig-required.mdx'
|
||||
|
||||
### Optional:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-not-required.mdx'
|
||||
|
||||
#### Builder
|
||||
|
||||
@include 'builder/yandex/Config-not-required.mdx'
|
||||
|
||||
@include 'builder/yandex/SourceImageConfig-not-required.mdx'
|
||||
|
||||
#### Common
|
||||
|
||||
@include 'builder/yandex/CommonConfig-not-required.mdx'
|
||||
|
||||
#### Instance
|
||||
|
||||
@include 'builder/yandex/InstanceConfig-not-required.mdx'
|
||||
|
||||
#### Disk
|
||||
|
||||
@include 'builder/yandex/DiskConfig-not-required.mdx'
|
||||
|
||||
#### Image
|
||||
|
||||
@include 'builder/yandex/ImageConfig-not-required.mdx'
|
||||
|
||||
#### Network
|
||||
|
||||
@include 'builder/yandex/NetworkConfig-not-required.mdx'
|
||||
|
||||
## Build template data
|
||||
|
||||
In configuration directives the following variables are available:
|
||||
|
||||
- `ImageID` - ID of the built image.
|
||||
- `ImageName` - Name of the built image.
|
||||
- `ImageFamily` - Family of the built image.
|
||||
- `ImageDescription` - Description of the built image.
|
||||
- `ImageFolderID` - Folder ID where the built image is stored.
|
||||
- `SourceImageID` - The source image ID (for example `fd8fjtn3mj2kfe7h6f0r`) used to build the image.
|
||||
- `SourceImageName` - The source image name (for example `ubuntu-1604-lts-1579200746`) used to build the image.
|
||||
- `SourceImageDescription` - The source image description (for example `ubuntu 16.04 lts`).
|
||||
- `SourceImageFamily` - The source image family (for example `ubuntu-1604-lts`).
|
||||
- `SourceImageFolderID` - The folder ID where source image located (for example `standard-images`).
|
||||
|
||||
## Build Shared Information Variables
|
||||
|
||||
This builder generates data that is shared with provisioners and post-processors via the build function of
|
||||
[template engine](/docs/templates/legacy_json_templates/engine) for JSON and [contextual variables](/docs/templates/hcl_templates/contextual-variables)
|
||||
for HCL2.
|
||||
|
||||
The generated variables available for this builder are listed above.
|
||||
|
||||
Usage example:
|
||||
|
||||
<Tabs>
|
||||
<Tab heading="JSON">
|
||||
|
||||
```json
|
||||
"post-processors": [
|
||||
{
|
||||
"type": "manifest",
|
||||
"output": "manifest.json",
|
||||
"strip_path": true,
|
||||
"custom_data": {
|
||||
"source_image_id": "{{ build `SourceImageID` }}"
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab heading="HCL2">
|
||||
|
||||
```hcl
|
||||
post-processor "manifest" {
|
||||
output = "manifest.json"
|
||||
strip_path = true
|
||||
custom_data = {
|
||||
source_image_id = "${build.SourceImageID}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
|
@ -1,121 +0,0 @@
|
|||
---
|
||||
description: >
|
||||
The Yandex.Cloud Compute Image Exporter post-processor exports an image from a
|
||||
Packer
|
||||
|
||||
yandex builder run and uploads it to Yandex Object Storage. The exported
|
||||
|
||||
images can be easily shared and uploaded to other Yandex.Cloud Cloud folders.
|
||||
page_title: Yandex.Cloud Compute Image Exporter - Post-Processors
|
||||
---
|
||||
|
||||
# Yandex.Cloud Compute Image Exporter Post-Processor
|
||||
|
||||
Type: `yandex-export`
|
||||
Artifact BuilderId: `packer.post-processor.yandex-export`
|
||||
|
||||
The Yandex.Cloud Compute Image Exporter post-processor exports the resultant image
|
||||
from a yandex build as a qcow2 file to Yandex Object Storage.
|
||||
|
||||
The exporter uses the same Yandex.Cloud folder and
|
||||
authentication credentials as the yandex build that produced the image.
|
||||
A temporary VM is started in the folder using these credentials. The VM
|
||||
mounts the built image as a secondary disk, then dumps the image in qcow2 format.
|
||||
The VM then uploads the file to the provided Yandex Object Storage `paths` using the same
|
||||
credentials.
|
||||
|
||||
As such, the assigned Service Account must have write permissions to the Yandex Object Storage
`paths`. A new temporary static access key from the assigned Service Account is used to upload the
image.
|
||||
|
||||
Also, you should configure [ssh communicator](/docs/communicators/ssh). Default `ssh_username` to `ubuntu`.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Required:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-required.mdx'
|
||||
|
||||
#### Export
|
||||
|
||||
@include 'post-processor/yandex-export/Config-required.mdx'
|
||||
|
||||
#### Common
|
||||
|
||||
@include 'builder/yandex/CloudConfig-required.mdx'
|
||||
|
||||
@include 'post-processor/yandex-export/ExchangeConfig-required.mdx'
|
||||
|
||||
### Optional:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-not-required.mdx'
|
||||
|
||||
#### Export
|
||||
|
||||
@include 'post-processor/yandex-export/Config-not-required.mdx'
|
||||
|
||||
#### Common
|
||||
|
||||
@include 'builder/yandex/CommonConfig-not-required.mdx'
|
||||
|
||||
#### Instance
|
||||
|
||||
@include 'builder/yandex/InstanceConfig-not-required.mdx'
|
||||
|
||||
#### Disk
|
||||
|
||||
@include 'builder/yandex/DiskConfig-not-required.mdx'
|
||||
|
||||
#### Network
|
||||
|
||||
@include 'builder/yandex/NetworkConfig-not-required.mdx'
|
||||
|
||||
## Basic Example
|
||||
|
||||
The following example builds a Compute image in the folder with id `b1g8jvfcgmitdrslcn86`, with an
|
||||
Service Account whose keyfile is `account.json`. After the image build, a temporary VM
|
||||
will be created to export the image as a qcow2 file to
|
||||
`s3://packer-export/my-exported-image.qcow2` and
|
||||
`s3://packer-export/image-number-two.qcow2`. `keep_input_artifact` is true, so the
|
||||
source Compute image won't be deleted after the export.
|
||||
|
||||
In order for this example to work, the service account associated with builder
|
||||
must have write access to both `s3://packer-export/my-exported-image.qcow2` and
|
||||
`s3://packer-export/image-number-two.qcow2` and get permission to modify temporary instance
|
||||
(create new disk, attach to instance, etc).
|
||||
|
||||
```json
|
||||
{
|
||||
"builders": [
|
||||
{
|
||||
"type": "yandex",
|
||||
"folder_id": "b1g8jvfcgmitdrslcn86",
|
||||
"subnet_id": "e9bp6l8sa4q39yourxzq",
|
||||
"zone": "ru-central1-a",
|
||||
|
||||
"source_image_family": "ubuntu-1604-lts",
|
||||
"ssh_username": "ubuntu",
|
||||
"use_ipv4_nat": true
|
||||
}
|
||||
],
|
||||
"post-processors": [
|
||||
{
|
||||
"type": "yandex-export",
|
||||
"folder_id": "b1g8jvfcgmitdrslcn86",
|
||||
"subnet_id": "e9bp6l8sa4q39yourxzq",
|
||||
|
||||
"service_account_id": "ajeu0363240rrnn7xgen",
|
||||
|
||||
"paths": [
|
||||
"s3://packer-export-bucket/my-exported-image.qcow2",
|
||||
"s3://packer-export-bucket/template-supported-get-{{build `ImageID` }}-right-here.qcow2"
|
||||
],
|
||||
"keep_input_artifact": true
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
|
@ -1,82 +0,0 @@
|
|||
---
|
||||
description: >
|
||||
The Yandex.Cloud Compute Image Import post-processor creates an image from a
qcow2 image (or, in the near future, from a provided Storage object). It uploads the qcow2
to Yandex Object Storage and creates a new Compute Image in the target folder.
|
||||
page_title: Yandex.Cloud Compute Image Import - Post-Processors
|
||||
---
|
||||
|
||||
# Yandex.Cloud Compute Image Import Post-Processor
|
||||
|
||||
Type: `yandex-import`
|
||||
Artifact BuilderId: `packer.post-processor.yandex-import`
|
||||
|
||||
The Yandex.Cloud Compute Image Import post-processor creates a new Compute Image
from a qcow2 file. Since the Compute service supports image creation from a Storage service object,
the post-processor uploads the file to the Storage service just before issuing the image creation request.
|
||||
|
||||
The assigned Service Account must have write permissions to Yandex Object Storage.
A new temporary static access key from the assigned Service Account is used to upload the
file.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Required:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-required.mdx'
|
||||
|
||||
#### Common
|
||||
|
||||
@include 'builder/yandex/CloudConfig-required.mdx'
|
||||
|
||||
@include 'post-processor/yandex-export/ExchangeConfig-required.mdx'
|
||||
|
||||
### Optional:
|
||||
|
||||
#### Access
|
||||
|
||||
@include 'builder/yandex/AccessConfig-not-required.mdx'
|
||||
|
||||
#### Import
|
||||
|
||||
@include 'post-processor/yandex-import/Config-not-required.mdx'
|
||||
|
||||
#### Image
|
||||
|
||||
@include 'builder/yandex/ImageConfig-not-required.mdx'
|
||||
|
||||
## Basic Example
|
||||
|
||||
TBD
|
||||
|
||||
```json
|
||||
{
|
||||
"variables": {
|
||||
"token": "{{env `YC_TOKEN`}}"
|
||||
},
|
||||
"sensitive-variables": ["token"],
|
||||
"builders": [
|
||||
{
|
||||
"type": "file",
|
||||
"source": "xenial-server-cloudimg-amd64-disk1.img",
|
||||
"target": "test_artifact.qcow2"
|
||||
}
|
||||
],
|
||||
"post-processors": [
|
||||
{
|
||||
"type": "yandex-import",
|
||||
"token": "{{user `token`}}",
|
||||
"folder_id": "b1g8jvfcgmitdrslcn86",
|
||||
"service_account_id": "ajeui8kdvg8qs44fbrbr",
|
||||
|
||||
"bucket": "bucket1",
|
||||
|
||||
"image_name": "my-first-imported-image-{{isotime \"02-Jan-06-03-04-05\" | lower }}",
|
||||
|
||||
"keep_input_artifact": false
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
|
@ -712,10 +712,6 @@
|
|||
"title": "ProfitBricks",
|
||||
"path": "builders/profitbricks"
|
||||
},
|
||||
{
|
||||
"title": "Yandex.Cloud",
|
||||
"path": "builders/yandex"
|
||||
},
|
||||
{
|
||||
"title": "Custom",
|
||||
"path": "builders/custom"
|
||||
|
@ -815,14 +811,6 @@
|
|||
"title": "Shell (Local)",
|
||||
"path": "post-processors/shell-local"
|
||||
},
|
||||
{
|
||||
"title": "Yandex.Cloud Compute Export",
|
||||
"path": "post-processors/yandex-export"
|
||||
},
|
||||
{
|
||||
"title": "Yandex.Cloud Compute Import",
|
||||
"path": "post-processors/yandex-import"
|
||||
},
|
||||
{
|
||||
"title": "Community-Supported",
|
||||
"path": "post-processors/community-supported"
|
||||
|
|
|
@ -213,5 +213,12 @@
|
|||
"path": "vmware",
|
||||
"repo": "hashicorp/packer-plugin-vmware",
|
||||
"version": "latest"
|
||||
},
|
||||
{
|
||||
"title": "Yandex",
|
||||
"path": "yandex",
|
||||
"repo": "hashicorp/packer-plugin-yandex",
|
||||
"version": "latest",
|
||||
"pluginTier": "community"
|
||||
}
|
||||
]
|
||||
|
|
Loading…
Reference in New Issue