Merge pull request #10488 from Direnol/yandex/update-dump-method
yandex/update-dump-method
commit ab98409069
@@ -16,7 +16,7 @@ const (
const (
    cloudInitIPv6Config = `#!/usr/bin/env bash
dhclient -6 -D LL -nw -pf /run/dhclient_ipv6.eth0.pid -lf /var/lib/dhcp/dhclient_ipv6.eth0.leases eth0
dhclient -6 eth0
`
)
@@ -232,3 +232,32 @@ func (c *InstanceConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
    }
    return errs
}

type SourceImageConfig struct {
    // The source image family to create the new image
    // from. You can also specify source_image_id instead. Just one of source_image_id or
    // source_image_family must be specified. Example: `ubuntu-1804-lts`.
    SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
    // The ID of the folder containing the source image.
    SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
    // The source image ID to use to create the new image from.
    SourceImageID string `mapstructure:"source_image_id" required:"false"`
    // The source image name to use to create the new image
    // from. Name will be looked up in `source_image_folder_id`.
    SourceImageName string `mapstructure:"source_image_name"`
}

func (c *SourceImageConfig) Prepare(errs *packersdk.MultiError) *packersdk.MultiError {
    // Process required parameters.
    if c.SourceImageID == "" {
        if c.SourceImageFamily == "" && c.SourceImageName == "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("a source_image_name or source_image_family must be specified"))
        }
        if c.SourceImageFamily != "" && c.SourceImageName != "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("one of source_image_name or source_image_family must be specified, not both"))
        }
    }
    return errs
}
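The new `SourceImageConfig.Prepare` rule is easy to misread, so here is a minimal standalone sketch of how the three fields resolve (hypothetical helper and image ID, plain `error` instead of `packersdk.MultiError`):

```go
package main

import (
	"errors"
	"fmt"
)

// validateSourceImage mirrors SourceImageConfig.Prepare above: when no
// source_image_id is given, exactly one of family or name must be set.
func validateSourceImage(id, family, name string) error {
	if id != "" {
		return nil // an explicit image ID wins; the family/name checks are skipped
	}
	if family == "" && name == "" {
		return errors.New("a source_image_name or source_image_family must be specified")
	}
	if family != "" && name != "" {
		return errors.New("one of source_image_name or source_image_family must be specified, not both")
	}
	return nil
}

func main() {
	fmt.Println(validateSourceImage("", "ubuntu-1804-lts", ""))         // <nil>
	fmt.Println(validateSourceImage("", "", ""))                        // error: nothing specified
	fmt.Println(validateSourceImage("", "ubuntu-1804-lts", "my-image")) // error: both specified
	fmt.Println(validateSourceImage("fd80example", "", ""))             // <nil>: ID takes precedence
}
```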
@@ -4,7 +4,6 @@
package yandex

import (
    "errors"
    "fmt"

    "github.com/hashicorp/packer-plugin-sdk/common"

@@ -22,20 +21,10 @@ type Config struct {
    CommonConfig `mapstructure:",squash"`
    ImageConfig `mapstructure:",squash"`

    SourceImageConfig `mapstructure:",squash"`
    // Service account identifier to assign to instance.
    ServiceAccountID string `mapstructure:"service_account_id" required:"false"`

    // The source image family to create the new image
    // from. You can also specify source_image_id instead. Just one of a source_image_id or
    // source_image_family must be specified. Example: `ubuntu-1804-lts`.
    SourceImageFamily string `mapstructure:"source_image_family" required:"true"`
    // The ID of the folder containing the source image.
    SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
    // The source image ID to use to create the new image from.
    SourceImageID string `mapstructure:"source_image_id" required:"false"`
    // The source image name to use to create the new image
    // from. Name will be looked up in `source_image_folder_id`.
    SourceImageName string `mapstructure:"source_image_name"`
    // The ID of the folder to save built image in.
    // This defaults to value of 'folder_id'.
    TargetImageFolderID string `mapstructure:"target_image_folder_id" required:"false"`

@@ -60,6 +49,7 @@ func (c *Config) Prepare(raws ...interface{}) ([]string, error) {

    errs = c.CommonConfig.Prepare(errs)
    errs = c.ImageConfig.Prepare(errs)
    errs = c.SourceImageConfig.Prepare(errs)

    if c.ImageMinDiskSizeGb == 0 {
        c.ImageMinDiskSizeGb = c.DiskSizeGb

@@ -79,18 +69,6 @@ func (c *Config) Prepare(raws ...interface{}) ([]string, error) {
        errs = packersdk.MultiErrorAppend(errs, es...)
    }

    // Process required parameters.
    if c.SourceImageID == "" {
        if c.SourceImageFamily == "" && c.SourceImageName == "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("a source_image_name or source_image_family must be specified"))
        }
        if c.SourceImageFamily != "" && c.SourceImageName != "" {
            errs = packersdk.MultiErrorAppend(
                errs, errors.New("one of source_image_name or source_image_family must be specified, not both"))
        }
    }

    if c.TargetImageFolderID == "" {
        c.TargetImageFolderID = c.FolderID
    }
@@ -98,11 +98,11 @@ type FlatConfig struct {
    ImageLabels map[string]string `mapstructure:"image_labels" required:"false" cty:"image_labels" hcl:"image_labels"`
    ImageMinDiskSizeGb *int `mapstructure:"image_min_disk_size_gb" required:"false" cty:"image_min_disk_size_gb" hcl:"image_min_disk_size_gb"`
    ImageProductIDs []string `mapstructure:"image_product_ids" required:"false" cty:"image_product_ids" hcl:"image_product_ids"`
    ServiceAccountID *string `mapstructure:"service_account_id" required:"false" cty:"service_account_id" hcl:"service_account_id"`
    SourceImageFamily *string `mapstructure:"source_image_family" required:"true" cty:"source_image_family" hcl:"source_image_family"`
    SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"`
    SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"`
    SourceImageName *string `mapstructure:"source_image_name" cty:"source_image_name" hcl:"source_image_name"`
    ServiceAccountID *string `mapstructure:"service_account_id" required:"false" cty:"service_account_id" hcl:"service_account_id"`
    TargetImageFolderID *string `mapstructure:"target_image_folder_id" required:"false" cty:"target_image_folder_id" hcl:"target_image_folder_id"`
}

@@ -206,11 +206,11 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
        "image_labels": &hcldec.AttrSpec{Name: "image_labels", Type: cty.Map(cty.String), Required: false},
        "image_min_disk_size_gb": &hcldec.AttrSpec{Name: "image_min_disk_size_gb", Type: cty.Number, Required: false},
        "image_product_ids": &hcldec.AttrSpec{Name: "image_product_ids", Type: cty.List(cty.String), Required: false},
        "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
        "source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false},
        "source_image_folder_id": &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false},
        "source_image_id": &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false},
        "source_image_name": &hcldec.AttrSpec{Name: "source_image_name", Type: cty.String, Required: false},
        "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
        "target_image_folder_id": &hcldec.AttrSpec{Name: "target_image_folder_id", Type: cty.String, Required: false},
    }
    return s
@@ -1,81 +0,0 @@
// CODE GENERATED. DO NOT EDIT
package yandexexport

var (
    CloudInitScript = `#!/usr/bin/env bash

GetMetadata() {
  curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2>/dev/null
}

[[ "$(GetMetadata debug)" == "1" || "$(GetMetadata debug)" == "true" ]] && set -x

InstallPackages() {
  sudo apt-get update -qq && sudo apt-get install -y qemu-utils awscli
}

WaitFile() {
  local RETRIES=60
  while [[ ${RETRIES} -gt 0 ]]; do
    echo "Wait ${1}"
    if [ -e "${1}" ]; then
      echo "[${1}] has been found"
      return 0
    fi
    RETRIES=$((RETRIES-1))
    sleep 5
  done
  echo "[${1}] not found"
  return 1
}

PATHS=$(GetMetadata paths)
S3_ENDPOINT="https://storage.yandexcloud.net"
DISK_EXPORT_PATH="/dev/disk/by-id/virtio-doexport"
export AWS_SHARED_CREDENTIALS_FILE="/tmp/aws-credentials"
export AWS_REGION=ru-central1

Exit() {
  for i in ${PATHS}; do
    LOGDEST="${i}.exporter.log"
    echo "Uploading exporter log to ${LOGDEST}..."
    aws s3 --endpoint-url="${S3_ENDPOINT}" cp /var/log/syslog "${LOGDEST}"
  done

  exit $1
}

InstallPackages

echo "####### Export configuration #######"
echo "Export paths - ${PATHS}"
echo "####################################"

if ! WaitFile "${AWS_SHARED_CREDENTIALS_FILE}"; then
  echo "Failed wait credentials"
  Exit 1
fi
udevadm trigger || true

if ! WaitFile "${DISK_EXPORT_PATH}"; then
  echo "Failed wait attach disk"
  Exit 1
fi

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M "${DISK_EXPORT_PATH}" disk.qcow2; then
  echo "Failed to dump disk to qcow2 image."
  Exit 1
fi

for i in ${PATHS}; do
  echo "Uploading qcow2 disk image to ${i}..."
  if ! aws s3 --endpoint-url="${S3_ENDPOINT}" cp disk.qcow2 "${i}"; then
    echo "Failed to upload image to ${i}."
    FAIL=1
  fi
done

Exit ${FAIL}
`
)
@@ -1,33 +1,39 @@
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config
//go:generate go run ./scripts/script-to-var.go ./scripts/export.sh CloudInitScript cloud-init-script.go

package yandexexport

import (
    "context"
    "fmt"
    "io/ioutil"
    "log"
    "math"
    "strings"

    "github.com/c2h5oh/datasize"
    "github.com/hashicorp/hcl/v2/hcldec"
    "github.com/hashicorp/packer-plugin-sdk/common"
    "github.com/hashicorp/packer-plugin-sdk/communicator"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    "github.com/hashicorp/packer-plugin-sdk/multistep/commonsteps"
    "github.com/hashicorp/packer-plugin-sdk/packer"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/packerbuilderdata"
    "github.com/hashicorp/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer-plugin-sdk/template/interpolate"
    "github.com/hashicorp/packer/builder/file"
    "github.com/hashicorp/packer/builder/yandex"
    "github.com/hashicorp/packer/post-processor/artifice"
    "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
    "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1"
    ycsdk "github.com/yandex-cloud/go-sdk"
)

const (
    defaultStorageEndpoint = "storage.yandexcloud.net"
    defaultStorageRegion   = "ru-central1"
    defaultStorageEndpoint   = "storage.yandexcloud.net"
    defaultStorageRegion     = "ru-central1"
    defaultSourceImageFamily = "ubuntu-1604-lts"
)

type Config struct {
@@ -45,11 +51,27 @@ type Config struct {

    // Path to a PEM encoded private key file to use to authenticate with SSH.
    // The `~` can be used in path and will be expanded to the home directory
    // of current user. Login for attach: `ubuntu`
    // of the current user.
    SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file" required:"false"`
    // Number of attempts to wait for export (must be greater than 0). Default: 1000
    Tries int `mapstructure:"tries" required:"false"`
    ctx interpolate.Context
    // The username to connect to SSH with. Default `ubuntu`.
    SSHUsername string `mapstructure:"ssh_username" required:"false"`
    // The ID of the folder containing the source image. Default `standard-images`.
    SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"`
    // The source image family to start the export process from. Default `ubuntu-1604-lts`.
    // The image must contain the required utilities or a supported package manager
    // (`apt` or `yum`), which requires `root` or passwordless `sudo`.
    // Utilities: `qemu-img`, `aws`. The `qemu-img` utility requires the `root` user or
    // passwordless `sudo` access.
    SourceImageFamily string `mapstructure:"source_image_family" required:"false"`
    // The source image ID to use to create the new image from. Just one of source_image_id or
    // source_image_family must be specified.
    SourceImageID string `mapstructure:"source_image_id" required:"false"`
    // The extra size of the source disk in GB. This defaults to `0GB`.
    // Requires the `losetup` utility on the instance.
    // > **Careful!** Increases the cost.
    // > See [performance](https://cloud.yandex.com/docs/compute/concepts/disk#performance).
    SourceDiskExtraSize int `mapstructure:"source_disk_extra_size" required:"false"`
    ctx interpolate.Context
}

type PostProcessor struct {
@@ -83,8 +105,19 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
    if p.config.DiskSizeGb == 0 {
        p.config.DiskSizeGb = 100
    }
    if p.config.Tries <= 0 {
        p.config.Tries = 1000
    if p.config.SSHUsername == "" {
        p.config.SSHUsername = "ubuntu"
    }
    if p.config.SourceImageID == "" {
        if p.config.SourceImageFamily == "" {
            p.config.SourceImageFamily = defaultSourceImageFamily
        }
        if p.config.SourceImageFolderID == "" {
            p.config.SourceImageFolderID = yandex.StandardImagesFolderID
        }
    }
    if p.config.SourceDiskExtraSize < 0 {
        errs = packer.MultiErrorAppend(errs, fmt.Errorf("source_disk_extra_size must not be negative"))
    }

    errs = p.config.CommonConfig.Prepare(errs)
@@ -131,12 +164,20 @@ func (p *PostProcessor) Configure(raws ...interface{}) error {
}

func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifact packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {
    imageID := ""
    switch artifact.BuilderId() {
    case yandex.BuilderID, artifice.BuilderId:
        break
        imageID = artifact.State("ImageID").(string)
    case file.BuilderId:
        fileName := artifact.Files()[0]
        if content, err := ioutil.ReadFile(fileName); err == nil {
            imageID = strings.TrimSpace(string(content))
        } else {
            return nil, false, false, err
        }
    default:
        err := fmt.Errorf(
            "Unknown artifact type: %s\nCan only export from Yandex Cloud builder artifact or Artifice post-processor artifact.",
            "Unknown artifact type: %s\nCan only export from Yandex Cloud builder artifact or File builder or Artifice post-processor artifact.",
            artifact.BuilderId())
        return nil, false, false, err
    }
@@ -166,25 +207,23 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifact packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {

    log.Printf("Rendered path items: %v", p.config.Paths)

    imageID := artifact.State("ImageID").(string)
    ui.Say(fmt.Sprintf("Exporting image %v to destination: %v", imageID, p.config.Paths))

    driver, err := yandex.NewDriverYC(ui, &p.config.AccessConfig)
    if err != nil {
        return nil, false, false, err
    }
    imageDescription, err := driver.SDK().Compute().Image().Get(ctx, &compute.GetImageRequest{
        ImageId: imageID,
    })
    if err != nil {
        return nil, false, false, err
    }
    p.config.DiskConfig.DiskSizeGb = chooseBetterDiskSize(ctx, int(imageDescription.GetMinDiskSize()), p.config.DiskConfig.DiskSizeGb)

    // Set up exporter instance configuration.
    exporterName := fmt.Sprintf("%s-exporter", artifact.Id())
    yandexConfig := ycSaneDefaults(&p.config,
        map[string]string{
            "image_id":  imageID,
            "name":      exporterName,
            "paths":     strings.Join(p.config.Paths, " "),
            "user-data": CloudInitScript,
            "zone":      p.config.Zone,
        },
    )
    exporterName := strings.ToLower(fmt.Sprintf("%s-exporter", artifact.Id()))
    yandexConfig := ycSaneDefaults(&p.config, nil)
    if yandexConfig.InstanceConfig.InstanceName == "" {
        yandexConfig.InstanceConfig.InstanceName = exporterName
    }
@@ -234,10 +273,16 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifact packersdk.Artifact) (packersdk.Artifact, bool, bool, error) {
        &StepAttachDisk{
            CommonConfig: p.config.CommonConfig,
            ImageID:      imageID,
            ExtraSize:    p.config.SourceDiskExtraSize,
        },
        new(StepUploadSecrets),
        &StepWaitCloudInitScript{
            Tries: p.config.Tries,
        new(StepPrepareTools),
        &StepDump{
            ExtraSize: p.config.SourceDiskExtraSize != 0,
            SizeLimit: imageDescription.GetMinDiskSize(),
        },
        &StepUploadToS3{
            Paths: p.config.Paths,
        },
        &yandex.StepTeardownInstance{
            SerialLogFile: yandexConfig.SerialLogFile,
@@ -267,13 +312,11 @@ func ycSaneDefaults(c *Config, md map[string]string) yandex.Config {
        Communicator: communicator.Config{
            Type: "ssh",
            SSH: communicator.SSH{
                SSHUsername: "ubuntu",
                SSHUsername:       c.SSHUsername,
                SSHPrivateKeyFile: c.SSHPrivateKeyFile,
            },
        },
    }
    if c.SSHPrivateKeyFile != "" {
        yandexConfig.Communicator.SSH.SSHPrivateKeyFile = c.SSHPrivateKeyFile
    }
    if yandexConfig.Metadata == nil {
        yandexConfig.Metadata = md
    } else {
@@ -282,9 +325,9 @@ func ycSaneDefaults(c *Config, md map[string]string) yandex.Config {
        }
    }

    yandexConfig.SourceImageFamily = "ubuntu-1604-lts"
    yandexConfig.SourceImageFolderID = yandex.StandardImagesFolderID

    yandexConfig.SourceImageFamily = c.SourceImageFamily
    yandexConfig.SourceImageFolderID = c.SourceImageFolderID
    yandexConfig.SourceImageID = c.SourceImageID
    yandexConfig.ServiceAccountID = c.ServiceAccountID

    return yandexConfig
@@ -305,3 +348,8 @@ func validateServiceAccount(ctx context.Context, ycsdk *ycsdk.SDK, serviceAccoun
    })
    return err
}

func chooseBetterDiskSize(ctx context.Context, minSizeBytes, oldSizeGB int) int {
    max := math.Max(float64(minSizeBytes), float64((datasize.GB * datasize.ByteSize(oldSizeGB)).Bytes()))
    return int(math.Ceil(datasize.ByteSize(max).GBytes()))
}
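A quick worked example of the sizing rule in `chooseBetterDiskSize` (a standalone sketch; the numbers are illustrative and the unused `ctx` parameter is dropped): take whichever is larger — the image's reported minimum disk size in bytes or the configured size in GB — and round up to whole GB, using the same `c2h5oh/datasize` calls as the code above.

```go
package main

import (
	"fmt"
	"math"

	"github.com/c2h5oh/datasize"
)

// chooseDiskSize mirrors chooseBetterDiskSize above.
func chooseDiskSize(minSizeBytes, oldSizeGB int) int {
	max := math.Max(float64(minSizeBytes), float64((datasize.GB * datasize.ByteSize(oldSizeGB)).Bytes()))
	return int(math.Ceil(datasize.ByteSize(max).GBytes()))
}

func main() {
	minDisk := int(10 * datasize.GB) // the image reports a 10 GB minimum disk size
	fmt.Println(chooseDiskSize(minDisk, 8))   // 10: the configured 8 GB is too small, grow to the minimum
	fmt.Println(chooseDiskSize(minDisk, 100)) // 100: the configured size already exceeds the minimum
}
```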
@@ -46,7 +46,11 @@ type FlatConfig struct {
    ServiceAccountID *string `mapstructure:"service_account_id" required:"true" cty:"service_account_id" hcl:"service_account_id"`
    Paths []string `mapstructure:"paths" required:"true" cty:"paths" hcl:"paths"`
    SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" required:"false" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"`
    Tries *int `mapstructure:"tries" required:"false" cty:"tries" hcl:"tries"`
    SSHUsername *string `mapstructure:"ssh_username" required:"false" cty:"ssh_username" hcl:"ssh_username"`
    SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"`
    SourceImageFamily *string `mapstructure:"source_image_family" required:"false" cty:"source_image_family" hcl:"source_image_family"`
    SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"`
    SourceDiskExtraSize *int `mapstructure:"source_disk_extra_size" required:"false" cty:"source_disk_extra_size" hcl:"source_disk_extra_size"`
}

// FlatMapstructure returns a new FlatConfig.

@@ -97,7 +101,11 @@ func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec {
        "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false},
        "paths": &hcldec.AttrSpec{Name: "paths", Type: cty.List(cty.String), Required: false},
        "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false},
        "tries": &hcldec.AttrSpec{Name: "tries", Type: cty.Number, Required: false},
        "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false},
        "source_image_folder_id": &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false},
        "source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false},
        "source_image_id": &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false},
        "source_disk_extra_size": &hcldec.AttrSpec{Name: "source_disk_extra_size", Type: cty.Number, Required: false},
    }
    return s
}
@@ -1,75 +0,0 @@
#!/usr/bin/env bash

GetMetadata() {
  curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2>/dev/null
}

[[ "$(GetMetadata debug)" == "1" || "$(GetMetadata debug)" == "true" ]] && set -x

InstallPackages() {
  sudo apt-get update -qq && sudo apt-get install -y qemu-utils awscli
}

WaitFile() {
  local RETRIES=60
  while [[ ${RETRIES} -gt 0 ]]; do
    echo "Wait ${1}"
    if [ -e "${1}" ]; then
      echo "[${1}] has been found"
      return 0
    fi
    RETRIES=$((RETRIES-1))
    sleep 5
  done
  echo "[${1}] not found"
  return 1
}

PATHS=$(GetMetadata paths)
S3_ENDPOINT="https://storage.yandexcloud.net"
DISK_EXPORT_PATH="/dev/disk/by-id/virtio-doexport"
export AWS_SHARED_CREDENTIALS_FILE="/tmp/aws-credentials"
export AWS_REGION=ru-central1

Exit() {
  for i in ${PATHS}; do
    LOGDEST="${i}.exporter.log"
    echo "Uploading exporter log to ${LOGDEST}..."
    aws s3 --endpoint-url="${S3_ENDPOINT}" cp /var/log/syslog "${LOGDEST}"
  done

  exit $1
}

InstallPackages

echo "####### Export configuration #######"
echo "Export paths - ${PATHS}"
echo "####################################"

if ! WaitFile "${AWS_SHARED_CREDENTIALS_FILE}"; then
  echo "Failed wait credentials"
  Exit 1
fi
udevadm trigger || true

if ! WaitFile "${DISK_EXPORT_PATH}"; then
  echo "Failed wait attach disk"
  Exit 1
fi

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M "${DISK_EXPORT_PATH}" disk.qcow2; then
  echo "Failed to dump disk to qcow2 image."
  Exit 1
fi

for i in ${PATHS}; do
  echo "Uploading qcow2 disk image to ${i}..."
  if ! aws s3 --endpoint-url="${S3_ENDPOINT}" cp disk.qcow2 "${i}"; then
    echo "Failed to upload image to ${i}."
    FAIL=1
  fi
done

Exit ${FAIL}
@@ -1,79 +0,0 @@
package main

import (
    "bytes"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "strings"
    "text/template"

    "golang.org/x/tools/imports"
)

var (
    tmpl = template.Must(template.New("var").Parse(`
// CODE GENERATED. DO NOT EDIT
package {{.PkgName }}
var (
    {{ .Name }} = ` + "`" + `{{.Value}}` + "`" + `
)

`))
)

type vars struct {
    PkgName string
    Name    string
    Value   string
}

func main() {
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    if len(os.Args) < 3 {
        log.Fatalf("Usage: %s file varname [output]", os.Args[0])
    }
    fname := os.Args[1]
    targetVar := os.Args[2]
    pkg := os.Getenv("GOPACKAGE")
    absFilePath, err := filepath.Abs(fname)

    targetFName := strings.ToLower(targetVar) + ".go"
    if len(os.Args) > 3 {
        targetFName = os.Args[3]
    }
    log.Println(absFilePath, "=>", targetFName)
    if err != nil {
        log.Fatal(err)
    }
    b, err := ioutil.ReadFile(fname)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := os.Stat(absFilePath); err != nil {
        os.Remove(absFilePath)
    }
    buff := bytes.Buffer{}
    err = tmpl.Execute(&buff, vars{
        Name:    targetVar,
        Value:   string(b),
        PkgName: pkg,
    })
    if err != nil {
        log.Fatal(err)
    }

    data, err := imports.Process(targetFName, buff.Bytes(), nil)
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Create(targetFName)
    if err != nil {
        log.Fatal(err)
    }
    _, err = f.Write(data)
    if err != nil {
        log.Fatal(err)
    }
}
@@ -4,6 +4,7 @@ import (
    "context"
    "fmt"

    "github.com/c2h5oh/datasize"
    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer/builder/yandex"

@@ -12,7 +13,8 @@ import (

type StepAttachDisk struct {
    yandex.CommonConfig
    ImageID string
    ImageID   string
    ExtraSize int
}

func (c *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {

@@ -34,7 +36,7 @@ func (c *StepAttachDisk) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
            ImageId: c.ImageID,
        },
        Name: fmt.Sprintf("export-%s-disk", instanceID),
        Size: imageDesc.GetMinDiskSize(),
        Size: int64(datasize.ByteSize(c.ExtraSize)*datasize.GB) + imageDesc.GetMinDiskSize(),
        ZoneId: c.Zone,
        FolderId: c.FolderID,
        TypeId: c.DiskType,
@@ -0,0 +1,94 @@
package yandexexport

import (
    "bytes"
    "context"
    "errors"
    "fmt"
    "strings"
    "sync"

    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer/builder/yandex"
)

type StepDump struct {
    ExtraSize bool
    SizeLimit int64
}

const (
    dumpCommand = "%sqemu-img convert -O qcow2 -o cluster_size=2M %s disk.qcow2 2>&1"
)

// Run dumps the attached export disk to a qcow2 image on the instance.
func (s *StepDump) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    comm := state.Get("communicator").(packersdk.Communicator)

    device := "/dev/disk/by-id/virtio-doexport"
    cmdDumpCheckAccess := &packersdk.RemoteCmd{
        Command: fmt.Sprintf("qemu-img info %s", device),
    }
    if err := comm.Start(ctx, cmdDumpCheckAccess); err != nil {
        return yandex.StepHaltWithError(state, err)
    }
    sudo := ""
    if cmdDumpCheckAccess.Wait() != 0 {
        sudo = "sudo "
    }

    if s.ExtraSize && which(ctx, comm, "losetup") == nil {
        ui.Say("Map loop device...")
        buff := new(bytes.Buffer)
        cmd := &packersdk.RemoteCmd{
            Command: fmt.Sprintf("%slosetup --show -r --sizelimit %d -f %s", sudo, s.SizeLimit, device),
            Stdout:  buff,
        }
        if err := comm.Start(ctx, cmd); err != nil {
            return yandex.StepHaltWithError(state, err)
        }
        if cmd.Wait() != 0 {
            return yandex.StepHaltWithError(state, fmt.Errorf("Cannot losetup: %d", cmd.ExitStatus()))
        }
        device = strings.TrimSpace(buff.String())
        if device == "" {
            return yandex.StepHaltWithError(state, fmt.Errorf("Bad lo device"))
        }
    }
    wg := new(sync.WaitGroup)
    defer wg.Wait()
    ctxWithCancel, cancel := context.WithCancel(ctx)
    defer cancel()
    wg.Add(1)
    go func() {
        defer wg.Done()
        cmd := &packersdk.RemoteCmd{
            Command: "while true ; do sleep 3; sudo kill -s SIGUSR1 $(pidof qemu-img); done",
        }

        err := cmd.RunWithUi(ctxWithCancel, comm, ui)
        if err != nil && !errors.Is(err, context.Canceled) {
            ui.Error("qemu-img signal sender error: " + err.Error())
            return
        }
    }()

    cmdDump := &packersdk.RemoteCmd{
        Command: fmt.Sprintf(dumpCommand, sudo, device),
    }
    ui.Say("Dumping...")
    if err := cmdDump.RunWithUi(ctx, comm, ui); err != nil {
        return yandex.StepHaltWithError(state, err)
    }
    if cmdDump.ExitStatus() != 0 {
        return yandex.StepHaltWithError(state, fmt.Errorf("Cannot dump disk, exit code: %d", cmdDump.ExitStatus()))
    }

    return multistep.ActionContinue
}

// Cleanup does nothing.
func (s *StepDump) Cleanup(state multistep.StateBag) {}
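To make the flow above easier to follow: the step probes the device with a plain `qemu-img info` to decide whether `sudo` is needed, optionally remaps the disk through a read-only loop device capped at the image's minimum size, then runs `dumpCommand`, while a background goroutine sends SIGUSR1 to `qemu-img` so it reports conversion progress. A tiny sketch (with hypothetical values) of what the final remote command expands to:

```go
package main

import "fmt"

// dumpCommand is the same format string as in StepDump above; sudo and device
// are hypothetical values showing what the remote command looks like expanded.
const dumpCommand = "%sqemu-img convert -O qcow2 -o cluster_size=2M %s disk.qcow2 2>&1"

func main() {
	sudo := "sudo "                             // chosen when the plain "qemu-img info" probe fails
	device := "/dev/disk/by-id/virtio-doexport" // replaced by the losetup loop device when extra size is used
	fmt.Printf(dumpCommand+"\n", sudo, device)
	// Output: sudo qemu-img convert -O qcow2 -o cluster_size=2M /dev/disk/by-id/virtio-doexport disk.qcow2 2>&1
}
```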
@@ -0,0 +1,150 @@
package yandexexport

import (
    "context"
    "fmt"

    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer/builder/yandex"
)

type StepPrepareTools struct{}

// Run makes sure qemu-img and the aws CLI are available on the instance,
// installing them with the detected package manager when they are missing.
func (s *StepPrepareTools) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    comm := state.Get("communicator").(packersdk.Communicator)
    pkgManager, errPkgManager := detectPkgManager(ctx, comm)

    if which(ctx, comm, "qemu-img") != nil {
        if errPkgManager != nil {
            return yandex.StepHaltWithError(state, errPkgManager)
        }
        ui.Message("Install qemu-img...")
        if err := pkgManager.InstallQemuIMG(ctx, comm); err != nil {
            return yandex.StepHaltWithError(state, err)
        }
    }
    if which(ctx, comm, "aws") != nil {
        if errPkgManager != nil {
            return yandex.StepHaltWithError(state, errPkgManager)
        }
        ui.Message("Install aws...")
        if err := pkgManager.InstallAWS(ctx, comm); err != nil {
            return yandex.StepHaltWithError(state, err)
        }
    }

    return multistep.ActionContinue
}

// Cleanup does nothing.
func (s *StepPrepareTools) Cleanup(state multistep.StateBag) {}

func detectPkgManager(ctx context.Context, comm packersdk.Communicator) (pkgManager, error) {
    if err := which(ctx, comm, "apt"); err == nil {
        return &apt{}, nil
    }
    if err := which(ctx, comm, "yum"); err == nil {
        return &yum{}, nil
    }

    return nil, fmt.Errorf("Cannot detect package manager")
}

func which(ctx context.Context, comm packersdk.Communicator, what string) error {
    cmdCheckAPT := &packersdk.RemoteCmd{
        Command: fmt.Sprintf("which %s", what),
    }
    if err := comm.Start(ctx, cmdCheckAPT); err != nil {
        return err
    }
    if cmdCheckAPT.Wait() == 0 {
        return nil
    }
    return fmt.Errorf("Not found: %s", what)
}

type pkgManager interface {
    InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error
    InstallAWS(ctx context.Context, comm packersdk.Communicator) error
}

type apt struct {
    updated bool
}

func (p *apt) InstallAWS(ctx context.Context, comm packersdk.Communicator) error {
    if err := p.Update(ctx, comm); err != nil {
        return err
    }
    if err := execCMDWithSudo(ctx, comm, "apt install -y awscli"); err != nil {
        return fmt.Errorf("Cannot install awscli")
    }
    return nil
}

func (p *apt) InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error {
    if err := p.Update(ctx, comm); err != nil {
        return err
    }
    if err := execCMDWithSudo(ctx, comm, "apt install -y qemu-utils"); err != nil {
        return fmt.Errorf("Cannot install qemu-utils")
    }
    return nil
}

func (p *apt) Update(ctx context.Context, comm packersdk.Communicator) error {
    if p.updated {
        return nil
    }
    if err := execCMDWithSudo(ctx, comm, "apt update"); err != nil {
        return fmt.Errorf("Cannot update: %s", err)
    }
    p.updated = true
    return nil
}

type yum struct{}

func (p *yum) InstallAWS(ctx context.Context, comm packersdk.Communicator) error {
    if which(ctx, comm, "pip3") != nil {
        if err := execCMDWithSudo(ctx, comm, "yum install -y python3-pip"); err != nil {
            return fmt.Errorf("Cannot install python3-pip: %s", err)
        }
    }

    if err := execCMDWithSudo(ctx, comm, "pip3 install awscli"); err != nil {
        return fmt.Errorf("Cannot install awscli: %s", err)
    }
    return nil
}

func (p *yum) InstallQemuIMG(ctx context.Context, comm packersdk.Communicator) error {
    if err := execCMDWithSudo(ctx, comm, "yum install -y libgcrypt qemu-img"); err != nil {
        return fmt.Errorf("Cannot install qemu-img: %s", err)
    }
    return nil
}

func execCMDWithSudo(ctx context.Context, comm packersdk.Communicator, cmdStr string) error {
    cmd := &packersdk.RemoteCmd{
        Command: cmdStr,
    }
    if err := comm.Start(ctx, cmd); err != nil {
        return err
    }
    if cmd.Wait() != 0 {
        cmd := &packersdk.RemoteCmd{
            Command: fmt.Sprintf("sudo %s", cmdStr),
        }
        if err := comm.Start(ctx, cmd); err != nil {
            return err
        }
        if cmd.Wait() != 0 {
            return fmt.Errorf("Bad exit code: %d", cmd.ExitStatus())
        }
    }
    return nil
}
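The `execCMDWithSudo` helper encodes a simple policy: run the command as the SSH user first, and only if that fails retry it with `sudo`. The same pattern, sketched as a self-contained local program with `os/exec` standing in for the Packer communicator (the command string is just an example):

```go
package main

import (
	"fmt"
	"os/exec"
)

// runMaybeSudo tries cmdStr as-is and falls back to "sudo <cmdStr>" only when
// the plain run fails, mirroring execCMDWithSudo above.
func runMaybeSudo(cmdStr string) error {
	if err := exec.Command("sh", "-c", cmdStr).Run(); err == nil {
		return nil
	}
	if err := exec.Command("sh", "-c", "sudo "+cmdStr).Run(); err != nil {
		return fmt.Errorf("bad exit: %w", err)
	}
	return nil
}

func main() {
	// Example invocation; on a host without apt this simply reports an error.
	fmt.Println(runMaybeSudo("apt-get update"))
}
```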
@@ -13,6 +13,10 @@ import (

type StepUploadSecrets struct{}

const (
    sharedAWSCredFile = "/tmp/aws-credentials"
)

// Run uploads the S3 credentials file to the exporter instance.
func (s *StepUploadSecrets) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {

@@ -28,7 +32,7 @@ func (s *StepUploadSecrets) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
        s3Secret.GetAccessKey().GetKeyId(),
        s3Secret.GetSecret())

    err := comm.Upload("/tmp/aws-credentials", strings.NewReader(creds), nil)
    err := comm.Upload(sharedAWSCredFile, strings.NewReader(creds), nil)
    if err != nil {
        return yandex.StepHaltWithError(state, err)
    }
@@ -0,0 +1,101 @@
package yandexexport

import (
    "bytes"
    "context"
    "fmt"
    "strings"
    "sync"

    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer/builder/yandex"
)

type StepUploadToS3 struct {
    Paths []string
}

// Run uploads the dumped qcow2 image to the first configured path and then
// copies it to any additional paths.
func (s *StepUploadToS3) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    comm := state.Get("communicator").(packersdk.Communicator)

    cmdUploadToS3 := &packersdk.RemoteCmd{
        Command: fmt.Sprintf(
            "%s=%s aws s3 --region=%s --endpoint-url=https://%s cp disk.qcow2 %s",
            "AWS_SHARED_CREDENTIALS_FILE",
            sharedAWSCredFile,
            defaultStorageRegion,
            defaultStorageEndpoint,
            s.Paths[0],
        ),
    }
    ui.Say("Upload to S3...")
    if err := cmdUploadToS3.RunWithUi(ctx, comm, ui); err != nil {
        return yandex.StepHaltWithError(state, err)
    }
    if cmdUploadToS3.ExitStatus() != 0 {
        return yandex.StepHaltWithError(state, fmt.Errorf("Cannot upload to S3, exit code %d", cmdUploadToS3.ExitStatus()))
    }

    versionExtraFlags, err := getVersionExtraFlags(ctx, comm)
    if err != nil {
        ui.Message(fmt.Sprintf("[WARN] Cannot upload to other storage: %s", err))
        return multistep.ActionContinue
    }
    wg := new(sync.WaitGroup)
    defer wg.Wait()
    for _, path := range s.Paths[1:] {

        wg.Add(1)
        go func(path string) {
            defer wg.Done()
            ui.Message(fmt.Sprintf("Start copy %s to %s...", s.Paths[0], path))
            cmd := &packersdk.RemoteCmd{
                Command: fmt.Sprintf(
                    "%s=%s aws s3 --region=%s --endpoint-url=https://%s cp %s %s %s",
                    "AWS_SHARED_CREDENTIALS_FILE",
                    sharedAWSCredFile,
                    defaultStorageRegion,
                    defaultStorageEndpoint,
                    versionExtraFlags,
                    s.Paths[0],
                    path,
                ),
            }
            if err := cmd.RunWithUi(ctx, comm, ui); err != nil {
                ui.Message(fmt.Sprintf("[WARN] Failed upload to %s", path))
            }
            if cmd.ExitStatus() != 0 {
                ui.Message(fmt.Sprintf("[WARN] Failed upload to %s", path))
            }
        }(path)
    }

    return multistep.ActionContinue
}

// Cleanup does nothing.
func (s *StepUploadToS3) Cleanup(state multistep.StateBag) {}

func getVersionExtraFlags(ctx context.Context, comm packersdk.Communicator) (string, error) {
    buff := new(bytes.Buffer)
    cmd := &packersdk.RemoteCmd{
        Command: "aws --version",
        Stdout:  buff,
    }
    if err := comm.Start(ctx, cmd); err != nil {
        return "", err
    }
    if cmd.Wait() != 0 {
        return "", fmt.Errorf("Cannot detect aws version")
    }
    vsn := buff.String()
    switch {
    case strings.HasPrefix(vsn, "aws-cli/2."):
        return "--copy-props metadata-directive", nil
    }
    return "", nil
}
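The `--copy-props` option only exists in AWS CLI v2, so `getVersionExtraFlags` inspects the `aws --version` output and appends `--copy-props metadata-directive` to the server-side copies only for v2. A small standalone sketch of that decision (the version strings are illustrative, not captured from a real instance):

```go
package main

import (
	"fmt"
	"strings"
)

// extraCopyFlags mirrors getVersionExtraFlags above: AWS CLI v2 reports itself
// as "aws-cli/2.*", and only then is the --copy-props flag added.
func extraCopyFlags(versionOutput string) string {
	if strings.HasPrefix(versionOutput, "aws-cli/2.") {
		return "--copy-props metadata-directive"
	}
	return ""
}

func main() {
	fmt.Printf("%q\n", extraCopyFlags("aws-cli/2.4.5 Python/3.8.8 Linux/5.4.0 exe/x86_64"))           // "--copy-props metadata-directive"
	fmt.Printf("%q\n", extraCopyFlags("aws-cli/1.19.112 Python/3.6.9 Linux/5.4.0 botocore/1.20.112")) // ""
}
```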
@@ -1,116 +0,0 @@
package yandexexport

import (
    "bytes"
    "context"
    "encoding/json"
    "errors"
    "fmt"
    "time"

    "github.com/hashicorp/packer-plugin-sdk/multistep"
    packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
    "github.com/hashicorp/packer-plugin-sdk/retry"
    "github.com/hashicorp/packer/builder/yandex"
)

type StepWaitCloudInitScript struct {
    Tries int
}

type cloudInitStatus struct {
    V1 struct {
        Errors []interface{}
    }
}

type cloudInitError struct {
    Err error
}

func (e *cloudInitError) Error() string {
    return e.Err.Error()
}

// Run reads the instance metadata and looks for the log entry
// indicating the cloud-init script finished.
func (s *StepWaitCloudInitScript) Run(ctx context.Context, state multistep.StateBag) multistep.StepAction {
    ui := state.Get("ui").(packersdk.Ui)
    comm := state.Get("communicator").(packersdk.Communicator)

    ui.Say("Waiting for any running cloud-init script to finish...")

    ctxWithCancel, cancelCtx := context.WithCancel(ctx)

    defer cancelCtx()

    go func() {
        cmd := &packersdk.RemoteCmd{
            Command: "tail -f /var/log/cloud-init-output.log",
        }

        err := cmd.RunWithUi(ctxWithCancel, comm, ui)
        if err != nil && !errors.Is(err, context.Canceled) {
            ui.Error(err.Error())
            return
        }
        ui.Message("Cloud-init output closed")
    }()

    // periodically show progress by sending SIGUSR1 to `qemu-img` process
    go func() {
        cmd := &packersdk.RemoteCmd{
            Command: "until pid=$(pidof qemu-img) ; do sleep 1 ; done ; " +
                "while true ; do sudo kill -s SIGUSR1 ${pid}; sleep 10 ; done",
        }

        err := cmd.RunWithUi(ctxWithCancel, comm, ui)
        if err != nil && !errors.Is(err, context.Canceled) {
            ui.Error("qemu-img signal sender error: " + err.Error())
            return
        }
    }()

    // Keep checking the serial port output to see if the cloud-init script is done.
    retryConfig := &retry.Config{
        ShouldRetry: func(e error) bool {
            switch e.(type) {
            case *cloudInitError:
                return false
            }
            return true
        },
        Tries:      s.Tries,
        RetryDelay: (&retry.Backoff{InitialBackoff: 10 * time.Second, MaxBackoff: 60 * time.Second, Multiplier: 2}).Linear,
    }

    err := retryConfig.Run(ctx, func(ctx context.Context) error {
        buff := bytes.Buffer{}
        err := comm.Download("/var/run/cloud-init/result.json", &buff)
        if err != nil {
            err := fmt.Errorf("Waiting cloud-init script status: %s", err)
            return err
        }
        result := &cloudInitStatus{}
        err = json.Unmarshal(buff.Bytes(), result)
        if err != nil {
            err := fmt.Errorf("Failed parse result: %s", err)
            return &cloudInitError{Err: err}
        }
        if len(result.V1.Errors) != 0 {
            err := fmt.Errorf("Result: %v", result.V1.Errors)
            return &cloudInitError{Err: err}
        }
        return nil
    })

    if err != nil {
        err := fmt.Errorf("Error waiting for cloud-init script to finish: %s", err)
        return yandex.StepHaltWithError(state, err)
    }
    ui.Say("Cloud-init script has finished running.")
    return multistep.ActionContinue
}

// Cleanup.
func (s *StepWaitCloudInitScript) Cleanup(state multistep.StateBag) {}
@@ -80,6 +80,8 @@ can also be supplied to override the typical auto-generated key:

@include 'builder/yandex/Config-required.mdx'

@include 'builder/yandex/SourceImageConfig-required.mdx'

#### Common

@include 'builder/yandex/CloudConfig-required.mdx'

@@ -94,6 +96,8 @@ can also be supplied to override the typical auto-generated key:

@include 'builder/yandex/Config-not-required.mdx'

@include 'builder/yandex/SourceImageConfig-not-required.mdx'

#### Common

@include 'builder/yandex/CommonConfig-not-required.mdx'
@@ -2,12 +2,5 @@

- `service_account_id` (string) - Service account identifier to assign to instance.

- `source_image_folder_id` (string) - The ID of the folder containing the source image.

- `source_image_id` (string) - The source image ID to use to create the new image from.

- `source_image_name` (string) - The source image name to use to create the new image
  from. Name will be looked up in `source_image_folder_id`.

- `target_image_folder_id` (string) - The ID of the folder to save built image in.
  This defaults to value of 'folder_id'.
@@ -0,0 +1,8 @@
<!-- Code generated from the comments of the SourceImageConfig struct in builder/yandex/common_config.go; DO NOT EDIT MANUALLY -->

- `source_image_folder_id` (string) - The ID of the folder containing the source image.

- `source_image_id` (string) - The source image ID to use to create the new image from.

- `source_image_name` (string) - The source image name to use to create the new image
  from. Name will be looked up in `source_image_folder_id`.
@@ -0,0 +1,5 @@
<!-- Code generated from the comments of the SourceImageConfig struct in builder/yandex/common_config.go; DO NOT EDIT MANUALLY -->

- `source_image_family` (string) - The source image family to create the new image
  from. You can also specify source_image_id instead. Just one of source_image_id or
  source_image_family must be specified. Example: `ubuntu-1804-lts`.
@@ -2,6 +2,22 @@

- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authenticate with SSH.
  The `~` can be used in path and will be expanded to the home directory
  of current user. Login for attach: `ubuntu`
  of the current user.

- `tries` (int) - Number of attempts to wait for export (must be greater than 0). Default: 1000
- `ssh_username` (string) - The username to connect to SSH with. Default `ubuntu`.

- `source_image_folder_id` (string) - The ID of the folder containing the source image. Default `standard-images`.

- `source_image_family` (string) - The source image family to start the export process from. Default `ubuntu-1604-lts`.
  The image must contain the required utilities or a supported package manager
  (`apt` or `yum`), which requires `root` or passwordless `sudo`.
  Utilities: `qemu-img`, `aws`. The `qemu-img` utility requires the `root` user or
  passwordless `sudo` access.

- `source_image_id` (string) - The source image ID to use to create the new image from. Just one of source_image_id or
  source_image_family must be specified.

- `source_disk_extra_size` (int) - The extra size of the source disk in GB. This defaults to `0GB`.
  Requires the `losetup` utility on the instance.
  > **Careful!** Increases the cost.
  > See [performance](https://cloud.yandex.com/docs/compute/concepts/disk#performance).