parent 4c0adb7d67
commit adb4a14471
@@ -218,6 +218,12 @@ func (s *StepCreateInstance) Run(ctx context.Context, state multistep.StateBag)
        }
    }

    secDisk, ok := state.GetOk("secondary_disk")
    if !ok {
        secDisk = []*compute.AttachedDiskSpec{}
    }
    secDiskSpec := secDisk.([]*compute.AttachedDiskSpec)

    req := &compute.CreateInstanceRequest{
        FolderId: config.FolderID,
        Name:     config.InstanceName,
@@ -239,6 +245,7 @@ func (s *StepCreateInstance) Run(ctx context.Context, state multistep.StateBag)
                DiskId: disk.Id,
            },
        },
        SecondaryDiskSpecs:    secDiskSpec,
        NetworkInterfaceSpecs: []*compute.NetworkInterfaceSpec{
            {
                SubnetId: instanceSubnetID,
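For context, a minimal sketch (not part of this commit) of the hand-off the hunks above rely on: the yandex-export post-processor stores prepared disk specs in the shared state bag under "secondary_disk" (see the post-processor hunks further down), and StepCreateInstance reads them back with an empty-slice default so regular builds are unaffected. The import paths are assumed from the vendored packer-plugin-sdk imports visible elsewhere in this diff; the snippet is illustrative only.

package main

import (
    "fmt"

    "github.com/hashicorp/packer/packer-plugin-sdk/multistep"
    compute "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
)

// secondaryDiskSpecs mirrors the pattern added above: read the optional value,
// fall back to an empty slice, then type-assert.
func secondaryDiskSpecs(state multistep.StateBag) []*compute.AttachedDiskSpec {
    v, ok := state.GetOk("secondary_disk")
    if !ok {
        return []*compute.AttachedDiskSpec{}
    }
    return v.([]*compute.AttachedDiskSpec)
}

func main() {
    state := new(multistep.BasicStateBag)
    fmt.Println(len(secondaryDiskSpecs(state))) // 0: nothing stored, regular build

    state.Put("secondary_disk", []*compute.AttachedDiskSpec{{DeviceName: "doexport"}})
    fmt.Println(len(secondaryDiskSpecs(state))) // 1: exporter run
}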
@@ -0,0 +1,121 @@
// CODE GENERATED. DO NOT EDIT
package yandexexport

var (
    CloudInitScript = `#!/usr/bin/env bash

GetMetadata() {
    curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2>/dev/null
}

GetInstanceId() {
    curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/id 2>/dev/null
}

GetServiceAccountId() {
    yc compute instance get "${INSTANCE_ID}" | grep service_account | cut -f2 -d' '
}

InstallYc() {
    curl -s "${S3_ENDPOINT}/yandexcloud-yc/install.sh" | sudo bash -s -- -n -i /usr/local
}

InstallAwsCli() {
    curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
    unzip -o awscliv2.zip >/dev/null
    sudo ./aws/install
}

InstallPackages() {
    sudo apt-get update -qq && sudo apt-get install -y unzip jq qemu-utils
}

InstallTools() {
    InstallPackages
    InstallYc
    InstallAwsCli
}

INSTANCE_ID=$(GetInstanceId)
PATHS=$(GetMetadata paths)
S3_ENDPOINT="https://storage.yandexcloud.net"

Exit() {
    for i in ${PATHS}; do
        LOGDEST="${i}.exporter.log"
        echo "Uploading exporter log to ${LOGDEST}..."
        aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" cp /var/log/syslog "${LOGDEST}"
    done

    echo "Delete static access key..."
    if ! yc iam access-key delete "${YC_SK_ID}"; then
        echo "Failed to delete static access key."
        FAIL=1
    fi

    if [ $1 -ne 0 ]; then
        echo "Set metadata key 'cloud-init-status' to 'cloud-init-error' value"
        if ! yc compute instance update "${INSTANCE_ID}" --metadata cloud-init-status=cloud-init-error; then
            echo "Failed to update metadata key 'cloud-init-status'."
            exit 111
        fi
    fi

    exit $1
}

InstallTools

echo "####### Export configuration #######"
echo "Instance ID - ${INSTANCE_ID}"
echo "Export paths - ${PATHS}"
echo "####################################"

echo "Detect Service Account ID..."
SERVICE_ACCOUNT_ID=$(GetServiceAccountId)
echo "Use Service Account ID: ${SERVICE_ACCOUNT_ID}"

echo "Create static access key..."
SEC_json=$(yc iam access-key create --service-account-id "${SERVICE_ACCOUNT_ID}" \
    --description "this key is for export image to storage" --format json)

if [ $? -ne 0 ]; then
    echo "Failed to create static access key."
    Exit 1
fi

echo "Setup env variables to access storage..."
eval "$(jq -r '@sh "export YC_SK_ID=\(.access_key.id); export AWS_ACCESS_KEY_ID=\(.access_key.key_id); export AWS_SECRET_ACCESS_KEY=\(.secret)"' <<<${SEC_json})"

for i in ${PATHS}; do
    bucket=$(echo "${i}" | sed 's/\(s3:\/\/[^\/]*\).*/\1/')
    echo "Check access to storage: '${bucket}'..."
    if ! aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" ls "${bucket}" >/dev/null; then
        echo "Failed to access storage: '${bucket}'."
        Exit 1
    fi
done

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M /dev/disk/by-id/virtio-doexport disk.qcow2; then
    echo "Failed to dump disk to qcow2 image."
    Exit 1
fi

for i in ${PATHS}; do
    echo "Uploading qcow2 disk image to ${i}..."
    if ! aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" cp disk.qcow2 "${i}"; then
        echo "Failed to upload image to ${i}."
        FAIL=1
    fi
done

echo "Set metadata key 'cloud-init-status' to 'cloud-init-done' value"
if ! yc compute instance update "${INSTANCE_ID}" --metadata cloud-init-status=cloud-init-done; then
    echo "Failed to update metadata key to 'cloud-init-status'."
    Exit 1
fi

Exit ${FAIL}
`
)
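The file above is generated output, not hand-written: the //go:generate directive in the next hunk produces it from scripts/export.sh. Its only consumer is the post-processor, which hands the string to the exporter instance as cloud-init user-data (see the @@ -175,7 hunk below). A hedged sketch of that usage, assuming the package lives under post-processor/yandex-export as the neighbouring artifice import suggests:

package main

import (
    "fmt"

    yandexexport "github.com/hashicorp/packer/post-processor/yandex-export" // assumed path
)

func main() {
    // cloud-init picks the script up from instance metadata under "user-data".
    metadata := map[string]string{
        "user-data": yandexexport.CloudInitScript,
    }
    fmt.Printf("user-data payload: %d bytes\n", len(metadata["user-data"]))
}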
@@ -1,5 +1,6 @@
//go:generate struct-markdown
//go:generate mapstructure-to-hcl2 -type Config
//go:generate go run ./scripts/script-to-var.go ./scripts/export.sh CloudInitScript cloud-init-script.go

package yandexexport

@@ -21,6 +22,7 @@ import (
    "github.com/hashicorp/packer/packer-plugin-sdk/template/config"
    "github.com/hashicorp/packer/packer-plugin-sdk/template/interpolate"
    "github.com/hashicorp/packer/post-processor/artifice"
    "github.com/yandex-cloud/go-genproto/yandex/cloud/compute/v1"
    "github.com/yandex-cloud/go-genproto/yandex/cloud/iam/v1"
    ycsdk "github.com/yandex-cloud/go-sdk"
)
@@ -175,7 +177,31 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifa
        "user-data": CloudInitScript,
        "zone":      p.config.Zone,
    }
    driver, err := yandex.NewDriverYC(ui, &p.config.AccessConfig)
    if err != nil {
        return nil, false, false, err
    }

    imageDesc, err := driver.SDK().Compute().Image().Get(ctx, &compute.GetImageRequest{
        ImageId: imageID,
    })
    if err != nil {
        return nil, false, false, err
    }
    secDiskSpec := []*compute.AttachedDiskSpec{
        {
            AutoDelete: true,
            DeviceName: "doexport",
            Disk: &compute.AttachedDiskSpec_DiskSpec_{
                DiskSpec: &compute.AttachedDiskSpec_DiskSpec{
                    Source: &compute.AttachedDiskSpec_DiskSpec_ImageId{
                        ImageId: imageID,
                    },
                    Size: imageDesc.MinDiskSize,
                },
            },
        },
    }
    yandexConfig := ycSaneDefaults()
    yandexConfig.DiskName = exporterName
    yandexConfig.InstanceName = exporterName
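A note on the DeviceName above: on the exporter VM the attached secondary disk surfaces as a by-id symlink derived from that name, which is exactly the path the new export.sh dumps with qemu-img (the removed script further down waited for the same /dev/disk/by-id/virtio-doexport link after attach-disk). A trivial, purely illustrative sketch of that correspondence:

package main

import "fmt"

// Must stay in sync with DeviceName in the AttachedDiskSpec above and with
// the device path hard-coded in scripts/export.sh.
const secondaryDiskDeviceName = "doexport"

func main() {
    fmt.Println("/dev/disk/by-id/virtio-" + secondaryDiskDeviceName) // /dev/disk/by-id/virtio-doexport
}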
@@ -193,11 +219,6 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifa
        yandexConfig.PlatformID = p.config.PlatformID
    }

    driver, err := yandex.NewDriverYC(ui, &p.config.AccessConfig)
    if err != nil {
        return nil, false, false, err
    }

    ui.Say(fmt.Sprintf("Validating service_account_id: '%s'...", yandexConfig.ServiceAccountID))
    if err := validateServiceAccount(ctx, driver.SDK(), yandexConfig.ServiceAccountID); err != nil {
        return nil, false, false, err
@@ -209,6 +230,7 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifa
    state.Put("driver", driver)
    state.Put("sdk", driver.SDK())
    state.Put("ui", ui)
    state.Put("secondary_disk", secDiskSpec)

    // Build the steps.
    steps := []multistep.Step{
@@ -1,160 +0,0 @@
package yandexexport

var CloudInitScript string = `#!/usr/bin/env bash
GetMetadata () {
    echo "$(curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2> /dev/null)"
}

GetInstanceId () {
    echo "$(curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/id 2> /dev/null)"
}

GetServiceAccountId () {
    yc compute instance get ${INSTANCE_ID} | grep service_account | cut -f2 -d' '
}

InstallYc () {
    curl -s https://storage.yandexcloud.net/yandexcloud-yc/install.sh | sudo bash -s -- -n -i /usr/local
}

InstallAwsCli () {
    curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
    unzip -o awscliv2.zip > /dev/null
    sudo ./aws/install
}

InstallPackages () {
    sudo apt-get update -qq && sudo apt-get install -y unzip jq qemu-utils
}

InstallTools () {
    InstallPackages
    InstallYc
    InstallAwsCli
}

IMAGE_ID=$(GetMetadata image_id)
INSTANCE_ID=$(GetInstanceId)
DISKNAME=${INSTANCE_ID}-toexport
PATHS=$(GetMetadata paths)
ZONE=$(GetMetadata zone)

Exit () {
    for i in ${PATHS}; do
        LOGDEST="${i}.exporter.log"
        echo "Uploading exporter log to ${LOGDEST}..."
        aws s3 --region ru-central1 --endpoint-url=https://storage.yandexcloud.net cp /var/log/syslog ${LOGDEST}
    done

    echo "Delete static access key..."
    if ! yc iam access-key delete ${YC_SK_ID} ; then
        echo "Failed to delete static access key."
        FAIL=1
    fi

    if [ $1 -ne 0 ]; then
        echo "Set metadata key 'cloud-init-status' to 'cloud-init-error' value"
        if ! yc compute instance update ${INSTANCE_ID} --metadata cloud-init-status=cloud-init-error ; then
            echo "Failed to update metadata key 'cloud-init-status'."
            exit 111
        fi
    fi

    exit $1
}

InstallTools

echo "####### Export configuration #######"
echo "Image ID - ${IMAGE_ID}"
echo "Instance ID - ${INSTANCE_ID}"
echo "Instance zone - ${ZONE}"
echo "Disk name - ${DISKNAME}"
echo "Export paths - ${PATHS}"
echo "####################################"

echo "Detect Service Account ID..."
SERVICE_ACCOUNT_ID=$(GetServiceAccountId)
echo "Use Service Account ID: ${SERVICE_ACCOUNT_ID}"

echo "Create static access key..."
SEC_json=$(yc iam access-key create --service-account-id ${SERVICE_ACCOUNT_ID} \
    --description "this key is for export image to storage" --format json)

if [ $? -ne 0 ]; then
    echo "Failed to create static access key."
    Exit 1
fi

echo "Setup env variables to access storage..."
eval "$(jq -r '@sh "export YC_SK_ID=\(.access_key.id); export AWS_ACCESS_KEY_ID=\(.access_key.key_id); export AWS_SECRET_ACCESS_KEY=\(.secret)"' <<<${SEC_json} )"

for i in ${PATHS}; do
    bucket=$(echo ${i} | sed 's/\(s3:\/\/[^\/]*\).*/\1/')
    echo "Check access to storage: '${bucket}'..."
    if ! aws s3 --region ru-central1 --endpoint-url=https://storage.yandexcloud.net ls ${bucket} > /dev/null ; then
        echo "Failed to access storage: '${bucket}'."
        Exit 1
    fi
done

echo "Creating disk from image to be exported..."
if ! yc compute disk create --name ${DISKNAME} --source-image-id ${IMAGE_ID} --zone ${ZONE}; then
    echo "Failed to create disk."
    Exit 1
fi

echo "Attaching disk..."
if ! yc compute instance attach-disk ${INSTANCE_ID} --disk-name ${DISKNAME} --device-name doexport --auto-delete ; then
    echo "Failed to attach disk."
    Exit 1
fi

DISK_LINK="/dev/disk/by-id/virtio-doexport"
echo "Waiting for disk..."
for attempt in 1 2 3; do
    sleep 3
    /sbin/udevadm trigger
    if [ -L "${DISK_LINK}" ]; then
        break
    fi
    echo "Attempt ${attempt}"
    if [ ${attempt} -eq 3 ]; then
        echo "Symlink ${DISK_LINK} not found"
        Exit 1
    fi
done

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M "${DISK_LINK}" disk.qcow2 ; then
    echo "Failed to dump disk to qcow2 image."
    Exit 1
fi

echo "Detaching disk..."
if ! yc compute instance detach-disk ${INSTANCE_ID} --disk-name ${DISKNAME} ; then
    echo "Failed to detach disk."
fi

FAIL=0
echo "Deleting disk..."
if ! yc compute disk delete --name ${DISKNAME} ; then
    echo "Failed to delete disk."
    FAIL=1
fi
for i in ${PATHS}; do
    echo "Uploading qcow2 disk image to ${i}..."
    if ! aws s3 --region ru-central1 --endpoint-url=https://storage.yandexcloud.net cp disk.qcow2 ${i}; then
        echo "Failed to upload image to ${i}."
        FAIL=1
    fi
done


echo "Set metadata key 'cloud-init-status' to 'cloud-init-done' value"
if ! yc compute instance update ${INSTANCE_ID} --metadata cloud-init-status=cloud-init-done ; then
    echo "Failed to update metadata key to 'cloud-init-status'."
    Exit 1
fi

Exit ${FAIL}`
@@ -0,0 +1,115 @@
#!/usr/bin/env bash

GetMetadata() {
    curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/attributes/$1 2>/dev/null
}

GetInstanceId() {
    curl -f -H "Metadata-Flavor: Google" http://169.254.169.254/computeMetadata/v1/instance/id 2>/dev/null
}

GetServiceAccountId() {
    yc compute instance get "${INSTANCE_ID}" | grep service_account | cut -f2 -d' '
}

InstallYc() {
    curl -s "${S3_ENDPOINT}/yandexcloud-yc/install.sh" | sudo bash -s -- -n -i /usr/local
}

InstallAwsCli() {
    curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
    unzip -o awscliv2.zip >/dev/null
    sudo ./aws/install
}

InstallPackages() {
    sudo apt-get update -qq && sudo apt-get install -y unzip jq qemu-utils
}

InstallTools() {
    InstallPackages
    InstallYc
    InstallAwsCli
}

INSTANCE_ID=$(GetInstanceId)
PATHS=$(GetMetadata paths)
S3_ENDPOINT="https://storage.yandexcloud.net"

Exit() {
    for i in ${PATHS}; do
        LOGDEST="${i}.exporter.log"
        echo "Uploading exporter log to ${LOGDEST}..."
        aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" cp /var/log/syslog "${LOGDEST}"
    done

    echo "Delete static access key..."
    if ! yc iam access-key delete "${YC_SK_ID}"; then
        echo "Failed to delete static access key."
        FAIL=1
    fi

    if [ $1 -ne 0 ]; then
        echo "Set metadata key 'cloud-init-status' to 'cloud-init-error' value"
        if ! yc compute instance update "${INSTANCE_ID}" --metadata cloud-init-status=cloud-init-error; then
            echo "Failed to update metadata key 'cloud-init-status'."
            exit 111
        fi
    fi

    exit $1
}

InstallTools

echo "####### Export configuration #######"
echo "Instance ID - ${INSTANCE_ID}"
echo "Export paths - ${PATHS}"
echo "####################################"

echo "Detect Service Account ID..."
SERVICE_ACCOUNT_ID=$(GetServiceAccountId)
echo "Use Service Account ID: ${SERVICE_ACCOUNT_ID}"

echo "Create static access key..."
SEC_json=$(yc iam access-key create --service-account-id "${SERVICE_ACCOUNT_ID}" \
    --description "this key is for export image to storage" --format json)

if [ $? -ne 0 ]; then
    echo "Failed to create static access key."
    Exit 1
fi

echo "Setup env variables to access storage..."
eval "$(jq -r '@sh "export YC_SK_ID=\(.access_key.id); export AWS_ACCESS_KEY_ID=\(.access_key.key_id); export AWS_SECRET_ACCESS_KEY=\(.secret)"' <<<${SEC_json})"

for i in ${PATHS}; do
    bucket=$(echo "${i}" | sed 's/\(s3:\/\/[^\/]*\).*/\1/')
    echo "Check access to storage: '${bucket}'..."
    if ! aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" ls "${bucket}" >/dev/null; then
        echo "Failed to access storage: '${bucket}'."
        Exit 1
    fi
done

echo "Dumping disk..."
if ! qemu-img convert -O qcow2 -o cluster_size=2M /dev/disk/by-id/virtio-doexport disk.qcow2; then
    echo "Failed to dump disk to qcow2 image."
    Exit 1
fi

for i in ${PATHS}; do
    echo "Uploading qcow2 disk image to ${i}..."
    if ! aws s3 --region ru-central1 --endpoint-url="${S3_ENDPOINT}" cp disk.qcow2 "${i}"; then
        echo "Failed to upload image to ${i}."
        FAIL=1
    fi
done

echo "Set metadata key 'cloud-init-status' to 'cloud-init-done' value"
if ! yc compute instance update "${INSTANCE_ID}" --metadata cloud-init-status=cloud-init-done; then
    echo "Failed to update metadata key to 'cloud-init-status'."
    Exit 1
fi

Exit ${FAIL}
@@ -0,0 +1,79 @@
package main

import (
    "bytes"
    "io/ioutil"
    "log"
    "os"
    "path/filepath"
    "strings"
    "text/template"

    "golang.org/x/tools/imports"
)

var (
    tmpl = template.Must(template.New("var").Parse(`
// CODE GENERATED. DO NOT EDIT
package {{.PkgName }}

var (
    {{ .Name }} = ` + "`" + `{{.Value}}` + "`" + `
)

`))
)

type vars struct {
    PkgName string
    Name    string
    Value   string
}

func main() {
    log.SetFlags(log.LstdFlags | log.Lshortfile)
    if len(os.Args) < 3 {
        log.Fatalf("Usage: %s file varname [output]", os.Args[0])
    }
    fname := os.Args[1]
    targetVar := os.Args[2]
    pkg := os.Getenv("GOPACKAGE")
    absFilePath, err := filepath.Abs(fname)

    targetFName := strings.ToLower(targetVar) + ".go"
    if len(os.Args) > 3 {
        targetFName = os.Args[3]
    }
    log.Println(absFilePath, "=>", targetFName)
    if err != nil {
        log.Fatal(err)
    }
    b, err := ioutil.ReadFile(fname)
    if err != nil {
        log.Fatal(err)
    }
    if _, err := os.Stat(absFilePath); err != nil {
        os.Remove(absFilePath)
    }
    buff := bytes.Buffer{}
    err = tmpl.Execute(&buff, vars{
        Name:    targetVar,
        Value:   string(b),
        PkgName: pkg,
    })
    if err != nil {
        log.Fatal(err)
    }

    data, err := imports.Process(targetFName, buff.Bytes(), nil)
    if err != nil {
        log.Fatal(err)
    }
    f, err := os.Create(targetFName)
    if err != nil {
        log.Fatal(err)
    }
    _, err = f.Write(data)
    if err != nil {
        log.Fatal(err)
    }
}
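The generator reads GOPACKAGE to fill the package clause; `go generate` sets that variable automatically, so running the tool by hand requires exporting it yourself. A hypothetical wrapper (not part of the commit) showing an equivalent manual invocation with the same arguments as the //go:generate directive earlier in this diff, assumed to run from the yandexexport package directory:

package main

import (
    "log"
    "os"
    "os/exec"
)

func main() {
    // Same arguments as the //go:generate line: source script, variable name, output file.
    cmd := exec.Command("go", "run", "./scripts/script-to-var.go",
        "./scripts/export.sh", "CloudInitScript", "cloud-init-script.go")
    cmd.Env = append(os.Environ(), "GOPACKAGE=yandexexport") // normally set by `go generate`
    cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
    if err := cmd.Run(); err != nil {
        log.Fatal(err)
    }
}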